1/* 2 * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy 3 * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy 4 * Support: freebsdraid@avagotech.com 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are 8 * met: 9 * 10 * 1. Redistributions of source code must retain the above copyright notice, 11 * this list of conditions and the following disclaimer. 2. Redistributions 12 * in binary form must reproduce the above copyright notice, this list of 13 * conditions and the following disclaimer in the documentation and/or other 14 * materials provided with the distribution. 3. Neither the name of the 15 * <ORGANIZATION> nor the names of its contributors may be used to endorse or 16 * promote products derived from this software without specific prior written 17 * permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 * 31 * The views and conclusions contained in the software and documentation are 32 * those of the authors and should not be interpreted as representing 33 * official policies,either expressed or implied, of the FreeBSD Project. 
34 * 35 * Send feedback to: <megaraidfbsd@avagotech.com> Mail to: AVAGO TECHNOLOGIES 1621 36 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD 37 * 38 */ 39 40#include <sys/cdefs.h> 41__FBSDID("$FreeBSD: stable/10/sys/dev/mrsas/mrsas.c 310264 2016-12-19 13:14:39Z kadesai $"); 42 43#include <dev/mrsas/mrsas.h> 44#include <dev/mrsas/mrsas_ioctl.h> 45 46#include <cam/cam.h> 47#include <cam/cam_ccb.h> 48 49#include <sys/sysctl.h> 50#include <sys/types.h> 51#include <sys/sysent.h> 52#include <sys/kthread.h> 53#include <sys/taskqueue.h> 54#include <sys/smp.h> 55 56 57/* 58 * Function prototypes 59 */ 60static d_open_t mrsas_open; 61static d_close_t mrsas_close; 62static d_read_t mrsas_read; 63static d_write_t mrsas_write; 64static d_ioctl_t mrsas_ioctl; 65static d_poll_t mrsas_poll; 66 67static void mrsas_ich_startup(void *arg); 68static struct mrsas_mgmt_info mrsas_mgmt_info; 69static struct mrsas_ident *mrsas_find_ident(device_t); 70static int mrsas_setup_msix(struct mrsas_softc *sc); 71static int mrsas_allocate_msix(struct mrsas_softc *sc); 72static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode); 73static void mrsas_flush_cache(struct mrsas_softc *sc); 74static void mrsas_reset_reply_desc(struct mrsas_softc *sc); 75static void mrsas_ocr_thread(void *arg); 76static int mrsas_get_map_info(struct mrsas_softc *sc); 77static int mrsas_get_ld_map_info(struct mrsas_softc *sc); 78static int mrsas_sync_map_info(struct mrsas_softc *sc); 79static int mrsas_get_pd_list(struct mrsas_softc *sc); 80static int mrsas_get_ld_list(struct mrsas_softc *sc); 81static int mrsas_setup_irq(struct mrsas_softc *sc); 82static int mrsas_alloc_mem(struct mrsas_softc *sc); 83static int mrsas_init_fw(struct mrsas_softc *sc); 84static int mrsas_setup_raidmap(struct mrsas_softc *sc); 85static void megasas_setup_jbod_map(struct mrsas_softc *sc); 86static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend); 87static int mrsas_clear_intr(struct mrsas_softc *sc); 
88static int mrsas_get_ctrl_info(struct mrsas_softc *sc); 89static void mrsas_update_ext_vd_details(struct mrsas_softc *sc); 90static int 91mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, 92 struct mrsas_mfi_cmd *cmd_to_abort); 93static struct mrsas_softc * 94mrsas_get_softc_instance(struct cdev *dev, 95 u_long cmd, caddr_t arg); 96u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset); 97u_int8_t 98mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, 99 struct mrsas_mfi_cmd *mfi_cmd); 100void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc); 101int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr); 102int mrsas_init_adapter(struct mrsas_softc *sc); 103int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc); 104int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc); 105int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc); 106int mrsas_ioc_init(struct mrsas_softc *sc); 107int mrsas_bus_scan(struct mrsas_softc *sc); 108int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 109int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 110int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason); 111int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason); 112int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex); 113int mrsas_reset_targets(struct mrsas_softc *sc); 114int 115mrsas_issue_blocked_cmd(struct mrsas_softc *sc, 116 struct mrsas_mfi_cmd *cmd); 117int 118mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, 119 int size); 120void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd); 121void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 122void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 123void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 124void mrsas_disable_intr(struct mrsas_softc *sc); 125void mrsas_enable_intr(struct mrsas_softc *sc); 126void mrsas_free_ioc_cmd(struct 
mrsas_softc *sc); 127void mrsas_free_mem(struct mrsas_softc *sc); 128void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp); 129void mrsas_isr(void *arg); 130void mrsas_teardown_intr(struct mrsas_softc *sc); 131void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error); 132void mrsas_kill_hba(struct mrsas_softc *sc); 133void mrsas_aen_handler(struct mrsas_softc *sc); 134void 135mrsas_write_reg(struct mrsas_softc *sc, int offset, 136 u_int32_t value); 137void 138mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, 139 u_int32_t req_desc_hi); 140void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc); 141void 142mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, 143 struct mrsas_mfi_cmd *cmd, u_int8_t status); 144void 145mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, 146 u_int8_t extStatus); 147struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc); 148 149MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd 150 (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 151 152extern int mrsas_cam_attach(struct mrsas_softc *sc); 153extern void mrsas_cam_detach(struct mrsas_softc *sc); 154extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); 155extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); 156extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc); 157extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc); 158extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd); 159extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc); 160extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 161extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); 162extern void mrsas_xpt_freeze(struct mrsas_softc *sc); 163extern void mrsas_xpt_release(struct mrsas_softc *sc); 164extern MRSAS_REQUEST_DESCRIPTOR_UNION * 165mrsas_get_request_desc(struct mrsas_softc *sc, 166 u_int16_t index); 167extern int 
mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim);
static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc);
static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc);

/* Root of the hw.mrsas sysctl tree; per-unit nodes are hung off this. */
SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters");

/*
 * PCI device struct and table
 *
 */
typedef struct mrsas_ident {
	uint16_t vendor;	/* PCI vendor ID */
	uint16_t device;	/* PCI device ID */
	uint16_t subvendor;	/* subsystem vendor ID; 0xffff acts as wildcard */
	uint16_t subdevice;	/* subsystem device ID; 0xffff acts as wildcard */
	const char *desc;	/* description passed to device_set_desc() */
}	MRSAS_CTLR_ID;

/* Supported controllers; list is terminated by the all-zero sentinel entry. */
MRSAS_CTLR_ID device_table[] = {
	{0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"},
	{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"},
	{0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"},
	{0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"},
	{0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"},
	{0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"},
	{0, 0, 0, 0, NULL}
};

/*
 * Character device entry points
 *
 */
static struct cdevsw mrsas_cdevsw = {
	.d_version = D_VERSION,
	.d_open = mrsas_open,
	.d_close = mrsas_close,
	.d_read = mrsas_read,
	.d_write = mrsas_write,
	.d_ioctl = mrsas_ioctl,
	.d_poll = mrsas_poll,
	.d_name = "mrsas",
};

MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver");

/*
 * In the cdevsw routines, we find our softc by using the si_drv1 member of
 * struct cdev. We set this variable to point to our softc in our attach
 * routine when we create the /dev entry.
 */
/* cdev open entry point; no per-open state is kept, so this always succeeds. */
int
mrsas_open(struct cdev *dev, int oflags, int devtype, d_thread_t *td)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

/* cdev close entry point; nothing to tear down. */
int
mrsas_close(struct cdev *dev, int fflag, int devtype, d_thread_t *td)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

/* cdev read entry point; reads are not supported and return no data. */
int
mrsas_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

/* cdev write entry point; writes are not supported and are silently accepted. */
int
mrsas_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct mrsas_softc *sc;

	sc = dev->si_drv1;
	return (0);
}

/*
 * Register Read/Write Functions
 *
 */
/* Write a 32-bit value to a controller register at byte offset 'offset'. */
void
mrsas_write_reg(struct mrsas_softc *sc, int offset,
    u_int32_t value)
{
	bus_space_tag_t bus_tag = sc->bus_tag;
	bus_space_handle_t bus_handle = sc->bus_handle;

	bus_space_write_4(bus_tag, bus_handle, offset, value);
}

/* Read a 32-bit controller register at byte offset 'offset'. */
u_int32_t
mrsas_read_reg(struct mrsas_softc *sc, int offset)
{
	bus_space_tag_t bus_tag = sc->bus_tag;
	bus_space_handle_t bus_handle = sc->bus_handle;

	return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset));
}


/*
 * Interrupt Disable/Enable/Clear Functions
 *
 */
/*
 * Mask all controller interrupts and note the masked state in the softc so
 * the ISR can ignore spurious invocations.
 */
void
mrsas_disable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = 0xFFFFFFFF;
	u_int32_t status;

	sc->mask_interrupts = 1;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask);
	/* Dummy read to force pci flush */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

/*
 * Clear any pending interrupt status, then unmask the reply interrupt.
 * Each write is followed by a read-back to flush the posted PCI write.
 */
void
mrsas_enable_intr(struct mrsas_softc *sc)
{
	u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK;
	u_int32_t status;

	sc->mask_interrupts = 0;
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask);
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask));
}

/*
 * Check the outbound interrupt status register.  Returns 1 when the
 * controller posted a reply interrupt for us, 0 when the interrupt
 * belongs to someone else (shared line) and should be ignored.
 */
static int
mrsas_clear_intr(struct mrsas_softc *sc)
{
	u_int32_t status;

	/* Read received interrupt */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status));

	/* Not our interrupt, so just return */
	if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK))
		return (0);

	/* We got a reply interrupt */
	return (1);
}

/*
 * PCI Support Functions
 *
 */
/*
 * Walk device_table looking for an entry matching this PCI device's
 * vendor/device (and, unless wildcarded with 0xffff, subvendor/subdevice).
 * Returns the matching entry or NULL when the device is not ours.
 */
static struct mrsas_ident *
mrsas_find_ident(device_t dev)
{
	struct mrsas_ident *pci_device;

	for (pci_device = device_table; pci_device->vendor != 0; pci_device++) {
		if ((pci_device->vendor == pci_get_vendor(dev)) &&
		    (pci_device->device == pci_get_device(dev)) &&
		    ((pci_device->subvendor == pci_get_subvendor(dev)) ||
		    (pci_device->subvendor == 0xffff)) &&
		    ((pci_device->subdevice == pci_get_subdevice(dev)) ||
		    (pci_device->subdevice == 0xffff)))
			return (pci_device);
	}
	return (NULL);
}

/*
 * newbus probe method.  Announces the driver version once (first controller
 * only) and claims matching devices with a priority between
 * BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY.
 */
static int
mrsas_probe(device_t dev)
{
	static u_int8_t first_ctrl = 1;
	struct mrsas_ident *id;

	if ((id = mrsas_find_ident(dev)) != NULL) {
		if (first_ctrl) {
			printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n",
			    MRSAS_VERSION);
			first_ctrl = 0;
		}
		device_set_desc(dev, id->desc);
		/* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */
		return (-30);
	}
	return (ENXIO);
}

/*
 * mrsas_setup_sysctl: setup sysctl values for mrsas
 * input: Adapter instance soft state
 *
 * Setup sysctl entries for mrsas driver.
 */
static void
mrsas_setup_sysctl(struct mrsas_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d",
	    device_get_unit(sc->mrsas_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev));

	/* Prefer the context/tree newbus already created for this device. */
	sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev);

	/* Otherwise create our own per-unit node under hw.mrsas. */
	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}
	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0,
	    "Disable the use of OCR");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION,
	    strlen(MRSAS_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_count", CTLFLAG_RD,
	    &sc->reset_count, 0, "number of ocr from start of the day");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "fw_outstanding", CTLFLAG_RD,
	    &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "Max FW outstanding commands");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0,
	    "Driver debug level");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout,
	    0, "Driver IO timeout value in mili-second.");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW,
	    &sc->mrsas_fw_fault_check_delay,
	    0, "FW fault check thread delay in seconds. <default is 1 sec>");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "reset_in_progress", CTLFLAG_RD,
	    &sc->reset_in_progress, 0, "ocr in progress status");

	SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "block_sync_cache", CTLFLAG_RW,
	    &sc->block_sync_cache, 0,
	    "Block SYNC CACHE at driver. <default: 0, send it to FW>");

}

/*
 * mrsas_get_tunables: get tunable parameters.
 * input: Adapter instance soft state
 *
 * Get tunable parameters. This will help to debug driver at boot time.
 */
static void
mrsas_get_tunables(struct mrsas_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mrsas_debug = MRSAS_FAULT;
	sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT;
	sc->mrsas_fw_fault_check_delay = 1;
	sc->reset_count = 0;
	sc->reset_in_progress = 0;
	sc->block_sync_cache = 0;

	/*
	 * Grab the global variables.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug);

	/*
	 * Grab the global variables.
	 */
	TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds);

	/* Grab the unit-instance variables */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level",
	    device_get_unit(sc->mrsas_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug);
}

/*
 * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information.
 * Used to get sequence number at driver load time.
480 * input: Adapter soft state 481 * 482 * Allocates DMAable memory for the event log info internal command. 483 */ 484int 485mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc) 486{ 487 int el_info_size; 488 489 /* Allocate get event log info command */ 490 el_info_size = sizeof(struct mrsas_evt_log_info); 491 if (bus_dma_tag_create(sc->mrsas_parent_tag, 492 1, 0, 493 BUS_SPACE_MAXADDR_32BIT, 494 BUS_SPACE_MAXADDR, 495 NULL, NULL, 496 el_info_size, 497 1, 498 el_info_size, 499 BUS_DMA_ALLOCNOW, 500 NULL, NULL, 501 &sc->el_info_tag)) { 502 device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n"); 503 return (ENOMEM); 504 } 505 if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem, 506 BUS_DMA_NOWAIT, &sc->el_info_dmamap)) { 507 device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n"); 508 return (ENOMEM); 509 } 510 if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap, 511 sc->el_info_mem, el_info_size, mrsas_addr_cb, 512 &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) { 513 device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n"); 514 return (ENOMEM); 515 } 516 memset(sc->el_info_mem, 0, el_info_size); 517 return (0); 518} 519 520/* 521 * mrsas_free_evt_info_cmd: Free memory for Event log info command 522 * input: Adapter soft state 523 * 524 * Deallocates memory for the event log info internal command. 525 */ 526void 527mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc) 528{ 529 if (sc->el_info_phys_addr) 530 bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap); 531 if (sc->el_info_mem != NULL) 532 bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap); 533 if (sc->el_info_tag != NULL) 534 bus_dma_tag_destroy(sc->el_info_tag); 535} 536 537/* 538 * mrsas_get_seq_num: Get latest event sequence number 539 * @sc: Adapter soft state 540 * @eli: Firmware event log sequence number information. 541 * 542 * Firmware maintains a log of all events in a non-volatile area. 
 * Driver get the sequence number using DCMD
 * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time.
 *
 * Returns 0 on success, -ENOMEM when no MFI command or DMA buffer is
 * available, or ETIMEDOUT when the firmware did not answer (in which case
 * an OCR is scheduled instead of releasing the still-outstanding command).
 */

static int
mrsas_get_seq_num(struct mrsas_softc *sc,
    struct mrsas_evt_log_info *eli)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	u_int8_t do_ocr = 1, retcode = 0;

	cmd = mrsas_get_mfi_cmd(sc);

	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
		return -ENOMEM;
	}
	dcmd = &cmd->frame->dcmd;

	/* DMA buffer the firmware writes the event log info into. */
	if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) {
		device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n");
		mrsas_release_mfi_cmd(cmd);
		return -ENOMEM;
	}
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	/* Build the GET_INFO DCMD with a single 32-bit SGE. */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info);

	retcode = mrsas_issue_blocked_cmd(sc, cmd);
	if (retcode == ETIMEDOUT)
		goto dcmd_timeout;

	do_ocr = 0;
	/*
	 * Copy the data back into callers buffer
	 */
	memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info));
	mrsas_free_evt_log_info_cmd(sc);

dcmd_timeout:
	/*
	 * On timeout the command is still owned by FW: schedule an OCR
	 * rather than releasing it.  On success release it normally.
	 */
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;
	else
		mrsas_release_mfi_cmd(cmd);

	return retcode;
}


/*
 * mrsas_register_aen: Register for asynchronous event notification
 * @sc: Adapter soft state
 * @seq_num: Starting sequence number
 * @class_locale: Class of the event
 *
 * This function subscribes for events beyond the @seq_num
 * and type @class_locale.
 *
 */
static int
mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num,
    u_int32_t class_locale_word)
{
	int ret_val;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	union mrsas_evt_class_locale curr_aen;
	union mrsas_evt_class_locale prev_aen;

	/*
	 * If there an AEN pending already (aen_cmd), check if the
	 * class_locale of that pending AEN is inclusive of the new AEN
	 * request we currently have. If it is, then we don't have to do
	 * anything. In other words, whichever events the current AEN request
	 * is subscribing to, have already been subscribed to. If the old_cmd
	 * is _not_ inclusive, then we have to abort that command, form a
	 * class_locale that is superset of both old and current and re-issue
	 * to the FW
	 */

	curr_aen.word = class_locale_word;

	if (sc->aen_cmd) {

		prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1];

		/*
		 * A class whose enum value is smaller is inclusive of all
		 * higher values. If a PROGRESS (= -1) was previously
		 * registered, then a new registration requests for higher
		 * classes need not be sent to FW. They are automatically
		 * included. Locale numbers don't have such hierarchy. They
		 * are bitmap values
		 */
		if ((prev_aen.members.class <= curr_aen.members.class) &&
		    !((prev_aen.members.locale & curr_aen.members.locale) ^
		    curr_aen.members.locale)) {
			/*
			 * Previously issued event registration includes
			 * current request. Nothing to do.
			 */
			return 0;
		} else {
			/* Merge locales and take the broader (smaller) class. */
			curr_aen.members.locale |= prev_aen.members.locale;

			if (prev_aen.members.class < curr_aen.members.class)
				curr_aen.members.class = prev_aen.members.class;

			sc->aen_cmd->abort_aen = 1;
			ret_val = mrsas_issue_blocked_abort_cmd(sc,
			    sc->aen_cmd);

			if (ret_val) {
				printf("mrsas: Failed to abort previous AEN command\n");
				return ret_val;
			} else
				sc->aen_cmd = NULL;
		}
	}
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd)
		return ENOMEM;

	dcmd = &cmd->frame->dcmd;

	memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail));

	/*
	 * Prepare DCMD for aen registration
	 */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0x0;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail);
	dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT;
	dcmd->mbox.w[0] = seq_num;
	sc->last_seq_num = seq_num;
	dcmd->mbox.w[1] = curr_aen.word;
	dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr;
	dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail);

	/* Someone registered an AEN while we were aborting; keep theirs. */
	if (sc->aen_cmd != NULL) {
		mrsas_release_mfi_cmd(cmd);
		return 0;
	}
	/*
	 * Store reference to the cmd used to register for AEN. When an
	 * application wants us to register for AEN, we have to abort this
	 * cmd and re-register with a new EVENT LOCALE supplied by that app
	 */
	sc->aen_cmd = cmd;

	/*
	 * Issue the aen registration frame
	 */
	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n");
		return (1);
	}
	return 0;
}

/*
 * mrsas_start_aen: Subscribes to AEN during driver load time
 * @instance: Adapter soft state
 *
 * Fetches the newest event sequence number from FW and registers for all
 * locales at DEBUG class starting just past it.  Returns 0 on success.
 */
static int
mrsas_start_aen(struct mrsas_softc *sc)
{
	struct mrsas_evt_log_info eli;
	union mrsas_evt_class_locale class_locale;


	/* Get the latest sequence number from FW */

	memset(&eli, 0, sizeof(eli));

	if (mrsas_get_seq_num(sc, &eli))
		return -1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	return mrsas_register_aen(sc, eli.newest_seq_num + 1,
	    class_locale.word);

}

/*
 * mrsas_setup_msix: Allocate MSI-x vectors
 * @sc: adapter soft state
 *
 * Allocates one IRQ resource per MSI-X vector (rids start at 1) and hooks
 * mrsas_isr on each; any failure tears down everything set up so far.
 */
static int
mrsas_setup_msix(struct mrsas_softc *sc)
{
	int i;

	for (i = 0; i < sc->msix_vectors; i++) {
		sc->irq_context[i].sc = sc;
		sc->irq_context[i].MSIxIndex = i;
		sc->irq_id[i] = i + 1;	/* MSI-X rids are 1-based */
		sc->mrsas_irq[i] = bus_alloc_resource_any
		    (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i]
		    ,RF_ACTIVE);
		if (sc->mrsas_irq[i] == NULL) {
			device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n");
			goto irq_alloc_failed;
		}
		if (bus_setup_intr(sc->mrsas_dev,
		    sc->mrsas_irq[i],
		    INTR_MPSAFE | INTR_TYPE_CAM,
		    NULL, mrsas_isr, &sc->irq_context[i],
		    &sc->intr_handle[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot set up MSI-x interrupt handler\n");
			goto irq_alloc_failed;
		}
	}
	return SUCCESS;

irq_alloc_failed:
	mrsas_teardown_intr(sc);
	return (FAIL);
}

/*
 * mrsas_allocate_msix: Setup MSI-x vectors
 * @sc: adapter soft state
 *
 * Asks the PCI layer for sc->msix_vectors MSI-X messages; on failure the
 * interrupt setup done so far is torn down.
 */
static int
mrsas_allocate_msix(struct mrsas_softc *sc)
{
	if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) {
		device_printf(sc->mrsas_dev, "Using MSI-X with %d number"
		    " of vectors\n", sc->msix_vectors);
	} else {
		device_printf(sc->mrsas_dev, "MSI-x setup failed\n");
		goto irq_alloc_failed;
	}
	return SUCCESS;

irq_alloc_failed:
	mrsas_teardown_intr(sc);
	return (FAIL);
}

/*
 * mrsas_attach: PCI entry point
 * input: pointer to device struct
 *
 * Performs setup of PCI and registers, initializes mutexes and linked lists,
 * registers interrupts and CAM, and initializes the adapter/controller to
 * its proper state.
 */
static int
mrsas_attach(device_t dev)
{
	struct mrsas_softc *sc = device_get_softc(dev);
	uint32_t cmd, bar, error;

	memset(sc, 0, sizeof(struct mrsas_softc));

	/* Look up our softc and initialize its fields. */
	sc->mrsas_dev = dev;
	sc->device_id = pci_get_device(dev);

	/* Gen3 controllers share a common programming model. */
	if ((sc->device_id == MRSAS_INVADER) ||
	    (sc->device_id == MRSAS_FURY) ||
	    (sc->device_id == MRSAS_INTRUDER) ||
	    (sc->device_id == MRSAS_INTRUDER_24) ||
	    (sc->device_id == MRSAS_CUTLASS_52) ||
	    (sc->device_id == MRSAS_CUTLASS_53)) {
		sc->mrsas_gen3_ctrl = 1;
	}

	mrsas_get_tunables(sc);

	/*
	 * Set up PCI and registers
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	if ((cmd & PCIM_CMD_PORTEN) == 0) {
		return (ENXIO);
	}
	/* Force the busmaster enable bit on. */
	cmd |= PCIM_CMD_BUSMASTEREN;
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4);

	sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */
	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE))
	    == NULL) {
		device_printf(dev, "Cannot allocate PCI registers\n");
		goto attach_fail;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

	/* Intialize mutexes */
	mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF);
	mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF);
	mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF);
	mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF);
	mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN);
	mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF);
	mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF);

	/* Intialize linked list */
	TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head);
	TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head);

	mrsas_atomic_set(&sc->fw_outstanding, 0);
	mrsas_atomic_set(&sc->target_reset_outstanding, 0);

	sc->io_cmds_highwater = 0;

	sc->adprecovery = MRSAS_HBA_OPERATIONAL;
	sc->UnevenSpanSupport = 0;

	sc->msix_enable = 0;

	/* Initialize Firmware */
	if (mrsas_init_fw(sc) != SUCCESS) {
		goto attach_fail_fw;
	}
	/* Register mrsas to CAM layer */
	if ((mrsas_cam_attach(sc) != SUCCESS)) {
		goto attach_fail_cam;
	}
	/* Register IRQs */
	if (mrsas_setup_irq(sc) != SUCCESS) {
		goto attach_fail_irq;
	}
	error = mrsas_kproc_create(mrsas_ocr_thread, sc,
	    &sc->ocr_thread, 0, 0, "mrsas_ocr%d",
	    device_get_unit(sc->mrsas_dev));
	if (error) {
		device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error);
		goto attach_fail_ocr_thread;
	}
	/*
	 * After FW initialization and OCR thread creation
	 * we will defer the cdev creation, AEN setup on ICH callback
	 */
	sc->mrsas_ich.ich_func = mrsas_ich_startup;
	sc->mrsas_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mrsas_ich) != 0) {
		device_printf(sc->mrsas_dev, "Config hook is already established\n");
	}
	mrsas_setup_sysctl(sc);
	return SUCCESS;

	/* Error unwind: each label releases everything set up after it. */
attach_fail_ocr_thread:
	if (sc->ocr_thread_active)
		wakeup(&sc->ocr_chan);
attach_fail_irq:
	mrsas_teardown_intr(sc);
attach_fail_cam:
	mrsas_cam_detach(sc);
attach_fail_fw:
	/* if MSIX vector is allocated and FW Init FAILED then release MSIX */
	if (sc->msix_enable == 1)
		pci_release_msi(sc->mrsas_dev);
	mrsas_free_mem(sc);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->aen_lock);
	mtx_destroy(&sc->pci_lock);
	mtx_destroy(&sc->io_lock);
	mtx_destroy(&sc->ioctl_lock);
	mtx_destroy(&sc->mpt_cmd_pool_lock);
	mtx_destroy(&sc->mfi_cmd_pool_lock);
	mtx_destroy(&sc->raidmap_lock);
attach_fail:
	if (sc->reg_res) {
		bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res);
	}
	return (ENXIO);
}

/*
 * Interrupt config hook: deferred attach work that must wait until
 * interrupts are available — cdev creation, management registration,
 * interrupt enable and AEN registration.
 */
static void
mrsas_ich_startup(void *arg)
{
	struct mrsas_softc *sc = (struct mrsas_softc *)arg;

	/*
	 * Intialize a counting Semaphore to take care no. of concurrent IOCTLs
	 */
	sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS,
	    IOCTL_SEMA_DESCRIPTION);

	/* Create a /dev entry for mrsas controller. */
	sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT,
	    GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u",
	    device_get_unit(sc->mrsas_dev));

	/* Unit 0 also provides the Linux-compatible management node. */
	if (device_get_unit(sc->mrsas_dev) == 0) {
		make_dev_alias_p(MAKEDEV_CHECKNAME,
		    &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev,
		    "megaraid_sas_ioctl_node");
	}
	if (sc->mrsas_cdev)
		sc->mrsas_cdev->si_drv1 = sc;

	/*
	 * Add this controller to mrsas_mgmt_info structure so that it can be
	 * exported to management applications
	 */
	if (device_get_unit(sc->mrsas_dev) == 0)
		memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info));

	mrsas_mgmt_info.count++;
	mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc;
	mrsas_mgmt_info.max_index++;

	/* Enable Interrupts */
	mrsas_enable_intr(sc);

	/* Initiate AEN (Asynchronous Event Notification) */
	if (mrsas_start_aen(sc)) {
		device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! "
		    "Further events from the controller will not be communicated.\n"
		    "Either there is some problem in the controller"
		    "or the controller does not support AEN.\n"
		    "Please contact to the SUPPORT TEAM if the problem persists\n");
	}
	/* Startup is complete; release the config hook so boot can proceed. */
	if (sc->mrsas_ich.ich_arg != NULL) {
		device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n");
		config_intrhook_disestablish(&sc->mrsas_ich);
		sc->mrsas_ich.ich_arg = NULL;
	}
}

/*
 * mrsas_detach: De-allocates and teardown resources
 * input: pointer to device struct
 *
 * This function is the entry point for device disconnect and detach.
 * It performs memory de-allocations, shutdown of the controller and various
 * teardown and destroy resource functions.
1006 */ 1007static int 1008mrsas_detach(device_t dev) 1009{ 1010 struct mrsas_softc *sc; 1011 int i = 0; 1012 1013 sc = device_get_softc(dev); 1014 sc->remove_in_progress = 1; 1015 1016 /* Destroy the character device so no other IOCTL will be handled */ 1017 if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev) 1018 destroy_dev(sc->mrsas_linux_emulator_cdev); 1019 destroy_dev(sc->mrsas_cdev); 1020 1021 /* 1022 * Take the instance off the instance array. Note that we will not 1023 * decrement the max_index. We let this array be sparse array 1024 */ 1025 for (i = 0; i < mrsas_mgmt_info.max_index; i++) { 1026 if (mrsas_mgmt_info.sc_ptr[i] == sc) { 1027 mrsas_mgmt_info.count--; 1028 mrsas_mgmt_info.sc_ptr[i] = NULL; 1029 break; 1030 } 1031 } 1032 1033 if (sc->ocr_thread_active) 1034 wakeup(&sc->ocr_chan); 1035 while (sc->reset_in_progress) { 1036 i++; 1037 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 1038 mrsas_dprint(sc, MRSAS_INFO, 1039 "[%2d]waiting for OCR to be finished from %s\n", i, __func__); 1040 } 1041 pause("mr_shutdown", hz); 1042 } 1043 i = 0; 1044 while (sc->ocr_thread_active) { 1045 i++; 1046 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 1047 mrsas_dprint(sc, MRSAS_INFO, 1048 "[%2d]waiting for " 1049 "mrsas_ocr thread to quit ocr %d\n", i, 1050 sc->ocr_thread_active); 1051 } 1052 pause("mr_shutdown", hz); 1053 } 1054 mrsas_flush_cache(sc); 1055 mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN); 1056 mrsas_disable_intr(sc); 1057 mrsas_cam_detach(sc); 1058 mrsas_teardown_intr(sc); 1059 mrsas_free_mem(sc); 1060 mtx_destroy(&sc->sim_lock); 1061 mtx_destroy(&sc->aen_lock); 1062 mtx_destroy(&sc->pci_lock); 1063 mtx_destroy(&sc->io_lock); 1064 mtx_destroy(&sc->ioctl_lock); 1065 mtx_destroy(&sc->mpt_cmd_pool_lock); 1066 mtx_destroy(&sc->mfi_cmd_pool_lock); 1067 mtx_destroy(&sc->raidmap_lock); 1068 1069 /* Wait for all the semaphores to be released */ 1070 while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS) 1071 pause("mr_shutdown", hz); 1072 
1073 /* Destroy the counting semaphore created for Ioctl */ 1074 sema_destroy(&sc->ioctl_count_sema); 1075 1076 if (sc->reg_res) { 1077 bus_release_resource(sc->mrsas_dev, 1078 SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res); 1079 } 1080 if (sc->sysctl_tree != NULL) 1081 sysctl_ctx_free(&sc->sysctl_ctx); 1082 1083 return (0); 1084} 1085 1086/* 1087 * mrsas_free_mem: Frees allocated memory 1088 * input: Adapter instance soft state 1089 * 1090 * This function is called from mrsas_detach() to free previously allocated 1091 * memory. 1092 */ 1093void 1094mrsas_free_mem(struct mrsas_softc *sc) 1095{ 1096 int i; 1097 u_int32_t max_cmd; 1098 struct mrsas_mfi_cmd *mfi_cmd; 1099 struct mrsas_mpt_cmd *mpt_cmd; 1100 1101 /* 1102 * Free RAID map memory 1103 */ 1104 for (i = 0; i < 2; i++) { 1105 if (sc->raidmap_phys_addr[i]) 1106 bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]); 1107 if (sc->raidmap_mem[i] != NULL) 1108 bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]); 1109 if (sc->raidmap_tag[i] != NULL) 1110 bus_dma_tag_destroy(sc->raidmap_tag[i]); 1111 1112 if (sc->ld_drv_map[i] != NULL) 1113 free(sc->ld_drv_map[i], M_MRSAS); 1114 } 1115 for (i = 0; i < 2; i++) { 1116 if (sc->jbodmap_phys_addr[i]) 1117 bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]); 1118 if (sc->jbodmap_mem[i] != NULL) 1119 bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]); 1120 if (sc->jbodmap_tag[i] != NULL) 1121 bus_dma_tag_destroy(sc->jbodmap_tag[i]); 1122 } 1123 /* 1124 * Free version buffer memroy 1125 */ 1126 if (sc->verbuf_phys_addr) 1127 bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap); 1128 if (sc->verbuf_mem != NULL) 1129 bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap); 1130 if (sc->verbuf_tag != NULL) 1131 bus_dma_tag_destroy(sc->verbuf_tag); 1132 1133 1134 /* 1135 * Free sense buffer memory 1136 */ 1137 if (sc->sense_phys_addr) 1138 bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap); 1139 if 
(sc->sense_mem != NULL) 1140 bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap); 1141 if (sc->sense_tag != NULL) 1142 bus_dma_tag_destroy(sc->sense_tag); 1143 1144 /* 1145 * Free chain frame memory 1146 */ 1147 if (sc->chain_frame_phys_addr) 1148 bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap); 1149 if (sc->chain_frame_mem != NULL) 1150 bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap); 1151 if (sc->chain_frame_tag != NULL) 1152 bus_dma_tag_destroy(sc->chain_frame_tag); 1153 1154 /* 1155 * Free IO Request memory 1156 */ 1157 if (sc->io_request_phys_addr) 1158 bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap); 1159 if (sc->io_request_mem != NULL) 1160 bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap); 1161 if (sc->io_request_tag != NULL) 1162 bus_dma_tag_destroy(sc->io_request_tag); 1163 1164 /* 1165 * Free Reply Descriptor memory 1166 */ 1167 if (sc->reply_desc_phys_addr) 1168 bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap); 1169 if (sc->reply_desc_mem != NULL) 1170 bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap); 1171 if (sc->reply_desc_tag != NULL) 1172 bus_dma_tag_destroy(sc->reply_desc_tag); 1173 1174 /* 1175 * Free event detail memory 1176 */ 1177 if (sc->evt_detail_phys_addr) 1178 bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap); 1179 if (sc->evt_detail_mem != NULL) 1180 bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap); 1181 if (sc->evt_detail_tag != NULL) 1182 bus_dma_tag_destroy(sc->evt_detail_tag); 1183 1184 /* 1185 * Free MFI frames 1186 */ 1187 if (sc->mfi_cmd_list) { 1188 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) { 1189 mfi_cmd = sc->mfi_cmd_list[i]; 1190 mrsas_free_frame(sc, mfi_cmd); 1191 } 1192 } 1193 if (sc->mficmd_frame_tag != NULL) 1194 bus_dma_tag_destroy(sc->mficmd_frame_tag); 1195 1196 /* 1197 * Free MPT internal command list 1198 */ 1199 max_cmd = 
sc->max_fw_cmds; 1200 if (sc->mpt_cmd_list) { 1201 for (i = 0; i < max_cmd; i++) { 1202 mpt_cmd = sc->mpt_cmd_list[i]; 1203 bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap); 1204 free(sc->mpt_cmd_list[i], M_MRSAS); 1205 } 1206 free(sc->mpt_cmd_list, M_MRSAS); 1207 sc->mpt_cmd_list = NULL; 1208 } 1209 /* 1210 * Free MFI internal command list 1211 */ 1212 1213 if (sc->mfi_cmd_list) { 1214 for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) { 1215 free(sc->mfi_cmd_list[i], M_MRSAS); 1216 } 1217 free(sc->mfi_cmd_list, M_MRSAS); 1218 sc->mfi_cmd_list = NULL; 1219 } 1220 /* 1221 * Free request descriptor memory 1222 */ 1223 free(sc->req_desc, M_MRSAS); 1224 sc->req_desc = NULL; 1225 1226 /* 1227 * Destroy parent tag 1228 */ 1229 if (sc->mrsas_parent_tag != NULL) 1230 bus_dma_tag_destroy(sc->mrsas_parent_tag); 1231 1232 /* 1233 * Free ctrl_info memory 1234 */ 1235 if (sc->ctrl_info != NULL) 1236 free(sc->ctrl_info, M_MRSAS); 1237} 1238 1239/* 1240 * mrsas_teardown_intr: Teardown interrupt 1241 * input: Adapter instance soft state 1242 * 1243 * This function is called from mrsas_detach() to teardown and release bus 1244 * interrupt resourse. 
1245 */ 1246void 1247mrsas_teardown_intr(struct mrsas_softc *sc) 1248{ 1249 int i; 1250 1251 if (!sc->msix_enable) { 1252 if (sc->intr_handle[0]) 1253 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]); 1254 if (sc->mrsas_irq[0] != NULL) 1255 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, 1256 sc->irq_id[0], sc->mrsas_irq[0]); 1257 sc->intr_handle[0] = NULL; 1258 } else { 1259 for (i = 0; i < sc->msix_vectors; i++) { 1260 if (sc->intr_handle[i]) 1261 bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i], 1262 sc->intr_handle[i]); 1263 1264 if (sc->mrsas_irq[i] != NULL) 1265 bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, 1266 sc->irq_id[i], sc->mrsas_irq[i]); 1267 1268 sc->intr_handle[i] = NULL; 1269 } 1270 pci_release_msi(sc->mrsas_dev); 1271 } 1272 1273} 1274 1275/* 1276 * mrsas_suspend: Suspend entry point 1277 * input: Device struct pointer 1278 * 1279 * This function is the entry point for system suspend from the OS. 1280 */ 1281static int 1282mrsas_suspend(device_t dev) 1283{ 1284 /* This will be filled when the driver will have hibernation support */ 1285 return (0); 1286} 1287 1288/* 1289 * mrsas_resume: Resume entry point 1290 * input: Device struct pointer 1291 * 1292 * This function is the entry point for system resume from the OS. 1293 */ 1294static int 1295mrsas_resume(device_t dev) 1296{ 1297 /* This will be filled when the driver will have hibernation support */ 1298 return (0); 1299} 1300 1301/** 1302 * mrsas_get_softc_instance: Find softc instance based on cmd type 1303 * 1304 * This function will return softc instance based on cmd type. 1305 * In some case, application fire ioctl on required management instance and 1306 * do not provide host_no. Use cdev->si_drv1 to get softc instance for those 1307 * case, else get the softc instance from host_no provided by application in 1308 * user data. 
1309 */ 1310 1311static struct mrsas_softc * 1312mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg) 1313{ 1314 struct mrsas_softc *sc = NULL; 1315 struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg; 1316 1317 if (cmd == MRSAS_IOC_GET_PCI_INFO) { 1318 sc = dev->si_drv1; 1319 } else { 1320 /* 1321 * get the Host number & the softc from data sent by the 1322 * Application 1323 */ 1324 sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no]; 1325 if (sc == NULL) 1326 printf("There is no Controller number %d\n", 1327 user_ioc->host_no); 1328 else if (user_ioc->host_no >= mrsas_mgmt_info.max_index) 1329 mrsas_dprint(sc, MRSAS_FAULT, 1330 "Invalid Controller number %d\n", user_ioc->host_no); 1331 } 1332 1333 return sc; 1334} 1335 1336/* 1337 * mrsas_ioctl: IOCtl commands entry point. 1338 * 1339 * This function is the entry point for IOCtls from the OS. It calls the 1340 * appropriate function for processing depending on the command received. 1341 */ 1342static int 1343mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, d_thread_t *td) 1344{ 1345 struct mrsas_softc *sc; 1346 int ret = 0, i = 0; 1347 MRSAS_DRV_PCI_INFORMATION *pciDrvInfo; 1348 1349 sc = mrsas_get_softc_instance(dev, cmd, arg); 1350 if (!sc) 1351 return ENOENT; 1352 1353 if (sc->remove_in_progress || 1354 (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) { 1355 mrsas_dprint(sc, MRSAS_INFO, 1356 "Either driver remove or shutdown called or " 1357 "HW is in unrecoverable critical error state.\n"); 1358 return ENOENT; 1359 } 1360 mtx_lock_spin(&sc->ioctl_lock); 1361 if (!sc->reset_in_progress) { 1362 mtx_unlock_spin(&sc->ioctl_lock); 1363 goto do_ioctl; 1364 } 1365 mtx_unlock_spin(&sc->ioctl_lock); 1366 while (sc->reset_in_progress) { 1367 i++; 1368 if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { 1369 mrsas_dprint(sc, MRSAS_INFO, 1370 "[%2d]waiting for OCR to be finished from %s\n", i, __func__); 1371 } 1372 pause("mr_ioctl", hz); 1373 } 1374 1375do_ioctl: 1376 switch (cmd) { 1377 case 
MRSAS_IOC_FIRMWARE_PASS_THROUGH64: 1378#ifdef COMPAT_FREEBSD32 1379 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32: 1380#endif 1381 /* 1382 * Decrement the Ioctl counting Semaphore before getting an 1383 * mfi command 1384 */ 1385 sema_wait(&sc->ioctl_count_sema); 1386 1387 ret = mrsas_passthru(sc, (void *)arg, cmd); 1388 1389 /* Increment the Ioctl counting semaphore value */ 1390 sema_post(&sc->ioctl_count_sema); 1391 1392 break; 1393 case MRSAS_IOC_SCAN_BUS: 1394 ret = mrsas_bus_scan(sc); 1395 break; 1396 1397 case MRSAS_IOC_GET_PCI_INFO: 1398 pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg; 1399 memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION)); 1400 pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev); 1401 pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev); 1402 pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev); 1403 pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev); 1404 mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d," 1405 "pci device no: %d, pci function no: %d," 1406 "pci domain ID: %d\n", 1407 pciDrvInfo->busNumber, pciDrvInfo->deviceNumber, 1408 pciDrvInfo->functionNumber, pciDrvInfo->domainID); 1409 ret = 0; 1410 break; 1411 1412 default: 1413 mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd); 1414 ret = ENOENT; 1415 } 1416 1417 return (ret); 1418} 1419 1420/* 1421 * mrsas_poll: poll entry point for mrsas driver fd 1422 * 1423 * This function is the entry point for poll from the OS. It waits for some AEN 1424 * events to be triggered from the controller and notifies back. 
1425 */ 1426static int 1427mrsas_poll(struct cdev *dev, int poll_events, struct thread *td) 1428{ 1429 struct mrsas_softc *sc; 1430 int revents = 0; 1431 1432 sc = dev->si_drv1; 1433 1434 if (poll_events & (POLLIN | POLLRDNORM)) { 1435 if (sc->mrsas_aen_triggered) { 1436 revents |= poll_events & (POLLIN | POLLRDNORM); 1437 } 1438 } 1439 if (revents == 0) { 1440 if (poll_events & (POLLIN | POLLRDNORM)) { 1441 mtx_lock(&sc->aen_lock); 1442 sc->mrsas_poll_waiting = 1; 1443 selrecord(td, &sc->mrsas_select); 1444 mtx_unlock(&sc->aen_lock); 1445 } 1446 } 1447 return revents; 1448} 1449 1450/* 1451 * mrsas_setup_irq: Set up interrupt 1452 * input: Adapter instance soft state 1453 * 1454 * This function sets up interrupts as a bus resource, with flags indicating 1455 * resource permitting contemporaneous sharing and for resource to activate 1456 * atomically. 1457 */ 1458static int 1459mrsas_setup_irq(struct mrsas_softc *sc) 1460{ 1461 if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS)) 1462 device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n"); 1463 1464 else { 1465 device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n"); 1466 sc->irq_context[0].sc = sc; 1467 sc->irq_context[0].MSIxIndex = 0; 1468 sc->irq_id[0] = 0; 1469 sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev, 1470 SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE); 1471 if (sc->mrsas_irq[0] == NULL) { 1472 device_printf(sc->mrsas_dev, "Cannot allocate legcay" 1473 "interrupt\n"); 1474 return (FAIL); 1475 } 1476 if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0], 1477 INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr, 1478 &sc->irq_context[0], &sc->intr_handle[0])) { 1479 device_printf(sc->mrsas_dev, "Cannot set up legacy" 1480 "interrupt\n"); 1481 return (FAIL); 1482 } 1483 } 1484 return (0); 1485} 1486 1487/* 1488 * mrsas_isr: ISR entry point 1489 * input: argument pointer 1490 * 1491 * This function is the interrupt service routine entry point. 
There are two 1492 * types of interrupts, state change interrupt and response interrupt. If an 1493 * interrupt is not ours, we just return. 1494 */ 1495void 1496mrsas_isr(void *arg) 1497{ 1498 struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg; 1499 struct mrsas_softc *sc = irq_context->sc; 1500 int status = 0; 1501 1502 if (sc->mask_interrupts) 1503 return; 1504 1505 if (!sc->msix_vectors) { 1506 status = mrsas_clear_intr(sc); 1507 if (!status) 1508 return; 1509 } 1510 /* If we are resetting, bail */ 1511 if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) { 1512 printf(" Entered into ISR when OCR is going active. \n"); 1513 mrsas_clear_intr(sc); 1514 return; 1515 } 1516 /* Process for reply request and clear response interrupt */ 1517 if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS) 1518 mrsas_clear_intr(sc); 1519 1520 return; 1521} 1522 1523/* 1524 * mrsas_complete_cmd: Process reply request 1525 * input: Adapter instance soft state 1526 * 1527 * This function is called from mrsas_isr() to process reply request and clear 1528 * response interrupt. Processing of the reply request entails walking 1529 * through the reply descriptor array for the command request pended from 1530 * Firmware. We look at the Function field to determine the command type and 1531 * perform the appropriate action. Before we return, we clear the response 1532 * interrupt. 
1533 */ 1534int 1535mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex) 1536{ 1537 Mpi2ReplyDescriptorsUnion_t *desc; 1538 MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; 1539 MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req; 1540 struct mrsas_mpt_cmd *cmd_mpt; 1541 struct mrsas_mfi_cmd *cmd_mfi; 1542 u_int8_t reply_descript_type; 1543 u_int16_t smid, num_completed; 1544 u_int8_t status, extStatus; 1545 union desc_value desc_val; 1546 PLD_LOAD_BALANCE_INFO lbinfo; 1547 u_int32_t device_id; 1548 int threshold_reply_count = 0; 1549#if TM_DEBUG 1550 MR_TASK_MANAGE_REQUEST *mr_tm_req; 1551 MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req; 1552#endif 1553 1554 /* If we have a hardware error, not need to continue */ 1555 if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) 1556 return (DONE); 1557 1558 desc = sc->reply_desc_mem; 1559 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION)) 1560 + sc->last_reply_idx[MSIxIndex]; 1561 1562 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc; 1563 1564 desc_val.word = desc->Words; 1565 num_completed = 0; 1566 1567 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 1568 1569 /* Find our reply descriptor for the command and process */ 1570 while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) { 1571 smid = reply_desc->SMID; 1572 cmd_mpt = sc->mpt_cmd_list[smid - 1]; 1573 scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request; 1574 1575 status = scsi_io_req->RaidContext.status; 1576 extStatus = scsi_io_req->RaidContext.exStatus; 1577 1578 switch (scsi_io_req->Function) { 1579 case MPI2_FUNCTION_SCSI_TASK_MGMT: 1580#if TM_DEBUG 1581 mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request; 1582 mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *) 1583 &mr_tm_req->TmRequest; 1584 device_printf(sc->mrsas_dev, "TM completion type 0x%X, " 1585 "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID); 1586#endif 1587 wakeup_one((void 
*)&sc->ocr_chan); 1588 break; 1589 case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */ 1590 device_id = cmd_mpt->ccb_ptr->ccb_h.target_id; 1591 lbinfo = &sc->load_balance_info[device_id]; 1592 if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) { 1593 mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]); 1594 cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG; 1595 } 1596 /* Fall thru and complete IO */ 1597 case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST: 1598 mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus); 1599 mrsas_cmd_done(sc, cmd_mpt); 1600 scsi_io_req->RaidContext.status = 0; 1601 scsi_io_req->RaidContext.exStatus = 0; 1602 mrsas_atomic_dec(&sc->fw_outstanding); 1603 break; 1604 case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */ 1605 cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx]; 1606 /* 1607 * Make sure NOT TO release the mfi command from the called 1608 * function's context if it is fired with issue_polled call. 1609 * And also make sure that the issue_polled call should only be 1610 * used if INTERRUPT IS DISABLED. 
1611 */ 1612 if (cmd_mfi->frame->hdr.flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) 1613 mrsas_release_mfi_cmd(cmd_mfi); 1614 else 1615 mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status); 1616 break; 1617 } 1618 1619 sc->last_reply_idx[MSIxIndex]++; 1620 if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth) 1621 sc->last_reply_idx[MSIxIndex] = 0; 1622 1623 desc->Words = ~((uint64_t)0x00); /* set it back to all 1624 * 0xFFFFFFFFs */ 1625 num_completed++; 1626 threshold_reply_count++; 1627 1628 /* Get the next reply descriptor */ 1629 if (!sc->last_reply_idx[MSIxIndex]) { 1630 desc = sc->reply_desc_mem; 1631 desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION)); 1632 } else 1633 desc++; 1634 1635 reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc; 1636 desc_val.word = desc->Words; 1637 1638 reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 1639 1640 if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) 1641 break; 1642 1643 /* 1644 * Write to reply post index after completing threshold reply 1645 * count and still there are more replies in reply queue 1646 * pending to be completed. 
1647 */ 1648 if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) { 1649 if (sc->msix_enable) { 1650 if (sc->mrsas_gen3_ctrl) 1651 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8], 1652 ((MSIxIndex & 0x7) << 24) | 1653 sc->last_reply_idx[MSIxIndex]); 1654 else 1655 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) | 1656 sc->last_reply_idx[MSIxIndex]); 1657 } else 1658 mrsas_write_reg(sc, offsetof(mrsas_reg_set, 1659 reply_post_host_index), sc->last_reply_idx[0]); 1660 1661 threshold_reply_count = 0; 1662 } 1663 } 1664 1665 /* No match, just return */ 1666 if (num_completed == 0) 1667 return (DONE); 1668 1669 /* Clear response interrupt */ 1670 if (sc->msix_enable) { 1671 if (sc->mrsas_gen3_ctrl) { 1672 mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8], 1673 ((MSIxIndex & 0x7) << 24) | 1674 sc->last_reply_idx[MSIxIndex]); 1675 } else 1676 mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) | 1677 sc->last_reply_idx[MSIxIndex]); 1678 } else 1679 mrsas_write_reg(sc, offsetof(mrsas_reg_set, 1680 reply_post_host_index), sc->last_reply_idx[0]); 1681 1682 return (0); 1683} 1684 1685/* 1686 * mrsas_map_mpt_cmd_status: Allocate DMAable memory. 1687 * input: Adapter instance soft state 1688 * 1689 * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO. 1690 * It checks the command status and maps the appropriate CAM status for the 1691 * CCB. 
1692 */ 1693void 1694mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus) 1695{ 1696 struct mrsas_softc *sc = cmd->sc; 1697 u_int8_t *sense_data; 1698 1699 switch (status) { 1700 case MFI_STAT_OK: 1701 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP; 1702 break; 1703 case MFI_STAT_SCSI_IO_FAILED: 1704 case MFI_STAT_SCSI_DONE_WITH_ERROR: 1705 cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR; 1706 sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data; 1707 if (sense_data) { 1708 /* For now just copy 18 bytes back */ 1709 memcpy(sense_data, cmd->sense, 18); 1710 cmd->ccb_ptr->csio.sense_len = 18; 1711 cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID; 1712 } 1713 break; 1714 case MFI_STAT_LD_OFFLINE: 1715 case MFI_STAT_DEVICE_NOT_FOUND: 1716 if (cmd->ccb_ptr->ccb_h.target_lun) 1717 cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID; 1718 else 1719 cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE; 1720 break; 1721 case MFI_STAT_CONFIG_SEQ_MISMATCH: 1722 cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ; 1723 break; 1724 default: 1725 device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status); 1726 cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR; 1727 cmd->ccb_ptr->csio.scsi_status = status; 1728 } 1729 return; 1730} 1731 1732/* 1733 * mrsas_alloc_mem: Allocate DMAable memory 1734 * input: Adapter instance soft state 1735 * 1736 * This function creates the parent DMA tag and allocates DMAable memory. DMA 1737 * tag describes constraints of DMA mapping. Memory allocated is mapped into 1738 * Kernel virtual address. Callback argument is physical memory address. 
1739 */ 1740static int 1741mrsas_alloc_mem(struct mrsas_softc *sc) 1742{ 1743 u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, 1744 chain_frame_size, evt_detail_size, count; 1745 1746 /* 1747 * Allocate parent DMA tag 1748 */ 1749 if (bus_dma_tag_create(NULL, /* parent */ 1750 1, /* alignment */ 1751 0, /* boundary */ 1752 BUS_SPACE_MAXADDR, /* lowaddr */ 1753 BUS_SPACE_MAXADDR, /* highaddr */ 1754 NULL, NULL, /* filter, filterarg */ 1755 MAXPHYS, /* maxsize */ 1756 sc->max_num_sge, /* nsegments */ 1757 MAXPHYS, /* maxsegsize */ 1758 0, /* flags */ 1759 NULL, NULL, /* lockfunc, lockarg */ 1760 &sc->mrsas_parent_tag /* tag */ 1761 )) { 1762 device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n"); 1763 return (ENOMEM); 1764 } 1765 /* 1766 * Allocate for version buffer 1767 */ 1768 verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t)); 1769 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1770 1, 0, 1771 BUS_SPACE_MAXADDR_32BIT, 1772 BUS_SPACE_MAXADDR, 1773 NULL, NULL, 1774 verbuf_size, 1775 1, 1776 verbuf_size, 1777 BUS_DMA_ALLOCNOW, 1778 NULL, NULL, 1779 &sc->verbuf_tag)) { 1780 device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n"); 1781 return (ENOMEM); 1782 } 1783 if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem, 1784 BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) { 1785 device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n"); 1786 return (ENOMEM); 1787 } 1788 bzero(sc->verbuf_mem, verbuf_size); 1789 if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem, 1790 verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, 1791 BUS_DMA_NOWAIT)) { 1792 device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n"); 1793 return (ENOMEM); 1794 } 1795 /* 1796 * Allocate IO Request Frames 1797 */ 1798 io_req_size = sc->io_frames_alloc_sz; 1799 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1800 16, 0, 1801 BUS_SPACE_MAXADDR_32BIT, 1802 BUS_SPACE_MAXADDR, 1803 NULL, NULL, 1804 io_req_size, 1805 1, 1806 io_req_size, 1807 
BUS_DMA_ALLOCNOW, 1808 NULL, NULL, 1809 &sc->io_request_tag)) { 1810 device_printf(sc->mrsas_dev, "Cannot create IO request tag\n"); 1811 return (ENOMEM); 1812 } 1813 if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem, 1814 BUS_DMA_NOWAIT, &sc->io_request_dmamap)) { 1815 device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n"); 1816 return (ENOMEM); 1817 } 1818 bzero(sc->io_request_mem, io_req_size); 1819 if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap, 1820 sc->io_request_mem, io_req_size, mrsas_addr_cb, 1821 &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) { 1822 device_printf(sc->mrsas_dev, "Cannot load IO request memory\n"); 1823 return (ENOMEM); 1824 } 1825 /* 1826 * Allocate Chain Frames 1827 */ 1828 chain_frame_size = sc->chain_frames_alloc_sz; 1829 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1830 4, 0, 1831 BUS_SPACE_MAXADDR_32BIT, 1832 BUS_SPACE_MAXADDR, 1833 NULL, NULL, 1834 chain_frame_size, 1835 1, 1836 chain_frame_size, 1837 BUS_DMA_ALLOCNOW, 1838 NULL, NULL, 1839 &sc->chain_frame_tag)) { 1840 device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n"); 1841 return (ENOMEM); 1842 } 1843 if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem, 1844 BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) { 1845 device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n"); 1846 return (ENOMEM); 1847 } 1848 bzero(sc->chain_frame_mem, chain_frame_size); 1849 if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap, 1850 sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb, 1851 &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) { 1852 device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n"); 1853 return (ENOMEM); 1854 } 1855 count = sc->msix_vectors > 0 ? 
sc->msix_vectors : 1; 1856 /* 1857 * Allocate Reply Descriptor Array 1858 */ 1859 reply_desc_size = sc->reply_alloc_sz * count; 1860 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1861 16, 0, 1862 BUS_SPACE_MAXADDR_32BIT, 1863 BUS_SPACE_MAXADDR, 1864 NULL, NULL, 1865 reply_desc_size, 1866 1, 1867 reply_desc_size, 1868 BUS_DMA_ALLOCNOW, 1869 NULL, NULL, 1870 &sc->reply_desc_tag)) { 1871 device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n"); 1872 return (ENOMEM); 1873 } 1874 if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem, 1875 BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) { 1876 device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n"); 1877 return (ENOMEM); 1878 } 1879 if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap, 1880 sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb, 1881 &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) { 1882 device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n"); 1883 return (ENOMEM); 1884 } 1885 /* 1886 * Allocate Sense Buffer Array. 
Keep in lower 4GB 1887 */ 1888 sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN; 1889 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1890 64, 0, 1891 BUS_SPACE_MAXADDR_32BIT, 1892 BUS_SPACE_MAXADDR, 1893 NULL, NULL, 1894 sense_size, 1895 1, 1896 sense_size, 1897 BUS_DMA_ALLOCNOW, 1898 NULL, NULL, 1899 &sc->sense_tag)) { 1900 device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n"); 1901 return (ENOMEM); 1902 } 1903 if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem, 1904 BUS_DMA_NOWAIT, &sc->sense_dmamap)) { 1905 device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n"); 1906 return (ENOMEM); 1907 } 1908 if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap, 1909 sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr, 1910 BUS_DMA_NOWAIT)) { 1911 device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n"); 1912 return (ENOMEM); 1913 } 1914 /* 1915 * Allocate for Event detail structure 1916 */ 1917 evt_detail_size = sizeof(struct mrsas_evt_detail); 1918 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1919 1, 0, 1920 BUS_SPACE_MAXADDR_32BIT, 1921 BUS_SPACE_MAXADDR, 1922 NULL, NULL, 1923 evt_detail_size, 1924 1, 1925 evt_detail_size, 1926 BUS_DMA_ALLOCNOW, 1927 NULL, NULL, 1928 &sc->evt_detail_tag)) { 1929 device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n"); 1930 return (ENOMEM); 1931 } 1932 if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem, 1933 BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) { 1934 device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n"); 1935 return (ENOMEM); 1936 } 1937 bzero(sc->evt_detail_mem, evt_detail_size); 1938 if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap, 1939 sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb, 1940 &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) { 1941 device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n"); 1942 return (ENOMEM); 1943 } 1944 /* 1945 * Create a dma tag for data buffers; size will be the maximum 1946 * 
possible I/O size (280kB). 1947 */ 1948 if (bus_dma_tag_create(sc->mrsas_parent_tag, 1949 1, 1950 0, 1951 BUS_SPACE_MAXADDR, 1952 BUS_SPACE_MAXADDR, 1953 NULL, NULL, 1954 MAXPHYS, 1955 sc->max_num_sge, /* nsegments */ 1956 MAXPHYS, 1957 BUS_DMA_ALLOCNOW, 1958 busdma_lock_mutex, 1959 &sc->io_lock, 1960 &sc->data_tag)) { 1961 device_printf(sc->mrsas_dev, "Cannot create data dma tag\n"); 1962 return (ENOMEM); 1963 } 1964 return (0); 1965} 1966 1967/* 1968 * mrsas_addr_cb: Callback function of bus_dmamap_load() 1969 * input: callback argument, machine dependent type 1970 * that describes DMA segments, number of segments, error code 1971 * 1972 * This function is for the driver to receive mapping information resultant of 1973 * the bus_dmamap_load(). The information is actually not being used, but the 1974 * address is saved anyway. 1975 */ 1976void 1977mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1978{ 1979 bus_addr_t *addr; 1980 1981 addr = arg; 1982 *addr = segs[0].ds_addr; 1983} 1984 1985/* 1986 * mrsas_setup_raidmap: Set up RAID map. 1987 * input: Adapter instance soft state 1988 * 1989 * Allocate DMA memory for the RAID maps and perform setup. 
1990 */ 1991static int 1992mrsas_setup_raidmap(struct mrsas_softc *sc) 1993{ 1994 int i; 1995 1996 for (i = 0; i < 2; i++) { 1997 sc->ld_drv_map[i] = 1998 (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT); 1999 /* Do Error handling */ 2000 if (!sc->ld_drv_map[i]) { 2001 device_printf(sc->mrsas_dev, "Could not allocate memory for local map"); 2002 2003 if (i == 1) 2004 free(sc->ld_drv_map[0], M_MRSAS); 2005 /* ABORT driver initialization */ 2006 goto ABORT; 2007 } 2008 } 2009 2010 for (int i = 0; i < 2; i++) { 2011 if (bus_dma_tag_create(sc->mrsas_parent_tag, 2012 4, 0, 2013 BUS_SPACE_MAXADDR_32BIT, 2014 BUS_SPACE_MAXADDR, 2015 NULL, NULL, 2016 sc->max_map_sz, 2017 1, 2018 sc->max_map_sz, 2019 BUS_DMA_ALLOCNOW, 2020 NULL, NULL, 2021 &sc->raidmap_tag[i])) { 2022 device_printf(sc->mrsas_dev, 2023 "Cannot allocate raid map tag.\n"); 2024 return (ENOMEM); 2025 } 2026 if (bus_dmamem_alloc(sc->raidmap_tag[i], 2027 (void **)&sc->raidmap_mem[i], 2028 BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) { 2029 device_printf(sc->mrsas_dev, 2030 "Cannot allocate raidmap memory.\n"); 2031 return (ENOMEM); 2032 } 2033 bzero(sc->raidmap_mem[i], sc->max_map_sz); 2034 2035 if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i], 2036 sc->raidmap_mem[i], sc->max_map_sz, 2037 mrsas_addr_cb, &sc->raidmap_phys_addr[i], 2038 BUS_DMA_NOWAIT)) { 2039 device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n"); 2040 return (ENOMEM); 2041 } 2042 if (!sc->raidmap_mem[i]) { 2043 device_printf(sc->mrsas_dev, 2044 "Cannot allocate memory for raid map.\n"); 2045 return (ENOMEM); 2046 } 2047 } 2048 2049 if (!mrsas_get_map_info(sc)) 2050 mrsas_sync_map_info(sc); 2051 2052 return (0); 2053 2054ABORT: 2055 return (1); 2056} 2057 2058/** 2059 * megasas_setup_jbod_map - setup jbod map for FP seq_number. 2060 * @sc: Adapter soft state 2061 * 2062 * Return 0 on success. 
 */
void
megasas_setup_jbod_map(struct mrsas_softc *sc)
{
	int i;
	uint32_t pd_seq_map_sz;

	/* Size of the sync structure for MAX_PHYSICAL_DEVICES entries. */
	pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
	    (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1));

	/* FW must advertise JBOD sequence-number fast path support. */
	if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) {
		sc->use_seqnum_jbod_fp = 0;
		return;
	}
	/* Buffers already exist (e.g. after OCR) — just re-sync below. */
	if (sc->jbodmap_mem[0])
		goto skip_alloc;

	/* Double-buffered DMA-able JBOD map, like the RAID map. */
	for (i = 0; i < 2; i++) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    4, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    pd_seq_map_sz,
		    1,
		    pd_seq_map_sz,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &sc->jbodmap_tag[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map tag.\n");
			return;
		}
		if (bus_dmamem_alloc(sc->jbodmap_tag[i],
		    (void **)&sc->jbodmap_mem[i],
		    BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate jbod map memory.\n");
			return;
		}
		bzero(sc->jbodmap_mem[i], pd_seq_map_sz);

		if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i],
		    sc->jbodmap_mem[i], pd_seq_map_sz,
		    mrsas_addr_cb, &sc->jbodmap_phys_addr[i],
		    BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n");
			return;
		}
		/* NOTE(review): dead check — alloc above succeeded. */
		if (!sc->jbodmap_mem[i]) {
			device_printf(sc->mrsas_dev,
			    "Cannot allocate memory for jbod map.\n");
			sc->use_seqnum_jbod_fp = 0;
			return;
		}
	}

skip_alloc:
	/* Fast path is enabled only if both pend/non-pend syncs succeed. */
	if (!megasas_sync_pd_seq_num(sc, false) &&
	    !megasas_sync_pd_seq_num(sc, true))
		sc->use_seqnum_jbod_fp = 1;
	else
		sc->use_seqnum_jbod_fp = 0;

	device_printf(sc->mrsas_dev, "Jbod map is supported\n");
}

/*
 * mrsas_init_fw: Initialize Firmware
 * input: Adapter soft state
 *
 * Calls transition_to_ready() to make sure Firmware is in operational state and
 * calls mrsas_init_adapter() to send
IOC_INIT command to Firmware. It 2136 * issues internal commands to get the controller info after the IOC_INIT 2137 * command response is received by Firmware. Note: code relating to 2138 * get_pdlist, get_ld_list and max_sectors are currently not being used, it 2139 * is left here as placeholder. 2140 */ 2141static int 2142mrsas_init_fw(struct mrsas_softc *sc) 2143{ 2144 2145 int ret, loop, ocr = 0; 2146 u_int32_t max_sectors_1; 2147 u_int32_t max_sectors_2; 2148 u_int32_t tmp_sectors; 2149 u_int32_t scratch_pad_2; 2150 int msix_enable = 0; 2151 int fw_msix_count = 0; 2152 2153 /* Make sure Firmware is ready */ 2154 ret = mrsas_transition_to_ready(sc, ocr); 2155 if (ret != SUCCESS) { 2156 return (ret); 2157 } 2158 /* MSI-x index 0- reply post host index register */ 2159 sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET; 2160 /* Check if MSI-X is supported while in ready state */ 2161 msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a; 2162 2163 if (msix_enable) { 2164 scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set, 2165 outbound_scratch_pad_2)); 2166 2167 /* Check max MSI-X vectors */ 2168 if (sc->device_id == MRSAS_TBOLT) { 2169 sc->msix_vectors = (scratch_pad_2 2170 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; 2171 fw_msix_count = sc->msix_vectors; 2172 } else { 2173 /* Invader/Fury supports 96 MSI-X vectors */ 2174 sc->msix_vectors = ((scratch_pad_2 2175 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) 2176 >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; 2177 fw_msix_count = sc->msix_vectors; 2178 2179 for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; 2180 loop++) { 2181 sc->msix_reg_offset[loop] = 2182 MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + 2183 (loop * 0x10); 2184 } 2185 } 2186 2187 /* Don't bother allocating more MSI-X vectors than cpus */ 2188 sc->msix_vectors = min(sc->msix_vectors, 2189 mp_ncpus); 2190 2191 /* Allocate MSI-x vectors */ 2192 if (mrsas_allocate_msix(sc) == SUCCESS) 2193 sc->msix_enable = 1; 2194 else 
2195 sc->msix_enable = 0; 2196 2197 device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector," 2198 "Online CPU %d Current MSIX <%d>\n", 2199 fw_msix_count, mp_ncpus, sc->msix_vectors); 2200 } 2201 if (mrsas_init_adapter(sc) != SUCCESS) { 2202 device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n"); 2203 return (1); 2204 } 2205 /* Allocate internal commands for pass-thru */ 2206 if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) { 2207 device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n"); 2208 return (1); 2209 } 2210 sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT); 2211 if (!sc->ctrl_info) { 2212 device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n"); 2213 return (1); 2214 } 2215 /* 2216 * Get the controller info from FW, so that the MAX VD support 2217 * availability can be decided. 2218 */ 2219 if (mrsas_get_ctrl_info(sc)) { 2220 device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n"); 2221 return (1); 2222 } 2223 sc->secure_jbod_support = 2224 (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD; 2225 2226 if (sc->secure_jbod_support) 2227 device_printf(sc->mrsas_dev, "FW supports SED \n"); 2228 2229 if (sc->use_seqnum_jbod_fp) 2230 device_printf(sc->mrsas_dev, "FW supports JBOD Map \n"); 2231 2232 if (mrsas_setup_raidmap(sc) != SUCCESS) { 2233 device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! 
" 2234 "There seems to be some problem in the controller\n" 2235 "Please contact to the SUPPORT TEAM if the problem persists\n"); 2236 } 2237 megasas_setup_jbod_map(sc); 2238 2239 /* For pass-thru, get PD/LD list and controller info */ 2240 memset(sc->pd_list, 0, 2241 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); 2242 if (mrsas_get_pd_list(sc) != SUCCESS) { 2243 device_printf(sc->mrsas_dev, "Get PD list failed.\n"); 2244 return (1); 2245 } 2246 memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS); 2247 if (mrsas_get_ld_list(sc) != SUCCESS) { 2248 device_printf(sc->mrsas_dev, "Get LD lsit failed.\n"); 2249 return (1); 2250 } 2251 /* 2252 * Compute the max allowed sectors per IO: The controller info has 2253 * two limits on max sectors. Driver should use the minimum of these 2254 * two. 2255 * 2256 * 1 << stripe_sz_ops.min = max sectors per strip 2257 * 2258 * Note that older firmwares ( < FW ver 30) didn't report information to 2259 * calculate max_sectors_1. So the number ended up as zero always. 
2260 */ 2261 tmp_sectors = 0; 2262 max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) * 2263 sc->ctrl_info->max_strips_per_io; 2264 max_sectors_2 = sc->ctrl_info->max_request_size; 2265 tmp_sectors = min(max_sectors_1, max_sectors_2); 2266 sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512; 2267 2268 if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors)) 2269 sc->max_sectors_per_req = tmp_sectors; 2270 2271 sc->disableOnlineCtrlReset = 2272 sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; 2273 sc->UnevenSpanSupport = 2274 sc->ctrl_info->adapterOperations2.supportUnevenSpans; 2275 if (sc->UnevenSpanSupport) { 2276 device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n", 2277 sc->UnevenSpanSupport); 2278 2279 if (MR_ValidateMapInfo(sc)) 2280 sc->fast_path_io = 1; 2281 else 2282 sc->fast_path_io = 0; 2283 } 2284 return (0); 2285} 2286 2287/* 2288 * mrsas_init_adapter: Initializes the adapter/controller 2289 * input: Adapter soft state 2290 * 2291 * Prepares for the issuing of the IOC Init cmd to FW for initializing the 2292 * ROC/controller. The FW register is read to determined the number of 2293 * commands that is supported. All memory allocations for IO is based on 2294 * max_cmd. Appropriate calculations are performed in this function. 
 */
int
mrsas_init_adapter(struct mrsas_softc *sc)
{
	uint32_t status;
	u_int32_t max_cmd, scratch_pad_2;
	int ret;
	int i = 0;

	/* Read FW status register */
	status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));

	/* Get operational params from status register */
	sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK;

	/* Decrement the max supported by 1, to correlate with FW */
	sc->max_fw_cmds = sc->max_fw_cmds - 1;
	max_cmd = sc->max_fw_cmds;

	/*
	 * Determine allocation size of command frames.  The reply queue
	 * depth is max_cmd+1 rounded up to a multiple of 16, doubled.
	 */
	sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2;
	sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd;
	sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth);
	/* One extra frame beyond max_cmd+1 worth of default-size IO frames. */
	sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1));
	scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
	    outbound_scratch_pad_2));
	/*
	 * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set,
	 * Firmware support extended IO chain frame which is 4 time more
	 * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) =
	 * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K
	 */
	if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK)
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_1MB_IO;
	else
		sc->max_chain_frame_sz =
		    ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5)
		    * MEGASAS_256K_IO;

	sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd;
	/* SGEs are 16 bytes each; count how many fit in the main message. */
	sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16;

	sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION);
	/* -2: one SGE is consumed by the chain element in each frame. */
	sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2;

	mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n",
	    sc->max_num_sge, sc->max_chain_frame_sz);

	/* Used for pass thru MFI frame (DCMD) */
	sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16;

	sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE -
	    sizeof(MPI2_SGE_IO_UNION)) / 16;

	/* One reply-index slot per MSI-X vector (at least one). */
	int count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;

	for (i = 0; i < count; i++)
		sc->last_reply_idx[i] = 0;

	ret = mrsas_alloc_mem(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_alloc_mpt_cmds(sc);
	if (ret != SUCCESS)
		return (ret);

	ret = mrsas_ioc_init(sc);
	if (ret != SUCCESS)
		return (ret);

	return (0);
}

/*
 * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command
 * input: Adapter soft state
 *
 * Allocates for the IOC Init cmd to FW to initialize the ROC/controller.
 */
int
mrsas_alloc_ioc_cmd(struct mrsas_softc *sc)
{
	int ioc_init_size;

	/* Allocate IOC INIT command */
	/* 1024 bytes of MFI frame headroom precede the MPI2 IOC INIT body. */
	ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST);
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    ioc_init_size,
	    1,
	    ioc_init_size,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->ioc_init_tag)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem,
	    BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n");
		return (ENOMEM);
	}
	bzero(sc->ioc_init_mem, ioc_init_size);
	if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap,
	    sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb,
	    &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n");
		return (ENOMEM);
	}
	return (0);
}

/*
 * mrsas_free_ioc_cmd: Frees memory of the IOC Init command
 * input: Adapter soft state
 *
 * Deallocates memory of the IOC Init cmd (unload, free, destroy tag), each
 * step guarded so partial allocations are torn down safely.
 */
void
mrsas_free_ioc_cmd(struct mrsas_softc *sc)
{
	if (sc->ioc_init_phys_mem)
		bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap);
	if (sc->ioc_init_mem != NULL)
		bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap);
	if (sc->ioc_init_tag != NULL)
		bus_dma_tag_destroy(sc->ioc_init_tag);
}

/*
 * mrsas_ioc_init: Sends IOC Init command to FW
 * input: Adapter soft state
 *
 * Issues the IOC Init cmd to FW to initialize the ROC/controller.
 */
int
mrsas_ioc_init(struct mrsas_softc *sc)
{
	struct mrsas_init_frame *init_frame;
	pMpi2IOCInitRequest_t IOCInitMsg;
	MRSAS_REQUEST_DESCRIPTOR_UNION req_desc;
	u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME;
	bus_addr_t phys_addr;
	int i, retcode = 0;
	u_int32_t scratch_pad_2;

	/* Allocate memory for the IOC INIT command */
	if (mrsas_alloc_ioc_cmd(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n");
		return (1);
	}

	/* Probe FW SYNC_CACHE support unless disabled by tunable. */
	if (!sc->block_sync_cache) {
		scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
		    outbound_scratch_pad_2));
		sc->fw_sync_cache_support = (scratch_pad_2 &
		    MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
	}

	/* MPI2 IOC INIT body lives past the 1K MFI frame headroom. */
	IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024);
	IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT;
	IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	IOCInitMsg->MsgVersion = MPI2_VERSION;
	IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION;
	IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4;
	IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth;
	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);

	/* The MFI wrapper frame at offset 0 carries the request to FW. */
	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
	init_frame->cmd = MFI_CMD_INIT;
	init_frame->cmd_status = 0xFF;	/* sentinel: polled for completion below */
	init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	/* driver support Extended MSIX */
	if (sc->mrsas_gen3_ctrl) {
		init_frame->driver_operations.
		    mfi_capabilities.support_additional_msix = 1;
	}
	if (sc->verbuf_mem) {
		snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n",
		    MRSAS_VERSION);
		init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr;
		init_frame->driver_ver_hi = 0;
	}
	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1;
	init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1;
	init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1;
	if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN)
		init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1;
	phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024;
	init_frame->queue_info_new_phys_addr_lo = phys_addr;
	init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t);

	req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem;
	req_desc.MFAIo.RequestFlags =
	    (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

	mrsas_disable_intr(sc);
	mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n");
	mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high);

	/*
	 * Poll response timer to wait for Firmware response. While this
	 * timer with the DELAY call could block CPU, the time interval for
	 * this is only 1 millisecond.
	 */
	if (init_frame->cmd_status == 0xFF) {
		for (i = 0; i < (max_wait * 1000); i++) {
			if (init_frame->cmd_status == 0xFF)
				DELAY(1000);
			else
				break;
		}
	}
	if (init_frame->cmd_status == 0)
		mrsas_dprint(sc, MRSAS_OCR,
		    "IOC INIT response received from FW.\n");
	else {
		if (init_frame->cmd_status == 0xFF)
			device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait);
		else
			device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status);
		retcode = 1;
	}

	mrsas_free_ioc_cmd(sc);
	return (retcode);
}

/*
 * mrsas_alloc_mpt_cmds: Allocates the command packets
 * input: Adapter instance soft state
 *
 * This function allocates the internal commands for IOs. Each command that is
 * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An
 * array is allocated with mrsas_mpt_cmd context. The free commands are
 * maintained in a linked list (cmd pool). SMID value range is from 1 to
 * max_fw_cmds.
 */
int
mrsas_alloc_mpt_cmds(struct mrsas_softc *sc)
{
	int i, j;
	u_int32_t max_cmd, count;
	struct mrsas_mpt_cmd *cmd;
	pMpi2ReplyDescriptorsUnion_t reply_desc;
	u_int32_t offset, chain_offset, sense_offset;
	bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys;
	u_int8_t *io_req_base, *chain_frame_base, *sense_base;

	max_cmd = sc->max_fw_cmds;

	sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT);
	if (!sc->req_desc) {
		device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n");
		return (ENOMEM);
	}
	memset(sc->req_desc, 0, sc->request_alloc_sz);

	/*
	 * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT);
	if (!sc->mpt_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n");
		return (ENOMEM);
	}
	memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd);
	for (i = 0; i < max_cmd; i++) {
		sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mpt_cmd_list[i]) {
			/* Unwind: free everything allocated so far. */
			for (j = 0; j < i; j++)
				free(sc->mpt_cmd_list[j], M_MRSAS);
			free(sc->mpt_cmd_list, M_MRSAS);
			sc->mpt_cmd_list = NULL;
			return (ENOMEM);
		}
	}

	/*
	 * Carve each command's IO request frame, chain frame and sense
	 * buffer out of the shared DMA regions.  The first IO frame is
	 * reserved (SMIDs start at 1), hence the one-frame offset.
	 */
	io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
	chain_frame_base = (u_int8_t *)sc->chain_frame_mem;
	chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr;
	sense_base = (u_int8_t *)sc->sense_mem;
	sense_base_phys = (bus_addr_t)sc->sense_phys_addr;
	for (i = 0; i < max_cmd; i++) {
		cmd = sc->mpt_cmd_list[i];
		offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i;
		chain_offset = sc->max_chain_frame_sz * i;
		sense_offset = MRSAS_SENSE_LEN * i;
		memset(cmd, 0, sizeof(struct mrsas_mpt_cmd));
		cmd->index = i + 1;	/* SMID; 0 is reserved */
		cmd->ccb_ptr = NULL;
		callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0);
		cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
		cmd->sc = sc;
		cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset);
		memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST));
		cmd->io_request_phys_addr = io_req_base_phys + offset;
		cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset);
		cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset;
		cmd->sense = sense_base + sense_offset;
		cmd->sense_phys_addr = sense_base_phys + sense_offset;
		if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) {
			return (FAIL);
		}
		TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next);
	}

	/* Initialize reply descriptor array to 0xFFFFFFFF */
	reply_desc = sc->reply_desc_mem;
	count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
	for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) {
		reply_desc->Words = MRSAS_ULONG_MAX;
	}
	return (0);
}

/*
 * mrsas_fire_cmd: Sends command to FW
 * input: Adapter softstate
 * 	request descriptor address low
 * 	request descriptor address high
 *
 * This functions fires the command to Firmware by writing to the
 * inbound_low_queue_port and inbound_high_queue_port.
 */
void
mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo,
    u_int32_t req_desc_hi)
{
	/* pci_lock keeps the low/high register writes paired. */
	mtx_lock(&sc->pci_lock);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port),
	    req_desc_lo);
	mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port),
	    req_desc_hi);
	mtx_unlock(&sc->pci_lock);
}

/*
 * mrsas_transition_to_ready: Move FW to Ready state input:
 * Adapter instance soft state
 *
 * During the initialization, FW passes can potentially be in any one of several
 * possible states. If the FW in operational, waiting-for-handshake states,
 * driver must take steps to bring it to ready state. Otherwise, it has to
 * wait for the ready state.
 */
int
mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr)
{
	int i;
	u_int8_t max_wait;
	u_int32_t val, fw_state;
	u_int32_t cur_state;
	u_int32_t abs_state, curr_abs_state;

	val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
	fw_state = val & MFI_STATE_MASK;
	max_wait = MRSAS_RESET_WAIT_TIME;

	if (fw_state != MFI_STATE_READY)
		device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n");

	/* Step the FW state machine toward READY, waiting at each state. */
	while (fw_state != MFI_STATE_READY) {
		abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad));
		switch (fw_state) {
		case MFI_STATE_FAULT:
			device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n");
			/* During OCR a FAULT is expected; otherwise fatal. */
			if (ocr) {
				cur_state = MFI_STATE_FAULT;
				break;
			} else
				return -ENODEV;
		case MFI_STATE_WAIT_HANDSHAKE:
			/* Set the CLR bit in inbound doorbell */
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_WAIT_HANDSHAKE;
			break;
		case MFI_STATE_BOOT_MESSAGE_PENDING:
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
			    MFI_INIT_HOTPLUG);
			cur_state = MFI_STATE_BOOT_MESSAGE_PENDING;
			break;
		case MFI_STATE_OPERATIONAL:
			/*
			 * Bring it to READY state; assuming max wait 10
			 * secs
			 */
			mrsas_disable_intr(sc);
			mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS);
			for (i = 0; i < max_wait * 1000; i++) {
				if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1)
					DELAY(1000);
				else
					break;
			}
			cur_state = MFI_STATE_OPERATIONAL;
			break;
		case MFI_STATE_UNDEFINED:
			/*
			 * This state should not last for more than 2
			 * seconds
			 */
			cur_state = MFI_STATE_UNDEFINED;
			break;
		case MFI_STATE_BB_INIT:
			cur_state = MFI_STATE_BB_INIT;
			break;
		case MFI_STATE_FW_INIT:
			cur_state = MFI_STATE_FW_INIT;
			break;
		case MFI_STATE_FW_INIT_2:
			cur_state = MFI_STATE_FW_INIT_2;
			break;
		case MFI_STATE_DEVICE_SCAN:
			cur_state = MFI_STATE_DEVICE_SCAN;
			break;
		case MFI_STATE_FLUSH_CACHE:
			cur_state = MFI_STATE_FLUSH_CACHE;
			break;
		default:
			device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state);
			return -ENODEV;
		}

		/*
		 * The cur_state should not last for more than max_wait secs
		 */
		for (i = 0; i < (max_wait * 1000); i++) {
			fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad)) & MFI_STATE_MASK);
			curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
			    outbound_scratch_pad));
			if (abs_state == curr_abs_state)
				DELAY(1000);
			else
				break;
		}

		/*
		 * Return error if fw_state hasn't changed after max_wait
		 */
		if (curr_abs_state == abs_state) {
			device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed "
			    "in %d secs\n", fw_state, max_wait);
			return -ENODEV;
		}
	}
	mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n");
	return 0;
}

/*
 * mrsas_get_mfi_cmd: Get a cmd from free command pool
 * input: Adapter soft state
 *
 * This function removes an MFI command from the command list.
 * Returns NULL when the pool is empty.
 */
struct mrsas_mfi_cmd *
mrsas_get_mfi_cmd(struct mrsas_softc *sc)
{
	struct mrsas_mfi_cmd *cmd = NULL;

	mtx_lock(&sc->mfi_cmd_pool_lock);
	if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) {
		cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head);
		TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next);
	}
	mtx_unlock(&sc->mfi_cmd_pool_lock);

	return cmd;
}

/*
 * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter.
 * input: Adapter Context.
 *
 * This function will check FW status register and flag do_timeout_reset flag.
2787 * It will do OCR/Kill adapter if FW is in fault state or IO timed out has 2788 * trigger reset. 2789 */ 2790static void 2791mrsas_ocr_thread(void *arg) 2792{ 2793 struct mrsas_softc *sc; 2794 u_int32_t fw_status, fw_state; 2795 u_int8_t tm_target_reset_failed = 0; 2796 2797 sc = (struct mrsas_softc *)arg; 2798 2799 mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__); 2800 2801 sc->ocr_thread_active = 1; 2802 mtx_lock(&sc->sim_lock); 2803 for (;;) { 2804 /* Sleep for 1 second and check the queue status */ 2805 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, 2806 "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz); 2807 if (sc->remove_in_progress || 2808 sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { 2809 mrsas_dprint(sc, MRSAS_OCR, 2810 "Exit due to %s from %s\n", 2811 sc->remove_in_progress ? "Shutdown" : 2812 "Hardware critical error", __func__); 2813 break; 2814 } 2815 fw_status = mrsas_read_reg(sc, 2816 offsetof(mrsas_reg_set, outbound_scratch_pad)); 2817 fw_state = fw_status & MFI_STATE_MASK; 2818 if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset || 2819 mrsas_atomic_read(&sc->target_reset_outstanding)) { 2820 2821 /* First, freeze further IOs to come to the SIM */ 2822 mrsas_xpt_freeze(sc); 2823 2824 /* If this is an IO timeout then go for target reset */ 2825 if (mrsas_atomic_read(&sc->target_reset_outstanding)) { 2826 device_printf(sc->mrsas_dev, "Initiating Target RESET " 2827 "because of SCSI IO timeout!\n"); 2828 2829 /* Let the remaining IOs to complete */ 2830 msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, 2831 "mrsas_reset_targets", 5 * hz); 2832 2833 /* Try to reset the target device */ 2834 if (mrsas_reset_targets(sc) == FAIL) 2835 tm_target_reset_failed = 1; 2836 } 2837 2838 /* If this is a DCMD timeout or FW fault, 2839 * then go for controller reset 2840 */ 2841 if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed || 2842 (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) { 2843 if (tm_target_reset_failed) 2844 device_printf(sc->mrsas_dev, 
"Initiaiting OCR because of " 2845 "TM FAILURE!\n"); 2846 else 2847 device_printf(sc->mrsas_dev, "Initiaiting OCR " 2848 "because of %s!\n", sc->do_timedout_reset ? 2849 "DCMD IO Timeout" : "FW fault"); 2850 2851 mtx_lock_spin(&sc->ioctl_lock); 2852 sc->reset_in_progress = 1; 2853 mtx_unlock_spin(&sc->ioctl_lock); 2854 sc->reset_count++; 2855 2856 /* 2857 * Wait for the AEN task to be completed if it is running. 2858 */ 2859 mtx_unlock(&sc->sim_lock); 2860 taskqueue_drain(sc->ev_tq, &sc->ev_task); 2861 mtx_lock(&sc->sim_lock); 2862 2863 taskqueue_block(sc->ev_tq); 2864 /* Try to reset the controller */ 2865 mrsas_reset_ctrl(sc, sc->do_timedout_reset); 2866 2867 sc->do_timedout_reset = 0; 2868 sc->reset_in_progress = 0; 2869 tm_target_reset_failed = 0; 2870 mrsas_atomic_set(&sc->target_reset_outstanding, 0); 2871 memset(sc->target_reset_pool, 0, 2872 sizeof(sc->target_reset_pool)); 2873 taskqueue_unblock(sc->ev_tq); 2874 } 2875 2876 /* Now allow IOs to come to the SIM */ 2877 mrsas_xpt_release(sc); 2878 } 2879 } 2880 mtx_unlock(&sc->sim_lock); 2881 sc->ocr_thread_active = 0; 2882 mrsas_kproc_exit(0); 2883} 2884 2885/* 2886 * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR. 2887 * input: Adapter Context. 2888 * 2889 * This function will clear reply descriptor so that post OCR driver and FW will 2890 * lost old history. 2891 */ 2892void 2893mrsas_reset_reply_desc(struct mrsas_softc *sc) 2894{ 2895 int i, count; 2896 pMpi2ReplyDescriptorsUnion_t reply_desc; 2897 2898 count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; 2899 for (i = 0; i < count; i++) 2900 sc->last_reply_idx[i] = 0; 2901 2902 reply_desc = sc->reply_desc_mem; 2903 for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) { 2904 reply_desc->Words = MRSAS_ULONG_MAX; 2905 } 2906} 2907 2908/* 2909 * mrsas_reset_ctrl: Core function to OCR/Kill adapter. 2910 * input: Adapter Context. 2911 * 2912 * This function will run from thread context so that it can sleep. 1. 
Do not
 * handle OCR if FW is in HW critical error. 2. Wait for outstanding command
 * to complete for 180 seconds. 3. If #2 does not find any outstanding
 * command Controller is in working state, so skip OCR. Otherwise, do
 * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. Start of the
 * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post
 * OCR, Re-fire Management command and move Controller to Operation state.
 */
int
mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason)
{
    int retval = SUCCESS, i, j, retry = 0;
    u_int32_t host_diag, abs_state, status_reg, reset_adapter;
    union ccb *ccb;
    struct mrsas_mfi_cmd *mfi_cmd;
    struct mrsas_mpt_cmd *mpt_cmd;
    union mrsas_evt_class_locale class_locale;
    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

    /* OCR cannot recover a controller that is already in HW critical error. */
    if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) {
        device_printf(sc->mrsas_dev,
            "mrsas: Hardware critical error, returning FAIL.\n");
        return FAIL;
    }
    /* Mark reset in progress and quiesce interrupts before polling. */
    mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
    sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT;
    mrsas_disable_intr(sc);
    /* Give the fault-check thread a window before draining commands. */
    msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr",
        sc->mrsas_fw_fault_check_delay * hz);

    /* First try waiting for commands to complete */
    if (mrsas_wait_for_outstanding(sc, reset_reason)) {
        mrsas_dprint(sc, MRSAS_OCR,
            "resetting adapter from %s.\n",
            __func__);
        /* Now return commands back to the CAM layer */
        mtx_unlock(&sc->sim_lock);
        for (i = 0; i < sc->max_fw_cmds; i++) {
            mpt_cmd = sc->mpt_cmd_list[i];
            if (mpt_cmd->ccb_ptr) {
                ccb = (union ccb *)(mpt_cmd->ccb_ptr);
                /* Tell CAM these commands died in a bus reset. */
                ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
                mrsas_cmd_done(sc, mpt_cmd);
                mrsas_atomic_dec(&sc->fw_outstanding);
            }
        }
        mtx_lock(&sc->sim_lock);

        status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
            outbound_scratch_pad));
        abs_state = status_reg & MFI_STATE_MASK;
        reset_adapter = status_reg & MFI_RESET_ADAPTER;
        if (sc->disableOnlineCtrlReset ||
            (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
            /* Reset not supported, kill adapter */
            mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n");
            mrsas_kill_hba(sc);
            retval = FAIL;
            goto out;
        }
        /* Now try to reset the chip */
        for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) {
            /*
             * Write the MPI2 diagnostic unlock key sequence; the six
             * magic values must be written in this exact order.
             */
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
                MPI2_WRSEQ_FLUSH_KEY_VALUE);
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
                MPI2_WRSEQ_1ST_KEY_VALUE);
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
                MPI2_WRSEQ_2ND_KEY_VALUE);
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
                MPI2_WRSEQ_3RD_KEY_VALUE);
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
                MPI2_WRSEQ_4TH_KEY_VALUE);
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
                MPI2_WRSEQ_5TH_KEY_VALUE);
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset),
                MPI2_WRSEQ_6TH_KEY_VALUE);

            /* Check that the diag write enable (DRWE) bit is on */
            host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                fusion_host_diag));
            retry = 0;
            /* Poll up to ~10s (100 x 100ms) for DRWE to assert. */
            while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) {
                DELAY(100 * 1000);
                host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                    fusion_host_diag));
                if (retry++ == 100) {
                    mrsas_dprint(sc, MRSAS_OCR,
                        "Host diag unlock failed!\n");
                    break;
                }
            }
            if (!(host_diag & HOST_DIAG_WRITE_ENABLE))
                continue;

            /* Send chip reset command */
            mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag),
                host_diag | HOST_DIAG_RESET_ADAPTER);
            DELAY(3000 * 1000);

            /* Make sure reset adapter bit is cleared */
            host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                fusion_host_diag));
            retry = 0;
            /* Poll up to ~100s (1000 x 100ms) for the reset bit to clear. */
            while (host_diag & HOST_DIAG_RESET_ADAPTER) {
                DELAY(100 * 1000);
                host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                    fusion_host_diag));
                if (retry++ == 1000) {
                    mrsas_dprint(sc, MRSAS_OCR,
                        "Diag reset adapter never cleared!\n");
                    break;
                }
            }
            if (host_diag & HOST_DIAG_RESET_ADAPTER)
                continue;

            abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                outbound_scratch_pad)) & MFI_STATE_MASK;
            retry = 0;

            /* Wait for FW to pass the FW_INIT state after the reset. */
            while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) {
                DELAY(100 * 1000);
                abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
                    outbound_scratch_pad)) & MFI_STATE_MASK;
            }
            if (abs_state <= MFI_STATE_FW_INIT) {
                mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT,"
                    " state = 0x%x\n", abs_state);
                continue;
            }
            /* Wait for FW to become ready */
            if (mrsas_transition_to_ready(sc, 1)) {
                mrsas_dprint(sc, MRSAS_OCR,
                    "mrsas: Failed to transition controller to ready.\n");
                continue;
            }
            mrsas_reset_reply_desc(sc);
            if (mrsas_ioc_init(sc)) {
                mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n");
                continue;
            }
            /* Re-fire pending IOCTL (sync) MFI commands; release the rest. */
            for (j = 0; j < sc->max_fw_cmds; j++) {
                mpt_cmd = sc->mpt_cmd_list[j];
                if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
                    mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx];
                    /* If not an IOCTL then release the command else re-fire */
                    if (!mfi_cmd->sync_cmd) {
                        mrsas_release_mfi_cmd(mfi_cmd);
                    } else {
                        req_desc = mrsas_get_request_desc(sc,
                            mfi_cmd->cmd_id.context.smid - 1);
                        mrsas_dprint(sc, MRSAS_OCR,
                            "Re-fire command DCMD opcode 0x%x index %d\n ",
                            mfi_cmd->frame->dcmd.opcode, j);
                        if (!req_desc)
                            device_printf(sc->mrsas_dev,
                                "Cannot build MPT cmd.\n");
                        else
                            mrsas_fire_cmd(sc, req_desc->addr.u.low,
                                req_desc->addr.u.high);
                    }
                }
            }

            /* Reset load balance info */
            memset(sc->load_balance_info, 0,
                sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);

            /* Controller info may have changed across the reset. */
            if (mrsas_get_ctrl_info(sc)) {
                mrsas_kill_hba(sc);
                retval = FAIL;
                goto out;
            }
            if (!mrsas_get_map_info(sc))
                mrsas_sync_map_info(sc);

            megasas_setup_jbod_map(sc);

            mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
            mrsas_enable_intr(sc);
            sc->adprecovery = MRSAS_HBA_OPERATIONAL;

            /* Register AEN with FW for last sequence number */
            class_locale.members.reserved = 0;
            class_locale.members.locale = MR_EVT_LOCALE_ALL;
            class_locale.members.class = MR_EVT_CLASS_DEBUG;

            /* AEN registration sleeps, so drop the SIM lock around it. */
            mtx_unlock(&sc->sim_lock);
            if (mrsas_register_aen(sc, sc->last_seq_num,
                class_locale.word)) {
                device_printf(sc->mrsas_dev,
                    "ERROR: AEN registration FAILED from OCR !!! "
                    "Further events from the controller cannot be notified."
                    "Either there is some problem in the controller"
                    "or the controller does not support AEN.\n"
                    "Please contact to the SUPPORT TEAM if the problem persists\n");
            }
            mtx_lock(&sc->sim_lock);

            /* Adapter reset completed successfully */
            device_printf(sc->mrsas_dev, "Reset successful\n");
            retval = SUCCESS;
            goto out;
        }
        /* Reset failed, kill the adapter */
        device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n");
        mrsas_kill_hba(sc);
        retval = FAIL;
    } else {
        /* All commands drained on their own; no chip reset needed. */
        mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
        mrsas_enable_intr(sc);
        sc->adprecovery = MRSAS_HBA_OPERATIONAL;
    }
out:
    mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags);
    mrsas_dprint(sc, MRSAS_OCR,
        "Reset Exit with %d.\n", retval);
    return retval;
}

/*
 * mrsas_kill_hba:	Kill HBA when OCR is not supported
 * input:		Adapter Context.
 *
 * This function will kill HBA when OCR is not supported.
 */
void
mrsas_kill_hba(struct mrsas_softc *sc)
{
    /* Mark the adapter dead before touching hardware. */
    sc->adprecovery = MRSAS_HW_CRITICAL_ERROR;
    DELAY(1000 * 1000);
    mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__);
    /* Ask firmware to stop the adapter via the doorbell register. */
    mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell),
        MFI_STOP_ADP);
    /* Flush */
    mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell));
    /* Complete any IOCTLs still waiting so their issuers can return. */
    mrsas_complete_outstanding_ioctls(sc);
}

/**
 * mrsas_complete_outstanding_ioctls	Complete pending IOCTLS after kill_hba
 * input:				Controller softc
 *
 * Returns void
 */
void
mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc)
{
    int i;
    struct mrsas_mpt_cmd *cmd_mpt;
    struct mrsas_mfi_cmd *cmd_mfi;
    u_int32_t count, MSIxIndex;

    /* One completion pass per MSI-X vector (or one if INTx). */
    count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
    for (i = 0; i < sc->max_fw_cmds; i++) {
        cmd_mpt = sc->mpt_cmd_list[i];

        /* A valid sync_cmd_idx links this MPT cmd to an MFI cmd. */
        if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) {
            cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx];
            if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) {
                for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
                    mrsas_complete_mptmfi_passthru(sc, cmd_mfi,
                        cmd_mpt->io_request->RaidContext.status);
            }
        }
    }
}

/*
 * mrsas_wait_for_outstanding:	Wait for outstanding commands
 * input:			Adapter Context.
 *
 * This function will wait for 180 seconds for outstanding commands to be
 * completed.
 */
int
mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason)
{
    int i, outstanding, retval = 0;
    u_int32_t fw_state, count, MSIxIndex;

    /* Poll once per second for up to MRSAS_RESET_WAIT_TIME seconds. */
    for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) {
        if (sc->remove_in_progress) {
            mrsas_dprint(sc, MRSAS_OCR,
                "Driver remove or shutdown called.\n");
            retval = 1;
            goto out;
        }
        /* Check if firmware is in fault state */
        fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
            outbound_scratch_pad)) & MFI_STATE_MASK;
        if (fw_state == MFI_STATE_FAULT) {
            mrsas_dprint(sc, MRSAS_OCR,
                "Found FW in FAULT state, will reset adapter.\n");
            count = sc->msix_vectors > 0 ? sc->msix_vectors : 1;
            /* Drain reply queues; completion code sleeps, so drop the lock. */
            mtx_unlock(&sc->sim_lock);
            for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
                mrsas_complete_cmd(sc, MSIxIndex);
            mtx_lock(&sc->sim_lock);
            retval = 1;
            goto out;
        }
        /* A DCMD timeout always forces the reset path. */
        if (check_reason == MFI_DCMD_TIMEOUT_OCR) {
            mrsas_dprint(sc, MRSAS_OCR,
                "DCMD IO TIMEOUT detected, will reset adapter.\n");
            retval = 1;
            goto out;
        }
        outstanding = mrsas_atomic_read(&sc->fw_outstanding);
        if (!outstanding)
            goto out;

        /* Periodically log progress and drain any completed replies. */
        if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) {
            mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d "
                "commands to complete\n", i, outstanding);
            count = sc->msix_vectors > 0 ?
                sc->msix_vectors : 1;
            mtx_unlock(&sc->sim_lock);
            for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++)
                mrsas_complete_cmd(sc, MSIxIndex);
            mtx_lock(&sc->sim_lock);
        }
        DELAY(1000 * 1000);
    }

    /* Timed out with commands still in flight: caller must reset. */
    if (mrsas_atomic_read(&sc->fw_outstanding)) {
        mrsas_dprint(sc, MRSAS_OCR,
            " pending commands remain after waiting,"
            " will reset adapter.\n");
        retval = 1;
    }
out:
    return retval;
}

/*
 * mrsas_release_mfi_cmd:	Return a cmd to free command pool
 * input:			Command packet for return to free cmd pool
 *
 * This function returns the MFI & MPT command to the command list.
 */
void
mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi)
{
    struct mrsas_softc *sc = cmd_mfi->sc;
    struct mrsas_mpt_cmd *cmd_mpt;

    mtx_lock(&sc->mfi_cmd_pool_lock);
    /*
     * Release the mpt command (if at all it is allocated
     * associated with the mfi command
     */
    if (cmd_mfi->cmd_id.context.smid) {
        mtx_lock(&sc->mpt_cmd_pool_lock);
        /* Get the mpt cmd from mfi cmd frame's smid value */
        cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1];
        cmd_mpt->flags = 0;
        /* MRSAS_ULONG_MAX marks the MPT cmd as unlinked from any MFI cmd. */
        cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX;
        TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next);
        mtx_unlock(&sc->mpt_cmd_pool_lock);
    }
    /* Release the mfi command */
    cmd_mfi->ccb_ptr = NULL;
    cmd_mfi->cmd_id.frame_count = 0;
    TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next);
    mtx_unlock(&sc->mfi_cmd_pool_lock);

    return;
}

/*
 * mrsas_get_controller_info:	Returns FW's controller structure
 * input:			Adapter soft state
 *				Controller information structure
 *
 * Issues an internal command (DCMD) to get the FW's controller structure. This
 * information is mainly used to find out the maximum IO transfer per command
 * supported by the FW.
 */
static int
mrsas_get_ctrl_info(struct mrsas_softc *sc)
{
    int retcode = 0;
    u_int8_t do_ocr = 1;
    struct mrsas_mfi_cmd *cmd;
    struct mrsas_dcmd_frame *dcmd;

    cmd = mrsas_get_mfi_cmd(sc);

    if (!cmd) {
        device_printf(sc->mrsas_dev, "Failed to get a free cmd\n");
        return -ENOMEM;
    }
    dcmd = &cmd->frame->dcmd;

    /* DMA-able bounce buffer for the controller info payload. */
    if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) {
        device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n");
        mrsas_release_mfi_cmd(cmd);
        return -ENOMEM;
    }
    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

    /* 0xFF cmd_status marks "not yet completed" for MFI polling. */
    dcmd->cmd = MFI_CMD_DCMD;
    dcmd->cmd_status = 0xFF;
    dcmd->sge_count = 1;
    dcmd->flags = MFI_FRAME_DIR_READ;
    dcmd->timeout = 0;
    dcmd->pad_0 = 0;
    dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info);
    dcmd->opcode = MR_DCMD_CTRL_GET_INFO;
    dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr;
    dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info);

    /* Poll when interrupts are masked (e.g. during OCR); sleep otherwise. */
    if (!sc->mask_interrupts)
        retcode = mrsas_issue_blocked_cmd(sc, cmd);
    else
        retcode = mrsas_issue_polled(sc, cmd);

    if (retcode == ETIMEDOUT)
        goto dcmd_timeout;
    else
        memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info));

    do_ocr = 0;
    mrsas_update_ext_vd_details(sc);

    sc->use_seqnum_jbod_fp =
        sc->ctrl_info->adapterOperations3.useSeqNumJbodFP;
    sc->disableOnlineCtrlReset =
        sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;

dcmd_timeout:
    mrsas_free_ctlr_info_cmd(sc);

    /* do_ocr is still set only if the DCMD timed out: schedule a reset. */
    if (do_ocr)
        sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

    /*
     * NOTE(review): the cmd is only released on the blocked path; on the
     * polled (mask_interrupts) path it is presumably reclaimed by the OCR
     * re-fire/release logic — confirm before changing.
     */
    if (!sc->mask_interrupts)
        mrsas_release_mfi_cmd(cmd);

    return (retcode);
}

/*
 * mrsas_update_ext_vd_details : Update details w.r.t Extended VD
 * input:
 *	sc - Controller's softc
 */
static void
mrsas_update_ext_vd_details(struct mrsas_softc *sc)
{
    sc->max256vdSupport =
        sc->ctrl_info->adapterOperations3.supportMaxExtLDs;
    /* Below is additional check to address future FW enhancement */
    if (sc->ctrl_info->max_lds > 64)
        sc->max256vdSupport = 1;

    sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS
        * MRSAS_MAX_DEV_PER_CHANNEL;
    sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS
        * MRSAS_MAX_DEV_PER_CHANNEL;
    /* Extended-VD firmware supports more logical drives. */
    if (sc->max256vdSupport) {
        sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
        sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
    } else {
        sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
        sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
    }

    /* Map sizes: structs embed one MR_LD_SPAN_MAP, hence "count - 1". */
    sc->old_map_sz = sizeof(MR_FW_RAID_MAP) +
        (sizeof(MR_LD_SPAN_MAP) *
        (sc->fw_supported_vd_count - 1));
    sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT);
    sc->drv_map_sz = sizeof(MR_DRV_RAID_MAP) +
        (sizeof(MR_LD_SPAN_MAP) *
        (sc->drv_supported_vd_count - 1));

    sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz);

    if (sc->max256vdSupport)
        sc->current_map_sz = sc->new_map_sz;
    else
        sc->current_map_sz = sc->old_map_sz;
}

/*
 * mrsas_alloc_ctlr_info_cmd:	Allocates memory for controller info command
 * input:			Adapter soft state
 *
 * Allocates DMAable memory for the controller info internal command.
 */
int
mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc)
{
    int ctlr_info_size;

    /* Allocate get controller info command */
    ctlr_info_size = sizeof(struct mrsas_ctrl_info);
    /* 32-bit addressable, single-segment tag sized for the info struct. */
    if (bus_dma_tag_create(sc->mrsas_parent_tag,
        1, 0,
        BUS_SPACE_MAXADDR_32BIT,
        BUS_SPACE_MAXADDR,
        NULL, NULL,
        ctlr_info_size,
        1,
        ctlr_info_size,
        BUS_DMA_ALLOCNOW,
        NULL, NULL,
        &sc->ctlr_info_tag)) {
        device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n");
        return (ENOMEM);
    }
    /*
     * NOTE(review): on the failure paths below, resources created by the
     * earlier steps are not torn down here — presumably the caller's
     * mrsas_free_ctlr_info_cmd() handles partial state; confirm.
     */
    if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem,
        BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) {
        device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n");
        return (ENOMEM);
    }
    if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap,
        sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb,
        &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) {
        device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n");
        return (ENOMEM);
    }
    memset(sc->ctlr_info_mem, 0, ctlr_info_size);
    return (0);
}

/*
 * mrsas_free_ctlr_info_cmd:	Free memory for controller info command
 * input:			Adapter soft state
 *
 * Deallocates memory of the get controller info cmd.
 */
void
mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc)
{
    /* Tear down in reverse order of allocation; each step is guarded. */
    if (sc->ctlr_info_phys_addr)
        bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap);
    if (sc->ctlr_info_mem != NULL)
        bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap);
    if (sc->ctlr_info_tag != NULL)
        bus_dma_tag_destroy(sc->ctlr_info_tag);
}

/*
 * mrsas_issue_polled:	Issues a polling command
 * inputs:		Adapter soft state
 *			Command packet to be issued
 *
 * This function is for posting of internal commands to Firmware. MFI requires
 * the cmd_status to be set to 0xFF before posting.
The maximum wait time of
 * the poll response timer is 180 seconds.
 */
int
mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
    struct mrsas_header *frame_hdr = &cmd->frame->hdr;
    u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
    int i, retcode = SUCCESS;

    /* 0xFF marks "pending"; firmware overwrites it on completion. */
    frame_hdr->cmd_status = 0xFF;
    /* Completion is detected by polling, not via the reply queue. */
    frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

    /* Issue the frame using inbound queue port */
    if (mrsas_issue_dcmd(sc, cmd)) {
        device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
        return (1);
    }
    /*
     * Poll response timer to wait for Firmware response. While this
     * timer with the DELAY call could block CPU, the time interval for
     * this is only 1 millisecond.
     */
    if (frame_hdr->cmd_status == 0xFF) {
        for (i = 0; i < (max_wait * 1000); i++) {
            if (frame_hdr->cmd_status == 0xFF)
                DELAY(1000);
            else
                break;
        }
    }
    if (frame_hdr->cmd_status == 0xFF) {
        device_printf(sc->mrsas_dev, "DCMD timed out after %d "
            "seconds from %s\n", max_wait, __func__);
        device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
            cmd->frame->dcmd.opcode);
        retcode = ETIMEDOUT;
    }
    return (retcode);
}

/*
 * mrsas_issue_dcmd:	Issues a MFI Pass thru cmd
 * input:		Adapter soft state mfi cmd pointer
 *
 * This function is called by mrsas_issued_blocked_cmd() and
 * mrsas_issued_polled(), to build the MPT command and then fire the command
 * to Firmware.
 */
int
mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;

    /* Wrap the MFI frame in an MPT passthru request, then ring doorbell. */
    req_desc = mrsas_build_mpt_cmd(sc, cmd);
    if (!req_desc) {
        device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n");
        return (1);
    }
    mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high);

    return (0);
}

/*
 * mrsas_build_mpt_cmd:	Calls helper function to build Passthru cmd
 * input:		Adapter soft state mfi cmd to build
 *
 * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru
 * command and prepares the MPT command to send to Firmware.
 */
MRSAS_REQUEST_DESCRIPTOR_UNION *
mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
    MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc;
    u_int16_t index;

    if (mrsas_build_mptmfi_passthru(sc, cmd)) {
        device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n");
        return NULL;
    }
    /* smid was stored by mrsas_build_mptmfi_passthru(); it is 1-based. */
    index = cmd->cmd_id.context.smid;

    req_desc = mrsas_get_request_desc(sc, index - 1);
    if (!req_desc)
        return NULL;

    req_desc->addr.Words = 0;
    req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT);

    req_desc->SCSIIO.SMID = index;

    return (req_desc);
}

/*
 * mrsas_build_mptmfi_passthru:	Builds a MPT MFI Passthru command
 * input:			Adapter soft state mfi cmd pointer
 *
 * The MPT command and the io_request are setup as a passthru command. The SGE
 * chain address is set to frame_phys_addr of the MFI command.
 */
u_int8_t
mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd)
{
    MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain;
    PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req;
    struct mrsas_mpt_cmd *mpt_cmd;
    struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr;

    mpt_cmd = mrsas_get_mpt_cmd(sc);
    if (!mpt_cmd)
        return (1);

    /* Save the smid. To be used for returning the cmd */
    mfi_cmd->cmd_id.context.smid = mpt_cmd->index;

    /* Cross-link the two commands so completion can find the MFI side. */
    mpt_cmd->sync_cmd_idx = mfi_cmd->index;

    /*
     * For cmds where the flag is set, store the flag and check on
     * completion. For cmds with this flag, don't call
     * mrsas_complete_cmd.
     */

    if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)
        mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

    io_req = mpt_cmd->io_request;

    /* Gen3 controllers need the last main-frame SGE flags cleared. */
    if (sc->mrsas_gen3_ctrl) {
        pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL;

        sgl_ptr_end += sc->max_sge_in_main_msg - 1;
        sgl_ptr_end->Flags = 0;
    }
    mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain;

    io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST;
    io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4;
    io_req->ChainOffset = sc->chain_offset_mfi_pthru;

    /* The chain element points at the MFI frame itself. */
    mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr;

    mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
        MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR;

    mpi25_ieee_chain->Length = sc->max_chain_frame_sz;

    return (0);
}

/*
 * mrsas_issue_blocked_cmd:	Synchronous wrapper around regular FW cmds
 * input:			Adapter soft state Command to be issued
 *
 * This function waits on an event for the command to be returned from the ISR.
 * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing
 * internal and ioctl commands.
 */
int
mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
    u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;
    unsigned long total_time = 0;
    int retcode = SUCCESS;

    /* Initialize cmd_status */
    cmd->cmd_status = 0xFF;

    /* Build MPT-MFI command for issue to FW */
    if (mrsas_issue_dcmd(sc, cmd)) {
        device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n");
        return (1);
    }
    /*
     * NOTE(review): sc->chan is assigned but the sleep/wakeup channel used
     * below is the address &sc->chan itself (mrsas_wakeup() uses the same
     * address) — the stored value appears unused; confirm before changing.
     */
    sc->chan = (void *)&cmd;

    /* Sleep in 1-second slices until completion or timeout. */
    while (1) {
        if (cmd->cmd_status == 0xFF) {
            tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
        } else
            break;

        if (!cmd->sync_cmd) {	/* cmd->sync will be set for an IOCTL
					 * command */
            total_time++;
            if (total_time >= max_wait) {
                device_printf(sc->mrsas_dev,
                    "Internal command timed out after %d seconds.\n", max_wait);
                retcode = 1;
                break;
            }
        }
    }

    if (cmd->cmd_status == 0xFF) {
        device_printf(sc->mrsas_dev, "DCMD timed out after %d "
            "seconds from %s\n", max_wait, __func__);
        device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n",
            cmd->frame->dcmd.opcode);
        retcode = ETIMEDOUT;
    }
    return (retcode);
}

/*
 * mrsas_complete_mptmfi_passthru:	Completes a command
 * input:	@sc:			Adapter soft state
 *		@cmd:			Command to be completed
 *		@status:		cmd completion status
 *
 * This function is called from mrsas_complete_cmd() after an interrupt is
 * received from Firmware, and io_request->Function is
 * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST.
 */
void
mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd,
    u_int8_t status)
{
    struct mrsas_header *hdr = &cmd->frame->hdr;
    u_int8_t cmd_status = cmd->frame->hdr.cmd_status;

    /* Reset the retry counter for future re-tries */
    cmd->retry_for_fw_reset = 0;

    if (cmd->ccb_ptr)
        cmd->ccb_ptr = NULL;

    switch (hdr->cmd) {
    case MFI_CMD_INVALID:
        device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n");
        break;
    case MFI_CMD_PD_SCSI_IO:
    case MFI_CMD_LD_SCSI_IO:
        /*
         * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been
         * issued either through an IO path or an IOCTL path. If it
         * was via IOCTL, we will send it to internal completion.
         */
        if (cmd->sync_cmd) {
            cmd->sync_cmd = 0;
            mrsas_wakeup(sc, cmd);
            break;
        }
        /* fallthrough -- non-sync SCSI IO is completed like a DCMD */
    case MFI_CMD_SMP:
    case MFI_CMD_STP:
    case MFI_CMD_DCMD:
        /* Check for LD map update */
        if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) &&
            (cmd->frame->dcmd.mbox.b[1] == 1)) {
            sc->fast_path_io = 0;
            mtx_lock(&sc->raidmap_lock);
            sc->map_update_cmd = NULL;
            if (cmd_status != 0) {
                if (cmd_status != MFI_STAT_NOT_FOUND)
                    device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status);
                else {
                    mrsas_release_mfi_cmd(cmd);
                    mtx_unlock(&sc->raidmap_lock);
                    break;
                }
            } else
                sc->map_id++;
            mrsas_release_mfi_cmd(cmd);
            /* Re-validate the new map and re-arm the map-sync DCMD. */
            if (MR_ValidateMapInfo(sc))
                sc->fast_path_io = 0;
            else
                sc->fast_path_io = 1;
            mrsas_sync_map_info(sc);
            mtx_unlock(&sc->raidmap_lock);
            break;
        }
        if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO ||
            cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) {
            sc->mrsas_aen_triggered = 0;
        }
        /* FW has an updated PD sequence */
        if ((cmd->frame->dcmd.opcode ==
            MR_DCMD_SYSTEM_PD_MAP_GET_INFO) &&
            (cmd->frame->dcmd.mbox.b[0] == 1)) {

            mtx_lock(&sc->raidmap_lock);
            sc->jbod_seq_cmd = NULL;
            mrsas_release_mfi_cmd(cmd);

            if (cmd_status == MFI_STAT_OK) {
                sc->pd_seq_map_id++;
                /* Re-register a pd sync seq num cmd */
                if (megasas_sync_pd_seq_num(sc, true))
                    sc->use_seqnum_jbod_fp = 0;
            } else {
                sc->use_seqnum_jbod_fp = 0;
                device_printf(sc->mrsas_dev,
                    "Jbod map sync failed, status=%x\n", cmd_status);
            }
            mtx_unlock(&sc->raidmap_lock);
            break;
        }
        /* See if got an event notification */
        if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT)
            mrsas_complete_aen(sc, cmd);
        else
            mrsas_wakeup(sc, cmd);
        break;
    case MFI_CMD_ABORT:
        /* Command issued to abort another cmd return */
        mrsas_complete_abort(sc, cmd);
        break;
    default:
        device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd);
        break;
    }
}

/*
 * mrsas_wakeup:	Completes an internal command
 * input:		Adapter soft state
 *			Command to be completed
 *
 * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait
 * timer is started. This function is called from
 * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up
 * from the command wait.
 */
void
mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
    cmd->cmd_status = cmd->frame->io.cmd_status;

    /* 0xFF means "pending"; normalize so the waiter sees completion. */
    if (cmd->cmd_status == 0xFF)
        cmd->cmd_status = 0;

    /* Wake the sleeper in mrsas_issue_blocked_cmd() on channel &sc->chan. */
    sc->chan = (void *)&cmd;
    wakeup_one((void *)&sc->chan);
    return;
}

/*
 * mrsas_shutdown_ctlr:	Instructs FW to shutdown the controller input:
 * Adapter soft state Shutdown/Hibernate
 *
 * This function issues a DCMD internal command to Firmware to initiate shutdown
 * of the controller.
 */
static void
mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode)
{
    struct mrsas_mfi_cmd *cmd;
    struct mrsas_dcmd_frame *dcmd;

    /* Nothing to shut down if the adapter is already dead. */
    if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
        return;

    cmd = mrsas_get_mfi_cmd(sc);
    if (!cmd) {
        device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n");
        return;
    }
    /* Abort any long-running internal commands before shutting down. */
    if (sc->aen_cmd)
        mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd);
    if (sc->map_update_cmd)
        mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd);
    if (sc->jbod_seq_cmd)
        mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd);

    dcmd = &cmd->frame->dcmd;
    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

    /* No data transfer: shutdown is a zero-SGE DCMD. */
    dcmd->cmd = MFI_CMD_DCMD;
    dcmd->cmd_status = 0x0;
    dcmd->sge_count = 0;
    dcmd->flags = MFI_FRAME_DIR_NONE;
    dcmd->timeout = 0;
    dcmd->pad_0 = 0;
    dcmd->data_xfer_len = 0;
    dcmd->opcode = opcode;

    device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n");

    mrsas_issue_blocked_cmd(sc, cmd);
    mrsas_release_mfi_cmd(cmd);

    return;
}

/*
 * mrsas_flush_cache:	Requests FW to flush all its caches input:
 * Adapter soft state
 *
 * This function is issues a DCMD internal command to Firmware to initiate
 * flushing of all caches.
 */
static void
mrsas_flush_cache(struct mrsas_softc *sc)
{
    struct mrsas_mfi_cmd *cmd;
    struct mrsas_dcmd_frame *dcmd;

    if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)
        return;

    cmd = mrsas_get_mfi_cmd(sc);
    if (!cmd) {
        device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n");
        return;
    }
    dcmd = &cmd->frame->dcmd;
    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

    dcmd->cmd = MFI_CMD_DCMD;
    dcmd->cmd_status = 0x0;
    dcmd->sge_count = 0;
    dcmd->flags = MFI_FRAME_DIR_NONE;
    dcmd->timeout = 0;
    dcmd->pad_0 = 0;
    dcmd->data_xfer_len = 0;
    dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
    /* Flush both the controller cache and the disk caches. */
    dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;

    mrsas_issue_blocked_cmd(sc, cmd);
    mrsas_release_mfi_cmd(cmd);

    return;
}

/*
 * megasas_sync_pd_seq_num:	Sync JBOD (system PD) sequence-number map with FW
 * input:			Adapter soft state; pend selects the async
 *				(pending/write) registration vs. synchronous read
 *
 * When 'pend' is true the DCMD is fired asynchronously and registered in
 * sc->jbod_seq_cmd; otherwise it is issued polled and the local map id is
 * advanced on success.
 */
int
megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend)
{
    int retcode = 0;
    u_int8_t do_ocr = 1;
    struct mrsas_mfi_cmd *cmd;
    struct mrsas_dcmd_frame *dcmd;
    uint32_t pd_seq_map_sz;
    struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync;
    bus_addr_t pd_seq_h;

    /* Struct embeds one MR_PD_CFG_SEQ, hence MAX_PHYSICAL_DEVICES - 1. */
    pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) +
        (sizeof(struct MR_PD_CFG_SEQ) *
        (MAX_PHYSICAL_DEVICES - 1));

    cmd = mrsas_get_mfi_cmd(sc);
    if (!cmd) {
        device_printf(sc->mrsas_dev,
            "Cannot alloc for ld map info cmd.\n");
        return 1;
    }
    dcmd = &cmd->frame->dcmd;

    /* Double-buffered JBOD map; pick the slot for this map generation. */
    pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)];
    pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)];
    if (!pd_sync) {
        device_printf(sc->mrsas_dev,
            "Failed to alloc mem for jbod map info.\n");
        mrsas_release_mfi_cmd(cmd);
        return (ENOMEM);
    }
    memset(pd_sync, 0, pd_seq_map_sz);
    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
    dcmd->cmd = MFI_CMD_DCMD;
    dcmd->cmd_status = 0xFF;
    dcmd->sge_count = 1;
    dcmd->timeout = 0;
    dcmd->pad_0 = 0;
    dcmd->data_xfer_len = (pd_seq_map_sz);
    dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO);
    dcmd->sgl.sge32[0].phys_addr = (pd_seq_h);
    dcmd->sgl.sge32[0].length = (pd_seq_map_sz);

    if (pend) {
        /* Async registration: FW completes this when the map changes. */
        dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG;
        dcmd->flags = (MFI_FRAME_DIR_WRITE);
        sc->jbod_seq_cmd = cmd;
        /*
         * NOTE(review): on failure the cmd is not released here —
         * matches the pattern used by mrsas_sync_map_info(); confirm
         * the OCR path reclaims it before changing.
         */
        if (mrsas_issue_dcmd(sc, cmd)) {
            device_printf(sc->mrsas_dev,
                "Fail to send sync map info command.\n");
            return 1;
        } else
            return 0;
    } else
        dcmd->flags = MFI_FRAME_DIR_READ;

    retcode = mrsas_issue_polled(sc, cmd);
    if (retcode == ETIMEDOUT)
        goto dcmd_timeout;

    if (pd_sync->count > MAX_PHYSICAL_DEVICES) {
        device_printf(sc->mrsas_dev,
            "driver supports max %d JBOD, but FW reports %d\n",
            MAX_PHYSICAL_DEVICES, pd_sync->count);
        retcode = -EINVAL;
    }
    if (!retcode)
        sc->pd_seq_map_id++;
    do_ocr = 0;

dcmd_timeout:
    /* do_ocr still set means the DCMD timed out: schedule a reset. */
    if (do_ocr)
        sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

    return (retcode);
}

/*
 * mrsas_get_map_info:	Load and validate RAID map input:
 * Adapter instance soft state
 *
 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
 * and validate RAID map. It returns 0 if successful, 1 other- wise.
 */
static int
mrsas_get_map_info(struct mrsas_softc *sc)
{
    uint8_t retcode = 0;

    /* Fast-path IO stays off unless the new map validates cleanly. */
    sc->fast_path_io = 0;
    if (!mrsas_get_ld_map_info(sc)) {
        retcode = MR_ValidateMapInfo(sc);
        if (retcode == 0) {
            sc->fast_path_io = 1;
            return 0;
        }
    }
    return 1;
}

/*
 * mrsas_get_ld_map_info:	Get FW's ld_map structure input:
 * Adapter instance soft state
 *
 * Issues an internal command (DCMD) to get the FW's controller PD list
 * structure.
3994 */ 3995static int 3996mrsas_get_ld_map_info(struct mrsas_softc *sc) 3997{ 3998 int retcode = 0; 3999 struct mrsas_mfi_cmd *cmd; 4000 struct mrsas_dcmd_frame *dcmd; 4001 void *map; 4002 bus_addr_t map_phys_addr = 0; 4003 4004 cmd = mrsas_get_mfi_cmd(sc); 4005 if (!cmd) { 4006 device_printf(sc->mrsas_dev, 4007 "Cannot alloc for ld map info cmd.\n"); 4008 return 1; 4009 } 4010 dcmd = &cmd->frame->dcmd; 4011 4012 map = (void *)sc->raidmap_mem[(sc->map_id & 1)]; 4013 map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)]; 4014 if (!map) { 4015 device_printf(sc->mrsas_dev, 4016 "Failed to alloc mem for ld map info.\n"); 4017 mrsas_release_mfi_cmd(cmd); 4018 return (ENOMEM); 4019 } 4020 memset(map, 0, sizeof(sc->max_map_sz)); 4021 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4022 4023 dcmd->cmd = MFI_CMD_DCMD; 4024 dcmd->cmd_status = 0xFF; 4025 dcmd->sge_count = 1; 4026 dcmd->flags = MFI_FRAME_DIR_READ; 4027 dcmd->timeout = 0; 4028 dcmd->pad_0 = 0; 4029 dcmd->data_xfer_len = sc->current_map_sz; 4030 dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; 4031 dcmd->sgl.sge32[0].phys_addr = map_phys_addr; 4032 dcmd->sgl.sge32[0].length = sc->current_map_sz; 4033 4034 retcode = mrsas_issue_polled(sc, cmd); 4035 if (retcode == ETIMEDOUT) 4036 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4037 4038 return (retcode); 4039} 4040 4041/* 4042 * mrsas_sync_map_info: Get FW's ld_map structure input: 4043 * Adapter instance soft state 4044 * 4045 * Issues an internal command (DCMD) to get the FW's controller PD list 4046 * structure. 
 */
static int
mrsas_sync_map_info(struct mrsas_softc *sc)
{
    int retcode = 0, i;
    struct mrsas_mfi_cmd *cmd;
    struct mrsas_dcmd_frame *dcmd;
    uint32_t size_sync_info, num_lds;
    MR_LD_TARGET_SYNC *target_map = NULL;
    MR_DRV_RAID_MAP_ALL *map;
    MR_LD_RAID *raid;
    MR_LD_TARGET_SYNC *ld_sync;
    bus_addr_t map_phys_addr = 0;

    cmd = mrsas_get_mfi_cmd(sc);
    if (!cmd) {
        device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n");
        return ENOMEM;
    }
    map = sc->ld_drv_map[sc->map_id & 1];
    num_lds = map->raidMap.ldCount;

    dcmd = &cmd->frame->dcmd;
    size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds;
    memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

    /* Write into the *other* half of the double-buffered map. */
    target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1];
    memset(target_map, 0, sc->max_map_sz);

    map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1];

    ld_sync = (MR_LD_TARGET_SYNC *) target_map;

    /* Report each LD's target id and sequence number to firmware. */
    for (i = 0; i < num_lds; i++, ld_sync++) {
        raid = MR_LdRaidGet(i, map);
        ld_sync->targetId = MR_GetLDTgtId(i, map);
        ld_sync->seqNum = raid->seqNum;
    }

    /* MBOX_PEND_FLAG makes FW hold this DCMD until the map changes. */
    dcmd->cmd = MFI_CMD_DCMD;
    dcmd->cmd_status = 0xFF;
    dcmd->sge_count = 1;
    dcmd->flags = MFI_FRAME_DIR_WRITE;
    dcmd->timeout = 0;
    dcmd->pad_0 = 0;
    dcmd->data_xfer_len = sc->current_map_sz;
    dcmd->mbox.b[0] = num_lds;
    dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG;
    dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
    dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
    dcmd->sgl.sge32[0].length = sc->current_map_sz;

    /*
     * NOTE(review): on issue failure the cmd registered in
     * sc->map_update_cmd is not released here — presumably reclaimed by
     * the OCR re-fire/release logic; confirm before changing.
     */
    sc->map_update_cmd = cmd;
    if (mrsas_issue_dcmd(sc, cmd)) {
        device_printf(sc->mrsas_dev,
            "Fail to send sync map info command.\n");
        return (1);
    }
    return (retcode);
}

/*
 * mrsas_get_pd_list:	Returns FW's PD list structure input:
 * Adapter soft state
 *
 * Issues an internal command (DCMD)
to get the FW's controller PD list 4113 * structure. This information is mainly used to find out about system 4114 * supported by Firmware. 4115 */ 4116static int 4117mrsas_get_pd_list(struct mrsas_softc *sc) 4118{ 4119 int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size; 4120 u_int8_t do_ocr = 1; 4121 struct mrsas_mfi_cmd *cmd; 4122 struct mrsas_dcmd_frame *dcmd; 4123 struct MR_PD_LIST *pd_list_mem; 4124 struct MR_PD_ADDRESS *pd_addr; 4125 bus_addr_t pd_list_phys_addr = 0; 4126 struct mrsas_tmp_dcmd *tcmd; 4127 4128 cmd = mrsas_get_mfi_cmd(sc); 4129 if (!cmd) { 4130 device_printf(sc->mrsas_dev, 4131 "Cannot alloc for get PD list cmd\n"); 4132 return 1; 4133 } 4134 dcmd = &cmd->frame->dcmd; 4135 4136 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 4137 pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 4138 if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) { 4139 device_printf(sc->mrsas_dev, 4140 "Cannot alloc dmamap for get PD list cmd\n"); 4141 mrsas_release_mfi_cmd(cmd); 4142 mrsas_free_tmp_dcmd(tcmd); 4143 free(tcmd, M_MRSAS); 4144 return (ENOMEM); 4145 } else { 4146 pd_list_mem = tcmd->tmp_dcmd_mem; 4147 pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 4148 } 4149 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4150 4151 dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; 4152 dcmd->mbox.b[1] = 0; 4153 dcmd->cmd = MFI_CMD_DCMD; 4154 dcmd->cmd_status = 0xFF; 4155 dcmd->sge_count = 1; 4156 dcmd->flags = MFI_FRAME_DIR_READ; 4157 dcmd->timeout = 0; 4158 dcmd->pad_0 = 0; 4159 dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 4160 dcmd->opcode = MR_DCMD_PD_LIST_QUERY; 4161 dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr; 4162 dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); 4163 4164 if (!sc->mask_interrupts) 4165 retcode = mrsas_issue_blocked_cmd(sc, cmd); 4166 else 4167 retcode = mrsas_issue_polled(sc, cmd); 4168 4169 if (retcode == ETIMEDOUT) 4170 goto dcmd_timeout; 4171 4172 /* Get the instance PD 
list */ 4173 pd_count = MRSAS_MAX_PD; 4174 pd_addr = pd_list_mem->addr; 4175 if (pd_list_mem->count < pd_count) { 4176 memset(sc->local_pd_list, 0, 4177 MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); 4178 for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) { 4179 sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId; 4180 sc->local_pd_list[pd_addr->deviceId].driveType = 4181 pd_addr->scsiDevType; 4182 sc->local_pd_list[pd_addr->deviceId].driveState = 4183 MR_PD_STATE_SYSTEM; 4184 pd_addr++; 4185 } 4186 /* 4187 * Use mutext/spinlock if pd_list component size increase more than 4188 * 32 bit. 4189 */ 4190 memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list)); 4191 do_ocr = 0; 4192 } 4193dcmd_timeout: 4194 mrsas_free_tmp_dcmd(tcmd); 4195 free(tcmd, M_MRSAS); 4196 4197 if (do_ocr) 4198 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4199 4200 if (!sc->mask_interrupts) 4201 mrsas_release_mfi_cmd(cmd); 4202 4203 return (retcode); 4204} 4205 4206/* 4207 * mrsas_get_ld_list: Returns FW's LD list structure input: 4208 * Adapter soft state 4209 * 4210 * Issues an internal command (DCMD) to get the FW's controller PD list 4211 * structure. This information is mainly used to find out about supported by 4212 * the FW. 
4213 */ 4214static int 4215mrsas_get_ld_list(struct mrsas_softc *sc) 4216{ 4217 int ld_list_size, retcode = 0, ld_index = 0, ids = 0; 4218 u_int8_t do_ocr = 1; 4219 struct mrsas_mfi_cmd *cmd; 4220 struct mrsas_dcmd_frame *dcmd; 4221 struct MR_LD_LIST *ld_list_mem; 4222 bus_addr_t ld_list_phys_addr = 0; 4223 struct mrsas_tmp_dcmd *tcmd; 4224 4225 cmd = mrsas_get_mfi_cmd(sc); 4226 if (!cmd) { 4227 device_printf(sc->mrsas_dev, 4228 "Cannot alloc for get LD list cmd\n"); 4229 return 1; 4230 } 4231 dcmd = &cmd->frame->dcmd; 4232 4233 tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); 4234 ld_list_size = sizeof(struct MR_LD_LIST); 4235 if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) { 4236 device_printf(sc->mrsas_dev, 4237 "Cannot alloc dmamap for get LD list cmd\n"); 4238 mrsas_release_mfi_cmd(cmd); 4239 mrsas_free_tmp_dcmd(tcmd); 4240 free(tcmd, M_MRSAS); 4241 return (ENOMEM); 4242 } else { 4243 ld_list_mem = tcmd->tmp_dcmd_mem; 4244 ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr; 4245 } 4246 memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); 4247 4248 if (sc->max256vdSupport) 4249 dcmd->mbox.b[0] = 1; 4250 4251 dcmd->cmd = MFI_CMD_DCMD; 4252 dcmd->cmd_status = 0xFF; 4253 dcmd->sge_count = 1; 4254 dcmd->flags = MFI_FRAME_DIR_READ; 4255 dcmd->timeout = 0; 4256 dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); 4257 dcmd->opcode = MR_DCMD_LD_GET_LIST; 4258 dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr; 4259 dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); 4260 dcmd->pad_0 = 0; 4261 4262 if (!sc->mask_interrupts) 4263 retcode = mrsas_issue_blocked_cmd(sc, cmd); 4264 else 4265 retcode = mrsas_issue_polled(sc, cmd); 4266 4267 if (retcode == ETIMEDOUT) 4268 goto dcmd_timeout; 4269 4270#if VD_EXT_DEBUG 4271 printf("Number of LDs %d\n", ld_list_mem->ldCount); 4272#endif 4273 4274 /* Get the instance LD list */ 4275 if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) { 4276 sc->CurLdCount = ld_list_mem->ldCount; 4277 memset(sc->ld_ids, 0xff, 
MAX_LOGICAL_DRIVES_EXT); 4278 for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) { 4279 if (ld_list_mem->ldList[ld_index].state != 0) { 4280 ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 4281 sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; 4282 } 4283 } 4284 do_ocr = 0; 4285 } 4286dcmd_timeout: 4287 mrsas_free_tmp_dcmd(tcmd); 4288 free(tcmd, M_MRSAS); 4289 4290 if (do_ocr) 4291 sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; 4292 if (!sc->mask_interrupts) 4293 mrsas_release_mfi_cmd(cmd); 4294 4295 return (retcode); 4296} 4297 4298/* 4299 * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input: 4300 * Adapter soft state Temp command Size of alloction 4301 * 4302 * Allocates DMAable memory for a temporary internal command. The allocated 4303 * memory is initialized to all zeros upon successful loading of the dma 4304 * mapped memory. 4305 */ 4306int 4307mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, 4308 struct mrsas_tmp_dcmd *tcmd, int size) 4309{ 4310 if (bus_dma_tag_create(sc->mrsas_parent_tag, 4311 1, 0, 4312 BUS_SPACE_MAXADDR_32BIT, 4313 BUS_SPACE_MAXADDR, 4314 NULL, NULL, 4315 size, 4316 1, 4317 size, 4318 BUS_DMA_ALLOCNOW, 4319 NULL, NULL, 4320 &tcmd->tmp_dcmd_tag)) { 4321 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n"); 4322 return (ENOMEM); 4323 } 4324 if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem, 4325 BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) { 4326 device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n"); 4327 return (ENOMEM); 4328 } 4329 if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap, 4330 tcmd->tmp_dcmd_mem, size, mrsas_addr_cb, 4331 &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) { 4332 device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n"); 4333 return (ENOMEM); 4334 } 4335 memset(tcmd->tmp_dcmd_mem, 0, size); 4336 return (0); 4337} 4338 4339/* 4340 * mrsas_free_tmp_dcmd: Free memory for temporary command input: 4341 * temporary 
 dcmd pointer
 *
 * Deallocates memory of the temporary command for use in the construction of
 * the internal DCMD.  Safe to call on a partially-initialized tcmd: each
 * resource is torn down only if its field is non-NULL/non-zero.
 */
void
mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp)
{
	if (tmp->tmp_dcmd_phys_addr)
		bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap);
	if (tmp->tmp_dcmd_mem != NULL)
		bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap);
	if (tmp->tmp_dcmd_tag != NULL)
		bus_dma_tag_destroy(tmp->tmp_dcmd_tag);
}

/*
 * mrsas_issue_blocked_abort_cmd:	Aborts previously issued cmd input:
 * Adapter soft state Previously issued cmd to be aborted
 *
 * This function is used to abort previously issued commands, such as AEN and
 * RAID map sync map commands.  The abort command is sent as a MFI_CMD_ABORT
 * frame and the caller then sleeps until mrsas_complete_abort() flips
 * cmd_status and wakes the channel, or until roughly
 * MRSAS_INTERNAL_CMD_WAIT_TIME seconds elapse.
 *
 * Returns 0 on success, 1 on allocation/issue failure or timeout.
 */
static int
mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd_to_abort)
{
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_abort_frame *abort_fr;
	u_int8_t retcode = 0;
	unsigned long total_time = 0;
	u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n");
		return (1);
	}
	abort_fr = &cmd->frame->abort;

	/* Prepare and issue the abort frame */
	abort_fr->cmd = MFI_CMD_ABORT;
	abort_fr->cmd_status = 0xFF;
	abort_fr->flags = 0;
	/* Identify the victim by its command index and frame address. */
	abort_fr->abort_context = cmd_to_abort->index;
	abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr;
	abort_fr->abort_mfi_phys_addr_hi = 0;

	/* 0xFF = "still pending"; completion path sets it and wakes us. */
	cmd->sync_cmd = 1;
	cmd->cmd_status = 0xFF;

	if (mrsas_issue_dcmd(sc, cmd)) {
		device_printf(sc->mrsas_dev, "Fail to send abort command.\n");
		/*
		 * NOTE(review): cmd is not released on this failure path —
		 * looks like a command-slot leak; verify before changing.
		 */
		return (1);
	}
	/* Wait for this cmd to complete */
	/*
	 * NOTE(review): the sleep channel is &sc->chan itself (the address
	 * of the field), matching wakeup_one() in mrsas_complete_abort();
	 * the value stored into sc->chan here is never read as a channel,
	 * so this assignment appears redundant — confirm before removing.
	 */
	sc->chan = (void *)&cmd;
	while (1) {
		if (cmd->cmd_status == 0xFF) {
			/* Sleep up to 1 s (hz ticks) per iteration. */
			tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);
		} else
			break;
		/* Each loop pass is ~1 s, so total_time counts seconds. */
		total_time++;
		if (total_time >= max_wait) {
			device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait);
			retcode = 1;
			break;
		}
	}

	cmd->sync_cmd = 0;
	mrsas_release_mfi_cmd(cmd);
	return (retcode);
}

/*
 * mrsas_complete_abort:	Completes aborting a command input:
 * Adapter soft state Cmd that was issued to abort another cmd
 *
 * The mrsas_issue_blocked_abort_cmd() function waits for the command status to
 * change after sending the command.  This function is called from
 * mrsas_complete_mptmfi_passthru() to wake up the sleeping thread: it marks
 * the abort as no longer pending (cmd_status != 0xFF) and wakes one waiter
 * on the &sc->chan channel.
 */
void
mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	if (cmd->sync_cmd) {
		cmd->sync_cmd = 0;
		/* Any value != 0xFF releases the waiter's loop. */
		cmd->cmd_status = 0;
		sc->chan = (void *)&cmd;
		wakeup_one((void *)&sc->chan);
	}
	return;
}

/*
 * mrsas_aen_handler:	AEN processing callback function from thread context
 * input: Adapter soft state
 *
 * Asynchronous event handler.  Dispatches on the event code in
 * sc->evt_detail_mem: refreshes the PD/LD lists and rescans the matching
 * CAM SIM (sim_1 = physical, sim_0 = logical, per the calls below), then
 * re-registers for the next AEN at seq_num + 1.  If any list refresh fails,
 * re-registration is skipped (skip_register_aen) — a later event/reset is
 * expected to recover.
 */
void
mrsas_aen_handler(struct mrsas_softc *sc)
{
	union mrsas_evt_class_locale class_locale;
	int doscan = 0;
	u_int32_t seq_num;
	int error, fail_aen = 0;

	if (sc == NULL) {
		printf("invalid instance!\n");
		return;
	}
	/* No event processing while detach or OCR is running. */
	if (sc->remove_in_progress || sc->reset_in_progress) {
		device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n",
		    __func__, __LINE__);
		return;
	}
	if (sc->evt_detail_mem) {
		switch (sc->evt_detail_mem->code) {
		case MR_EVT_PD_INSERTED:
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_PD_REMOVED:
			fail_aen = mrsas_get_pd_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_1);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_LD_OFFLINE:
		case MR_EVT_CFG_CLEARED:
		case MR_EVT_LD_DELETED:
			mrsas_bus_scan_sim(sc, sc->sim_0);
			break;
		case MR_EVT_LD_CREATED:
			fail_aen = mrsas_get_ld_list(sc);
			if (!fail_aen)
				mrsas_bus_scan_sim(sc, sc->sim_0);
			else
				goto skip_register_aen;
			break;
		case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED:
		case MR_EVT_FOREIGN_CFG_IMPORTED:
		case MR_EVT_LD_STATE_CHANGE:
			/* Broad config change: rescan both buses below. */
			doscan = 1;
			break;
		case MR_EVT_CTRL_PROP_CHANGED:
			fail_aen = mrsas_get_ctrl_info(sc);
			if (fail_aen)
				goto skip_register_aen;
			break;
		default:
			break;
		}
	} else {
		device_printf(sc->mrsas_dev, "invalid evt_detail\n");
		return;
	}
	if (doscan) {
		fail_aen = mrsas_get_pd_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n");
			mrsas_bus_scan_sim(sc, sc->sim_1);
		} else
			goto skip_register_aen;

		fail_aen = mrsas_get_ld_list(sc);
		if (!fail_aen) {
			mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n");
			mrsas_bus_scan_sim(sc, sc->sim_0);
		} else
			goto skip_register_aen;
	}
	seq_num = sc->evt_detail_mem->seq_num + 1;

	/* Register AEN with FW for latest sequence number plus 1 */
	class_locale.members.reserved = 0;
	class_locale.members.locale = MR_EVT_LOCALE_ALL;
	class_locale.members.class = MR_EVT_CLASS_DEBUG;

	/* An AEN is already outstanding; don't register another. */
	if (sc->aen_cmd != NULL)
		return;

	mtx_lock(&sc->aen_lock);
	error = mrsas_register_aen(sc, seq_num,
	    class_locale.word);
	mtx_unlock(&sc->aen_lock);

	if (error)
		device_printf(sc->mrsas_dev, "register aen failed error %x\n", error);

skip_register_aen:
	return;

}


/*
 * mrsas_complete_aen:	Completes AEN command
 * input: Adapter soft
 state
 * Cmd that was issued to abort another cmd
 *
 * This function will be called from ISR and will continue event processing from
 * thread context by enqueuing task in ev_tq (callback function
 * "mrsas_aen_handler").  It also wakes any userland poller waiting on the
 * AEN select/poll channel, releases the completed AEN command slot, and
 * clears sc->aen_cmd so mrsas_aen_handler() can register the next AEN.
 */
void
mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	/*
	 * Don't signal app if it is just an aborted previously registered
	 * aen
	 */
	if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) {
		sc->mrsas_aen_triggered = 1;
		/* aen_lock guards the poll-waiting flag and selinfo. */
		mtx_lock(&sc->aen_lock);
		if (sc->mrsas_poll_waiting) {
			sc->mrsas_poll_waiting = 0;
			selwakeup(&sc->mrsas_select);
		}
		mtx_unlock(&sc->aen_lock);
	} else
		cmd->abort_aen = 0;

	sc->aen_cmd = NULL;
	mrsas_release_mfi_cmd(cmd);

	/* Continue event processing in thread context (mrsas_aen_handler). */
	taskqueue_enqueue(sc->ev_tq, &sc->ev_task);

	return;
}

/* newbus device methods for the mrsas(4) PCI driver. */
static device_method_t mrsas_methods[] = {
	DEVMETHOD(device_probe, mrsas_probe),
	DEVMETHOD(device_attach, mrsas_attach),
	DEVMETHOD(device_detach, mrsas_detach),
	DEVMETHOD(device_suspend, mrsas_suspend),
	DEVMETHOD(device_resume, mrsas_resume),
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),
	{0, 0}
};

static driver_t mrsas_driver = {
	"mrsas",
	mrsas_methods,
	sizeof(struct mrsas_softc)
};

static devclass_t mrsas_devclass;

/* Attach to the PCI bus; requires the CAM module. */
DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0);
MODULE_DEPEND(mrsas, cam, 1, 1, 1);