Lines matching refs: ioc (drivers/scsi/mpt3sas/mpt3sas_base.c)

134 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
137 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
139 _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
149 * @ioc: per adapter object.
157 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
165 ioc_err(ioc, "Command %s\n",
173 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
183 struct MPT3SAS_ADAPTER *ioc;
188 /* global ioc spinlock to protect controller list on list operations */
191 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
192 ioc->fwfault_debug = mpt3sas_fwfault_debug;
243 * @ioc: per adapter object
248 _base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
256 u16 cmd_credit = ioc->facts.RequestCredit + 1;
257 void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
259 (cmd_credit * ioc->request_sz) + (index * sizeof(u32));
305 * @ioc: per adapter object
312 _base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
316 u16 cmd_credit = ioc->facts.RequestCredit + 1;
318 base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
319 (cmd_credit * ioc->request_sz) +
321 chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
322 ioc->request_sz) + (sge_chain_count * ioc->request_sz);
331 * @ioc: per adapter object
338 _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
342 u16 cmd_credit = ioc->facts.RequestCredit + 1;
344 base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
345 (cmd_credit * ioc->request_sz) +
347 chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
348 ioc->request_sz) + (sge_chain_count * ioc->request_sz);
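
For the mCPU (MPI endpoint) controllers, request frames, reply-free entries and chain buffers all live at fixed offsets inside the BAR0 window, which is what the three helpers above compute. A runnable sketch of the same arithmetic follows; the macro values and sizes are demo stand-ins for what the driver reads from IOC Facts:

/* Offset math behind _base_clone_reply_to_sys_mem() and
 * _base_get_chain(): request frames start at MPI_FRAME_START_OFFSET,
 * the reply-free entries follow the (RequestCredit + 1) frames, and
 * per-smid chain buffers come after the reply-free region.  All
 * numeric values are assumptions for the demo. */
#include <stdio.h>
#include <stdint.h>

#define MPI_FRAME_START_OFFSET  256     /* assumed for the demo */
#define REPLY_FREE_POOL_SIZE    512     /* assumed reply-free region size */

int main(void)
{
    uint16_t cmd_credit = 128 + 1;      /* facts.RequestCredit + 1 */
    uint32_t request_sz = 128;          /* ioc->request_sz */
    uint8_t max_chain_depth = 16;       /* facts.MaxChainDepth */
    uint16_t smid = 5, sge_chain_count = 2, index = 7;

    /* reply-free entry: right after the request frames */
    size_t reply_free_off = MPI_FRAME_START_OFFSET +
        (size_t)cmd_credit * request_sz + index * sizeof(uint32_t);

    /* chain buffer: after request frames + reply-free region */
    size_t base_chain_off = MPI_FRAME_START_OFFSET +
        (size_t)cmd_credit * request_sz + REPLY_FREE_POOL_SIZE;
    size_t chain_off = base_chain_off +
        ((size_t)smid * max_chain_depth + sge_chain_count) * request_sz;

    printf("reply_free[%u] at +0x%zx, chain at +0x%zx\n",
           (unsigned)index, reply_free_off, chain_off);
    return 0;
}
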
357 * @ioc: per adapter object
364 _base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
366 u16 cmd_credit = ioc->facts.RequestCredit + 1;
368 void __iomem *chain_end = _base_get_chain(ioc,
370 ioc->facts.MaxChainDepth);
379 * @ioc: per adapter object
385 _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
387 u16 cmd_credit = ioc->facts.RequestCredit + 1;
388 phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
390 ioc->facts.MaxChainDepth);
400 * @ioc: per adapter object
406 _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
412 for (index = 0; index < ioc->scsiio_depth; index++) {
413 for (j = 0; j < ioc->chains_needed_per_io; j++) {
414 ct = &ioc->chain_lookup[index].chains_per_smid[j];
419 ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
429 * @ioc: per adapter object.
433 static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
473 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
475 ioc_err(ioc, "scmd is NULL\n");
498 buffer_iomem = _base_get_buffer_bar0(ioc, smid);
499 buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);
509 for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
522 _base_get_chain_buffer_dma_to_chain_buffer(ioc,
531 _base_get_chain(ioc,
535 dst_addr_phys = _base_get_chain_phys(ioc,
558 ioc->config_vaddr,
596 src_chain_addr[i], ioc->request_sz);
601 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
602 * @arg: input argument, used to derive ioc
610 struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
613 if (!ioc)
616 pdev = ioc->pdev;
625 * @ioc: Per Adapter Object
629 static void _base_sync_drv_fw_timestamp(struct MPT3SAS_ADAPTER *ioc)
638 mutex_lock(&ioc->scsih_cmds.mutex);
639 if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
640 ioc_err(ioc, "scsih_cmd in use %s\n", __func__);
643 ioc->scsih_cmds.status = MPT3_CMD_PENDING;
644 smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
646 ioc_err(ioc, "Failed obtaining a smid %s\n", __func__);
647 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
650 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
651 ioc->scsih_cmds.smid = smid;
660 init_completion(&ioc->scsih_cmds.done);
661 ioc->put_smid_default(ioc, smid);
662 dinitprintk(ioc, ioc_info(ioc,
665 wait_for_completion_timeout(&ioc->scsih_cmds.done,
667 if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
668 mpt3sas_check_cmd_timeout(ioc,
669 ioc->scsih_cmds.status, mpi_request,
673 if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
674 mpi_reply = ioc->scsih_cmds.reply;
675 dinitprintk(ioc, ioc_info(ioc,
682 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
683 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
685 mutex_unlock(&ioc->scsih_cmds.mutex);
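
_base_sync_drv_fw_timestamp() follows the driver's standard internal-command handshake: take the command mutex, verify the slot is NOT_USED, mark it PENDING, post the frame, wait on a completion with a timeout, then inspect COMPLETE/REPLY_VALID. A userspace model of just the status-word discipline; the MPT3_CMD_* values mirror mpt3sas_base.h, and the "firmware" here is a direct function call rather than put_smid plus wait_for_completion_timeout():

#include <stdio.h>
#include <pthread.h>

#define MPT3_CMD_NOT_USED       0x8000
#define MPT3_CMD_COMPLETE       0x0001
#define MPT3_CMD_PENDING        0x0002
#define MPT3_CMD_REPLY_VALID    0x0004

static pthread_mutex_t cmd_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int cmd_status = MPT3_CMD_NOT_USED;

/* stand-in for the firmware completing the request */
static void fake_firmware_complete(void)
{
    cmd_status |= MPT3_CMD_COMPLETE | MPT3_CMD_REPLY_VALID;
    cmd_status &= ~MPT3_CMD_PENDING;
}

static int issue_internal_cmd(void)
{
    int rc = 0;

    pthread_mutex_lock(&cmd_mutex);
    if (cmd_status != MPT3_CMD_NOT_USED) {  /* slot already in use */
        pthread_mutex_unlock(&cmd_mutex);
        return -1;
    }
    cmd_status = MPT3_CMD_PENDING;
    fake_firmware_complete();               /* driver: put_smid + wait */
    if (!(cmd_status & MPT3_CMD_COMPLETE))
        rc = -1;                            /* driver: timeout -> hard reset */
    else if (cmd_status & MPT3_CMD_REPLY_VALID)
        printf("reply valid\n");
    cmd_status = MPT3_CMD_NOT_USED;
    pthread_mutex_unlock(&cmd_mutex);
    return rc;
}

int main(void)
{
    return issue_internal_cmd();
}
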
689 * _base_fault_reset_work - workq handling ioc fault conditions
690 * @work: input argument, used to derive ioc
697 struct MPT3SAS_ADAPTER *ioc =
705 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
706 if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) ||
707 ioc->pci_error_recovery)
709 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
711 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
713 ioc_err(ioc, "SAS host is non-operational !!!!\n");
716 * pci bus failure issues rather removing the dead ioc function
720 * controller to non-operational state and remove the dead ioc
723 if (ioc->non_operational_loop++ < 5) {
724 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
733 * and this call is safe since dead ioc will never return any
736 mpt3sas_base_pause_mq_polling(ioc);
737 ioc->schedule_dead_ioc_flush_running_cmds(ioc);
742 ioc->remove_host = 1;
744 p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
745 "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
747 ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
750 ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
756 u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
757 ioc->manu_pg11.CoreDumpTOSec :
762 if (ioc->ioc_coredump_loop == 0) {
763 mpt3sas_print_coredump_info(ioc,
767 &ioc->ioc_reset_in_progress_lock, flags);
768 ioc->shost_recovery = 1;
770 &ioc->ioc_reset_in_progress_lock, flags);
771 mpt3sas_base_mask_interrupts(ioc);
772 mpt3sas_base_pause_mq_polling(ioc);
773 _base_clear_outstanding_commands(ioc);
776 ioc_info(ioc, "%s: CoreDump loop %d.",
777 __func__, ioc->ioc_coredump_loop);
780 if (ioc->ioc_coredump_loop++ < timeout) {
782 &ioc->ioc_reset_in_progress_lock, flags);
787 if (ioc->ioc_coredump_loop) {
789 ioc_err(ioc, "%s: CoreDump completed. LoopCount: %d",
790 __func__, ioc->ioc_coredump_loop);
792 ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d",
793 __func__, ioc->ioc_coredump_loop);
794 ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE;
796 ioc->non_operational_loop = 0;
798 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
799 ioc_warn(ioc, "%s: hard reset: %s\n",
801 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
803 mpt3sas_print_fault_code(ioc, doorbell &
807 mpt3sas_print_coredump_info(ioc, doorbell &
813 ioc->ioc_coredump_loop = 0;
814 if (ioc->time_sync_interval &&
815 ++ioc->timestamp_update_count >= ioc->time_sync_interval) {
816 ioc->timestamp_update_count = 0;
817 _base_sync_drv_fw_timestamp(ioc);
819 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
821 if (ioc->fault_reset_work_q)
822 queue_delayed_work(ioc->fault_reset_work_q,
823 &ioc->fault_reset_work,
825 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
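
The work handler keys everything off the masked doorbell state: FAULT triggers an immediate hard reset, COREDUMP is given CoreDumpTOSec seconds to finish before the reset, and anything else just re-arms the delayed work. A decode sketch; the MPI2_IOC_STATE_* encodings follow mpi2.h (the state lives in the top nibble of the doorbell), but treat the exact values here as illustrative:

#include <stdio.h>
#include <stdint.h>

#define MPI2_IOC_STATE_MASK             0xF0000000u
#define MPI2_IOC_STATE_OPERATIONAL      0x20000000u
#define MPI2_IOC_STATE_FAULT            0x40000000u
#define MPI2_IOC_STATE_COREDUMP         0x60000000u
#define MPI2_DOORBELL_DATA_MASK         0x0000FFFFu

static void check_doorbell(uint32_t doorbell)
{
    switch (doorbell & MPI2_IOC_STATE_MASK) {
    case MPI2_IOC_STATE_FAULT:
        printf("fault_state(0x%04x)!\n",
               (unsigned)(doorbell & MPI2_DOORBELL_DATA_MASK));
        break;  /* driver: print fault code, hard reset */
    case MPI2_IOC_STATE_COREDUMP:
        printf("coredump_state(0x%04x)!\n",
               (unsigned)(doorbell & MPI2_DOORBELL_DATA_MASK));
        break;  /* driver: wait up to CoreDumpTOSec, then reset */
    case MPI2_IOC_STATE_OPERATIONAL:
    default:
        break;  /* healthy: just re-arm the delayed work */
    }
}

int main(void)
{
    check_doorbell(0x40001234);     /* fault with code 0x1234 */
    return 0;
}
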
830 * @ioc: per adapter object
835 mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
839 if (ioc->fault_reset_work_q)
842 ioc->timestamp_update_count = 0;
845 INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
846 snprintf(ioc->fault_reset_work_q_name,
847 sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
848 ioc->driver_name, ioc->id);
849 ioc->fault_reset_work_q =
850 create_singlethread_workqueue(ioc->fault_reset_work_q_name);
851 if (!ioc->fault_reset_work_q) {
852 ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
855 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
856 if (ioc->fault_reset_work_q)
857 queue_delayed_work(ioc->fault_reset_work_q,
858 &ioc->fault_reset_work,
860 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
865 * @ioc: per adapter object
870 mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
875 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
876 wq = ioc->fault_reset_work_q;
877 ioc->fault_reset_work_q = NULL;
878 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
880 if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
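
Starting and stopping the watchdog is the stock delayed-workqueue pattern: INIT_DELAYED_WORK plus a single-threaded queue, with stop detaching the queue pointer before cancel_delayed_work_sync() so the handler cannot re-arm itself. A minimal module-style sketch under assumed names; the real driver additionally serializes the re-arm with ioc_reset_in_progress_lock:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;
static struct delayed_work dwork;

static void demo_poll(struct work_struct *work)
{
    /* poll hardware state here, then re-arm exactly like
     * _base_fault_reset_work(), but only while the queue exists */
    if (wq)
        queue_delayed_work(wq, &dwork, HZ);
}

static int __init demo_init(void)
{
    INIT_DELAYED_WORK(&dwork, demo_poll);
    wq = create_singlethread_workqueue("poll_demo");
    if (!wq)
        return -ENOMEM;
    queue_delayed_work(wq, &dwork, HZ);
    return 0;
}

static void __exit demo_exit(void)
{
    /* mirror mpt3sas_base_stop_watchdog(): detach, cancel, destroy */
    struct workqueue_struct *tmp = wq;

    wq = NULL;
    cancel_delayed_work_sync(&dwork);
    destroy_workqueue(tmp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
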
888 * @ioc: per adapter object
892 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
894 ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
899 * @ioc: per adapter object
905 mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
907 ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code);
913 * @ioc: per adapter object
919 mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc,
922 u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ?
923 ioc->manu_pg11.CoreDumpTOSec :
926 int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT,
930 ioc_err(ioc,
934 ioc_info(ioc,
943 * @ioc: per adapter object
951 mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
955 if (!ioc->fwfault_debug)
960 doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
962 mpt3sas_print_fault_code(ioc, doorbell &
966 mpt3sas_print_coredump_info(ioc, doorbell &
969 writel(0xC0FFEE00, &ioc->chip->Doorbell);
970 ioc_err(ioc, "Firmware is halted due to command timeout\n");
973 if (ioc->fwfault_debug == 2)
981 * _base_sas_ioc_info - verbose translation of the ioc status
982 * @ioc: per adapter object
987 _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
1014 !(ioc->logging_level & MPT_DEBUG_CONFIG)) {
1173 frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
1197 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
1202 ioc->sge_size;
1211 ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
1219 * @ioc: per adapter object
1223 _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
1229 if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
1251 if (!ioc->hide_ir_msg)
1258 ioc_info(ioc, "Discovery: (%s)",
1283 if (!ioc->hide_ir_msg)
1287 if (!ioc->hide_ir_msg)
1291 if (!ioc->hide_ir_msg)
1295 if (!ioc->hide_ir_msg)
1314 ioc_info(ioc, "PCIE Enumeration: (%s)",
1331 ioc_info(ioc, "%s\n", desc);
1336 * @ioc: per adapter object
1340 _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
1363 if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
1375 if (!ioc->hide_ir_msg)
1382 ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
1389 * @ioc: per adapter object
1395 _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1402 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1404 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
1411 (ioc->logging_level & MPT_DEBUG_REPLY)) {
1412 _base_sas_ioc_info(ioc, mpi_reply,
1413 mpt3sas_base_get_msg_frame(ioc, smid));
1418 _base_sas_log_info(ioc, loginfo);
1423 mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
1429 * @ioc: per adapter object
1439 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1444 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1446 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
1448 if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
1451 ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
1453 ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
1454 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
1456 ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
1458 complete(&ioc->base_cmds.done);
1464 * @ioc: per adapter object
1473 _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
1480 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1486 _base_display_event_data(ioc, mpi_reply);
1490 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
1500 &ioc->delayed_event_ack_list);
1501 dewtprintk(ioc,
1502 ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
1507 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
1514 ioc->put_smid_default(ioc, smid);
1519 mpt3sas_scsih_event_callback(ioc, msix_index, reply);
1522 mpt3sas_ctl_event_callback(ioc, msix_index, reply);
1528 _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1533 WARN_ON(smid >= ioc->hi_priority_smid))
1536 cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1545 * @ioc: per adapter object
1551 _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1554 u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
1557 if (smid < ioc->hi_priority_smid) {
1561 st = _get_st_from_smid(ioc, smid);
1565 cb_idx = ioc->ctl_cb_idx;
1566 } else if (smid < ioc->internal_smid) {
1567 i = smid - ioc->hi_priority_smid;
1568 cb_idx = ioc->hpr_lookup[i].cb_idx;
1569 } else if (smid <= ioc->hba_queue_depth) {
1570 i = smid - ioc->internal_smid;
1571 cb_idx = ioc->internal_lookup[i].cb_idx;
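
_base_get_cb_idx() (and mpt3sas_base_free_smid() further down) rely on the smid space being partitioned into three contiguous ranges: SCSI I/O smids first, then high-priority, then internal. A small demo of the range dispatch, with made-up boundary values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint16_t hi_priority_smid = 97;     /* first non-SCSI-I/O smid (demo) */
    uint16_t internal_smid = 102;       /* first internal smid (demo) */
    uint16_t hba_queue_depth = 106;

    for (uint16_t smid = 1; smid <= hba_queue_depth; smid++) {
        const char *kind;

        if (smid < hi_priority_smid)
            kind = "scsiio";
        else if (smid < internal_smid)
            kind = "high-priority";
        else
            kind = "internal";
        /* print only the range starts to keep the output short */
        if (smid == 1 || smid == hi_priority_smid ||
            smid == internal_smid)
            printf("smid %u -> %s\n", (unsigned)smid, kind);
    }
    return 0;
}
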
1579 * @ioc: per adapter object
1588 mpt3sas_base_pause_mq_polling(struct MPT3SAS_ADAPTER *ioc)
1591 ioc->reply_queue_count - ioc->iopoll_q_start_index;
1595 atomic_set(&ioc->io_uring_poll_queues[qid].pause, 1);
1601 while (atomic_read(&ioc->io_uring_poll_queues[qid].busy)) {
1610 * @ioc: per adapter object
1615 mpt3sas_base_resume_mq_polling(struct MPT3SAS_ADAPTER *ioc)
1618 ioc->reply_queue_count - ioc->iopoll_q_start_index;
1622 atomic_set(&ioc->io_uring_poll_queues[qid].pause, 0);
1627 * @ioc: per adapter object
1632 mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1636 ioc->mask_interrupts = 1;
1637 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1639 writel(him_register, &ioc->chip->HostInterruptMask);
1640 ioc->base_readl(&ioc->chip->HostInterruptMask);
1645 * @ioc: per adapter object
1650 mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1654 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1656 writel(him_register, &ioc->chip->HostInterruptMask);
1657 ioc->mask_interrupts = 0;
1696 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1725 cb_idx = _base_get_cb_idx(ioc, smid);
1728 rc = mpt_callbacks[cb_idx](ioc, smid,
1731 mpt3sas_base_free_smid(ioc, smid);
1737 if (reply > ioc->reply_dma_max_address ||
1738 reply < ioc->reply_dma_min_address)
1741 cb_idx = _base_get_cb_idx(ioc, smid);
1744 rc = mpt_callbacks[cb_idx](ioc, smid,
1747 _base_display_reply_info(ioc,
1750 mpt3sas_base_free_smid(ioc,
1754 _base_async_event(ioc, msix_index, reply);
1759 ioc->reply_free_host_index =
1760 (ioc->reply_free_host_index ==
1761 (ioc->reply_free_queue_depth - 1)) ?
1762 0 : ioc->reply_free_host_index + 1;
1763 ioc->reply_free[ioc->reply_free_host_index] =
1765 if (ioc->is_mcpu_endpoint)
1766 _base_clone_reply_to_sys_mem(ioc,
1768 ioc->reply_free_host_index);
1769 writel(ioc->reply_free_host_index,
1770 &ioc->chip->ReplyFreeHostIndex);
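
Returning a reply to the firmware is a classic producer-ring update: advance reply_free_host_index with an explicit wrap, store the freed reply address, then publish the new index through ReplyFreeHostIndex. Modeled below with the MMIO write reduced to a printf:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t depth = 4;         /* reply_free_queue_depth (demo value) */
    uint32_t host_index = 0;

    for (int i = 0; i < 6; i++) {
        host_index = (host_index == depth - 1) ? 0 : host_index + 1;
        /* driver: ioc->reply_free[host_index] = cpu_to_le32(reply);
         *         writel(host_index, &ioc->chip->ReplyFreeHostIndex); */
        printf("freed reply %d -> host index %u\n", i, (unsigned)host_index);
    }
    return 0;
}
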
1777 (ioc->reply_post_queue_depth - 1)) ? 0 :
1788 if (completed_cmds >= ioc->thresh_hold) {
1789 if (ioc->combined_reply_queue) {
1793 ioc->replyPostRegisterIndex[msix_index/8]);
1798 &ioc->chip->ReplyPostHostIndex);
1823 if (ioc->is_warpdrive) {
1825 ioc->reply_post_host_index[msix_index]);
1845 if (ioc->combined_reply_queue)
1848 ioc->replyPostRegisterIndex[msix_index/8]);
1852 &ioc->chip->ReplyPostHostIndex);
1866 struct MPT3SAS_ADAPTER *ioc =
1870 int qid = queue_num - ioc->iopoll_q_start_index;
1872 if (atomic_read(&ioc->io_uring_poll_queues[qid].pause) ||
1873 !atomic_add_unless(&ioc->io_uring_poll_queues[qid].busy, 1, 1))
1876 reply_q = ioc->io_uring_poll_queues[qid].reply_q;
1879 atomic_dec(&ioc->io_uring_poll_queues[qid].busy);
1895 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1897 if (ioc->mask_interrupts)
1944 * @ioc: per adapter object
1949 _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
1953 if (list_empty(&ioc->reply_queue_list))
1956 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1960 ioc->hba_queue_depth/4, _base_irqpoll);
1963 reply_q->os_irq = pci_irq_vector(ioc->pdev,
1970 * @ioc: per adapter object
1975 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1977 return (ioc->facts.IOCCapabilities &
1978 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1983 * @ioc: per adapter object
1991 mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
1998 if (!_base_is_controller_msix_enabled(ioc))
2001 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
2002 if (ioc->shost_recovery || ioc->remove_host ||
2003 ioc->pci_error_recovery)
2014 synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
2080 * @ioc: per adapter object
2088 _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2094 ioc->base_add_sg_single(paddr, flags_length, -1);
2134 * @ioc: per adapter object
2141 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
2148 atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
2150 if (chain_offset == ioc->chains_needed_per_io)
2153 chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
2154 atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
2161 * @ioc: per adapter object
2169 _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
2176 _base_build_zero_len_sge(ioc, psge);
2185 ioc->base_add_sg_single(psge, sgl_flags |
2189 psge += ioc->sge_size;
2196 ioc->base_add_sg_single(psge, sgl_flags |
2203 ioc->base_add_sg_single(psge, sgl_flags |
2210 ioc->base_add_sg_single(psge, sgl_flags |
2220 * @ioc: per adapter object
2273 _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
2301 prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
2302 prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2308 page_mask = ioc->page_size - 1;
2358 entry_len = ioc->page_size - offset;
2378 if (length > ioc->page_size) {
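
_base_build_nvme_prp() builds PRP entries that each map one controller page; only the first entry may start mid-page, so its length is the page size minus the intra-page offset. The mask arithmetic, runnable with an illustrative page size:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t page_size = 4096;                  /* ioc->page_size (demo) */
    uint64_t page_mask = page_size - 1;
    uint64_t dma = 0x12345678abcull;            /* made-up segment address */

    uint64_t offset = dma & page_mask;          /* intra-page offset */
    uint32_t first_len = page_size - offset;    /* bytes covered by PRP1 */

    printf("offset=0x%llx first_len=%u next_page=0x%llx\n",
           (unsigned long long)offset, first_len,
           (unsigned long long)((dma + page_size) & ~page_mask));
    return 0;
}
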
2430 * @ioc: per adapter object
2440 base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2455 nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2494 curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2495 msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2567 base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2596 * @ioc: per adapter object
2605 _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2617 if (!base_is_prp_possible(ioc, pcie_device,
2626 base_make_prp_nvme(ioc, scmd, mpi_request,
2657 * @ioc: per adapter object
2665 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2677 * @ioc: per adapter object
2689 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2706 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2725 sges_in_segment = ioc->max_sges_in_main_message;
2730 (sges_in_segment * ioc->sge_size))/4;
2735 ioc->base_add_sg_single(sg_local,
2739 ioc->base_add_sg_single(sg_local, sgl_flags |
2742 sg_local += ioc->sge_size;
2749 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2756 ioc->max_sges_in_chain_message) ? sges_left :
2757 ioc->max_sges_in_chain_message;
2759 0 : (sges_in_segment * ioc->sge_size)/4;
2760 chain_length = sges_in_segment * ioc->sge_size;
2764 chain_length += ioc->sge_size;
2766 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2775 ioc->base_add_sg_single(sg_local,
2780 ioc->base_add_sg_single(sg_local, sgl_flags |
2784 sg_local += ioc->sge_size;
2789 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2802 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2805 ioc->base_add_sg_single(sg_local, sgl_flags |
2808 sg_local += ioc->sge_size;
2817 * @ioc: per adapter object
2830 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2846 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2857 if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2869 sges_in_segment = (ioc->request_sz -
2870 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2875 (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
2882 sg_local += ioc->sge_size_ieee;
2888 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2895 ioc->max_sges_in_chain_message) ? sges_left :
2896 ioc->max_sges_in_chain_message;
2899 chain_length = sges_in_segment * ioc->sge_size_ieee;
2901 chain_length += ioc->sge_size_ieee;
2914 sg_local += ioc->sge_size_ieee;
2919 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2939 sg_local += ioc->sge_size_ieee;
2948 * @ioc: per adapter object
2956 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2963 _base_build_zero_len_sge_ieee(ioc, psge);
2975 psge += ioc->sge_size_ieee;
3000 * @ioc: per adapter object
3006 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
3011 if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4) {
3012 ioc->dma_mask = 32;
3015 } else if (ioc->hba_mpi_version_belonged > MPI2_VERSION) {
3016 ioc->dma_mask = 63;
3019 ioc->dma_mask = 64;
3023 if (ioc->use_32bit_dma)
3030 if (ioc->dma_mask > 32) {
3031 ioc->base_add_sg_single = &_base_add_sg_single_64;
3032 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
3034 ioc->base_add_sg_single = &_base_add_sg_single_32;
3035 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
3039 ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
3040 ioc->dma_mask, convert_to_kb(s.totalram));
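
The DMA setup tries the widest mask the HBA generation supports (64 bit, or 63 bit for the newer-than-MPI2 parts) and falls back to 32 bit, which in turn selects the 32- vs 64-bit SGE format. A hedged kernel-style sketch of the fallback, assuming a probe() context:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* demo helper, not the driver's function */
static int demo_config_dma(struct pci_dev *pdev, int dma_bits)
{
    /* try the wide mask first ... */
    if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_bits)))
        return 0;
    /* ... and fall back to 32-bit addressing if that fails */
    return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}
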
3047 * @ioc: per adapter object
3053 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3061 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
3062 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
3066 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
3068 dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
3074 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
3075 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
3076 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
3077 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
3078 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
3079 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
3080 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
3081 ioc->msix_vector_count = 1;
3083 pci_read_config_word(ioc->pdev, base + 2, &message_control);
3084 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
3086 dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
3087 ioc->msix_vector_count));
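
The vector count comes from the MSI-X capability's Message Control word, which encodes the table size minus one in its low bits, hence the (message_control & 0x3FF) + 1 above:

#include <stdio.h>

int main(void)
{
    unsigned short message_control = 0x001F;    /* example config read */
    printf("vectors: %d\n", (message_control & 0x3FF) + 1);    /* 32 */
    return 0;
}
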
3093 * @ioc: per adapter object
3098 mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
3103 if (list_empty(&ioc->reply_queue_list))
3106 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
3113 if (ioc->smp_affinity_enable) {
3114 irq = pci_irq_vector(ioc->pdev, reply_q->msix_index);
3117 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
3125 * @ioc: per adapter object
3131 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
3133 struct pci_dev *pdev = ioc->pdev;
3139 ioc_err(ioc, "unable to allocate memory %zu!\n",
3143 reply_q->ioc = ioc;
3148 if (index >= ioc->iopoll_q_start_index) {
3149 qid = index - ioc->iopoll_q_start_index;
3151 ioc->driver_name, ioc->id, qid);
3153 ioc->io_uring_poll_queues[qid].reply_q = reply_q;
3158 if (ioc->msix_enable)
3160 ioc->driver_name, ioc->id, index);
3163 ioc->driver_name, ioc->id);
3174 list_add_tail(&reply_q->list, &ioc->reply_queue_list);
3180 * @ioc: per adapter object
3185 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
3189 int iopoll_q_count = ioc->reply_queue_count -
3190 ioc->iopoll_q_start_index;
3193 if (!_base_is_controller_msix_enabled(ioc))
3196 if (ioc->msix_load_balance)
3199 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
3202 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
3203 ioc->facts.MaxMSIxVectors);
3207 if (ioc->smp_affinity_enable) {
3213 if (ioc->high_iops_queues) {
3214 mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev));
3215 for (index = 0; index < ioc->high_iops_queues;
3217 irq = pci_irq_vector(ioc->pdev, index);
3222 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3225 if (reply_q->msix_index < ioc->high_iops_queues ||
3226 reply_q->msix_index >= ioc->iopoll_q_start_index)
3229 mask = pci_irq_get_affinity(ioc->pdev,
3232 ioc_warn(ioc, "no affinity for msi %x\n",
3238 if (cpu >= ioc->cpu_msix_table_sz)
3240 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3248 nr_msix -= (ioc->high_iops_queues - iopoll_q_count);
3251 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3254 if (reply_q->msix_index < ioc->high_iops_queues ||
3255 reply_q->msix_index >= ioc->iopoll_q_start_index)
3265 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
3274 * @ioc: per adapter object
3287 _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
3297 ioc->io_uring_poll_queues) {
3298 ioc->high_iops_queues = 0;
3304 pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
3308 ioc->high_iops_queues = 0;
3313 if (!reset_devices && ioc->is_aero_ioc &&
3317 ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
3319 ioc->high_iops_queues = 0;
3324 * @ioc: per adapter object
3328 mpt3sas_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
3330 if (!ioc->msix_enable)
3332 pci_free_irq_vectors(ioc->pdev);
3333 ioc->msix_enable = 0;
3334 kfree(ioc->io_uring_poll_queues);
3339 * @ioc: per adapter object
3343 _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
3346 struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
3352 int nr_msix_vectors = ioc->iopoll_q_start_index;
3355 if (ioc->smp_affinity_enable)
3360 ioc_info(ioc, " %d %d %d\n", ioc->high_iops_queues,
3361 ioc->reply_queue_count, nr_msix_vectors);
3363 i = pci_alloc_irq_vectors_affinity(ioc->pdev,
3364 ioc->high_iops_queues,
3372 * @ioc: per adapter object
3376 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3383 ioc->msix_load_balance = false;
3391 if (_base_check_enable_msix(ioc) != 0)
3394 ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
3396 ioc->cpu_count, max_msix_vectors);
3398 ioc->reply_queue_count =
3399 min_t(int, ioc->cpu_count, ioc->msix_vector_count);
3401 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
3413 if (!ioc->combined_reply_queue &&
3414 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3415 ioc_info(ioc,
3417 ioc->msix_load_balance = true;
3424 if (ioc->msix_load_balance)
3425 ioc->smp_affinity_enable = 0;
3427 if (!ioc->smp_affinity_enable || ioc->reply_queue_count <= 1)
3428 ioc->shost->host_tagset = 0;
3433 if (ioc->shost->host_tagset)
3437 ioc->io_uring_poll_queues = kcalloc(iopoll_q_count,
3439 if (!ioc->io_uring_poll_queues)
3443 if (ioc->is_aero_ioc)
3444 _base_check_and_enable_high_iops_queues(ioc,
3445 ioc->msix_vector_count);
3451 ioc->reply_queue_count = min_t(int,
3452 ioc->reply_queue_count + ioc->high_iops_queues,
3453 ioc->msix_vector_count);
3460 ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
3461 ioc->reply_queue_count);
3467 if (ioc->reply_queue_count < (iopoll_q_count + MPT3_MIN_IRQS))
3469 ioc->reply_queue_count = min_t(int,
3470 ioc->reply_queue_count + iopoll_q_count,
3471 ioc->msix_vector_count);
3477 ioc->iopoll_q_start_index =
3478 ioc->reply_queue_count - iopoll_q_count;
3480 r = _base_alloc_irq_vectors(ioc);
3482 ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r);
3491 if (r < ioc->iopoll_q_start_index) {
3492 ioc->reply_queue_count = r + iopoll_q_count;
3493 ioc->iopoll_q_start_index =
3494 ioc->reply_queue_count - iopoll_q_count;
3497 ioc->msix_enable = 1;
3498 for (i = 0; i < ioc->reply_queue_count; i++) {
3499 r = _base_request_irq(ioc, i);
3501 mpt3sas_base_free_irq(ioc);
3502 mpt3sas_base_disable_msix(ioc);
3507 ioc_info(ioc, "High IOPs queues : %s\n",
3508 ioc->high_iops_queues ? "enabled" : "disabled");
3514 ioc->high_iops_queues = 0;
3515 ioc_info(ioc, "High IOPs queues : disabled\n");
3516 ioc->reply_queue_count = 1;
3517 ioc->iopoll_q_start_index = ioc->reply_queue_count - 0;
3518 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
3520 dfailprintk(ioc,
3521 ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
3524 r = _base_request_irq(ioc, 0);
3531 * @ioc: per adapter object
3534 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
3536 struct pci_dev *pdev = ioc->pdev;
3538 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3540 mpt3sas_base_free_irq(ioc);
3541 mpt3sas_base_disable_msix(ioc);
3543 kfree(ioc->replyPostRegisterIndex);
3544 ioc->replyPostRegisterIndex = NULL;
3547 if (ioc->chip_phys) {
3548 iounmap(ioc->chip);
3549 ioc->chip_phys = 0;
3553 pci_release_selected_regions(ioc->pdev, ioc->bars);
3559 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3564 * @ioc: per adapter object
3569 mpt3sas_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
3574 dinitprintk(ioc, pr_info("%s\n", __func__));
3575 if (ioc->pci_error_recovery)
3577 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3578 dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
3581 mpt3sas_print_fault_code(ioc, ioc_state &
3583 mpt3sas_base_mask_interrupts(ioc);
3584 rc = _base_diag_reset(ioc);
3587 mpt3sas_print_coredump_info(ioc, ioc_state &
3589 mpt3sas_base_wait_for_coredump_completion(ioc, __func__);
3590 mpt3sas_base_mask_interrupts(ioc);
3591 rc = _base_diag_reset(ioc);
3599 * @ioc: per adapter object
3604 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3606 struct pci_dev *pdev = ioc->pdev;
3615 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3617 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3619 ioc_warn(ioc, "pci_enable_device_mem: failed\n");
3620 ioc->bars = 0;
3625 if (pci_request_selected_regions(pdev, ioc->bars,
3626 ioc->driver_name)) {
3627 ioc_warn(ioc, "pci_request_selected_regions: failed\n");
3628 ioc->bars = 0;
3636 if (_base_config_dma_addressing(ioc, pdev) != 0) {
3637 ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
3652 ioc->chip_phys = pci_resource_start(pdev, i);
3653 chip_phys = ioc->chip_phys;
3655 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3659 if (ioc->chip == NULL) {
3660 ioc_err(ioc,
3666 mpt3sas_base_mask_interrupts(ioc);
3668 r = _base_get_ioc_facts(ioc);
3670 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
3671 if (rc || (_base_get_ioc_facts(ioc)))
3675 if (!ioc->rdpq_array_enable_assigned) {
3676 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3677 ioc->rdpq_array_enable_assigned = 1;
3680 r = _base_enable_msix(ioc);
3684 iopoll_q_count = ioc->reply_queue_count - ioc->iopoll_q_start_index;
3686 atomic_set(&ioc->io_uring_poll_queues[i].busy, 0);
3687 atomic_set(&ioc->io_uring_poll_queues[i].pause, 0);
3690 if (!ioc->is_driver_loading)
3691 _base_init_irqpolls(ioc);
3695 if (ioc->combined_reply_queue) {
3702 ioc->replyPostRegisterIndex = kcalloc(
3703 ioc->combined_reply_index_count,
3705 if (!ioc->replyPostRegisterIndex) {
3706 ioc_err(ioc,
3712 for (i = 0; i < ioc->combined_reply_index_count; i++) {
3713 ioc->replyPostRegisterIndex[i] =
3715 ((u8 __force *)&ioc->chip->Doorbell +
3721 if (ioc->is_warpdrive) {
3722 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3723 &ioc->chip->ReplyPostHostIndex;
3725 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3726 ioc->reply_post_host_index[i] =
3728 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3732 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
3733 if (reply_q->msix_index >= ioc->iopoll_q_start_index) {
3741 ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3742 pci_irq_vector(ioc->pdev, reply_q->msix_index));
3745 ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3746 &chip_phys, ioc->chip, memap_sz);
3747 ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3755 mpt3sas_base_unmap_resources(ioc);
3761 * @ioc: per adapter object
3767 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3769 return (void *)(ioc->request + (smid * ioc->request_sz));
3774 * @ioc: per adapter object
3780 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3782 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3787 * @ioc: per adapter object
3793 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3795 return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3801 * @ioc: per adapter object
3807 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3809 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3814 * @ioc: per adapter object
3820 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3822 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3827 * @ioc: per adapter object
3833 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3837 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3842 * @ioc: per adapter object
3850 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
3854 if (ioc->msix_load_balance)
3855 return ioc->reply_queue_count ?
3857 &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
3859 if (scmd && ioc->shost->nr_hw_queues > 1) {
3863 ioc->high_iops_queues;
3866 return ioc->cpu_msix_table[raw_smp_processor_id()];
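
With msix_load_balance set, the reply queue is chosen by a free-running atomic counter taken modulo the queue count rather than by the submitting CPU. A C11-atomics equivalent of that round-robin:

#include <stdio.h>
#include <stdatomic.h>

static atomic_ullong total_io_cnt;      /* mirrors ioc->total_io_cnt */

static unsigned int pick_msix_index(unsigned int reply_queue_count)
{
    return reply_queue_count ?
        atomic_fetch_add(&total_io_cnt, 1) % reply_queue_count : 0;
}

int main(void)
{
    for (int i = 0; i < 5; i++)
        printf("io %d -> queue %u\n", i, pick_msix_index(3));
    return 0;
}
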
3872 * @ioc: per adapter object
3880 _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
3891 atomic64_add_return(1, &ioc->high_iops_outstanding) /
3895 return _base_get_msix_index(ioc, scmd);
3900 * @ioc: per adapter object
3906 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3912 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3913 if (list_empty(&ioc->internal_free_list)) {
3914 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3915 ioc_err(ioc, "%s: smid not available\n", __func__);
3919 request = list_entry(ioc->internal_free_list.next,
3924 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
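
Internal smids come from a locked free list: bail out if it is empty, otherwise pop the head tracker. The same shape, modeled with a mutex and a singly linked list:

#include <stdio.h>
#include <pthread.h>

struct tracker {
    unsigned short smid;
    struct tracker *next;
};

static pthread_mutex_t lookup_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tracker pool[3] = {
    { 101, &pool[1] }, { 102, &pool[2] }, { 103, NULL },
};
static struct tracker *free_list = &pool[0];

static unsigned short get_smid(void)
{
    struct tracker *t;

    pthread_mutex_lock(&lookup_lock);
    if (!free_list) {                   /* "smid not available" */
        pthread_mutex_unlock(&lookup_lock);
        return 0;
    }
    t = free_list;
    free_list = t->next;
    pthread_mutex_unlock(&lookup_lock);
    return t->smid;
}

int main(void)
{
    for (int i = 0; i < 4; i++)
        printf("got smid %u\n", get_smid());
    return 0;
}
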
3930 * @ioc: per adapter object
3937 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3955 * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
3957 ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag);
3969 * @ioc: per adapter object
3975 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3981 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3982 if (list_empty(&ioc->hpr_free_list)) {
3983 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3987 request = list_entry(ioc->hpr_free_list.next,
3992 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3997 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
4002 if (ioc->shost_recovery && ioc->pending_io_count) {
4003 ioc->pending_io_count = scsi_host_busy(ioc->shost);
4004 if (ioc->pending_io_count == 0)
4005 wake_up(&ioc->reset_wq);
4009 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
4017 atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
4023 * @ioc: per adapter object
4027 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4032 if (smid < ioc->hi_priority_smid) {
4036 st = _get_st_from_smid(ioc, smid);
4038 _base_recovery_check(ioc);
4043 request = mpt3sas_base_get_msg_frame(ioc, smid);
4044 memset(request, 0, ioc->request_sz);
4046 mpt3sas_base_clear_st(ioc, st);
4047 _base_recovery_check(ioc);
4048 ioc->io_queue_num[smid - 1] = 0;
4052 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
4053 if (smid < ioc->internal_smid) {
4055 i = smid - ioc->hi_priority_smid;
4056 ioc->hpr_lookup[i].cb_idx = 0xFF;
4057 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
4058 } else if (smid <= ioc->hba_queue_depth) {
4060 i = smid - ioc->internal_smid;
4061 ioc->internal_lookup[i].cb_idx = 0xFF;
4062 list_add(&ioc->internal_lookup[i].tracker_list,
4063 &ioc->internal_free_list);
4065 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
4119 * @ioc: per adapter object
4125 _base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4129 if (smid < ioc->hi_priority_smid)
4130 st = _get_st_from_smid(ioc, smid);
4133 return _base_get_msix_index(ioc, NULL);
4135 st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
4141 * @ioc: per adapter object
4146 _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
4152 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4154 _clone_sg_entries(ioc, (void *) mfp, smid);
4155 mpi_req_iomem = (void __force *)ioc->chip +
4156 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
4158 ioc->request_sz);
4160 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4164 _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4165 &ioc->scsi_lookup_lock);
4170 * @ioc: per adapter object
4175 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
4182 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4186 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4187 &ioc->scsi_lookup_lock);
4192 * @ioc: per adapter object
4197 _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4205 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4209 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4210 &ioc->scsi_lookup_lock);
4215 * @ioc: per adapter object
4220 _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4227 if (ioc->is_mcpu_endpoint) {
4228 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4231 mpi_req_iomem = (void __force *)ioc->chip
4233 + (smid * ioc->request_sz);
4235 ioc->request_sz);
4246 if (ioc->is_mcpu_endpoint)
4248 &ioc->chip->RequestDescriptorPostLow,
4249 &ioc->scsi_lookup_lock);
4251 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4252 &ioc->scsi_lookup_lock);
4258 * @ioc: per adapter object
4262 mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4269 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4273 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4274 &ioc->scsi_lookup_lock);
4279 * @ioc: per adapter object
4283 _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4289 if (ioc->is_mcpu_endpoint) {
4290 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
4292 _clone_sg_entries(ioc, (void *) mfp, smid);
4294 mpi_req_iomem = (void __force *)ioc->chip +
4295 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
4297 ioc->request_sz);
4301 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4305 if (ioc->is_mcpu_endpoint)
4307 &ioc->chip->RequestDescriptorPostLow,
4308 &ioc->scsi_lookup_lock);
4310 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
4311 &ioc->scsi_lookup_lock);
4317 * @ioc: per adapter object
4324 _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4331 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4334 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4340 * @ioc: per adapter object
4346 _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4353 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4356 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4362 * @ioc: per adapter object
4369 _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
4379 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
4385 * @ioc: per adapter object
4391 _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
4397 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
4400 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
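
The contrast in the posting helpers above: a full request descriptor is 64 bits wide and goes to RequestDescriptorPostLow/High (two locked 32-bit writes where a native writeq is not available), while the Atomic variant needs only a single 32-bit write. Simulated below with plain variables in place of MMIO registers and a made-up descriptor value:

#include <stdio.h>
#include <stdint.h>

static uint32_t reg_low, reg_high, reg_atomic;

static void post_writeq(uint64_t desc)
{
    /* driver: _base_writeq(); on 32-bit kernels the two halves are
     * written under scsi_lookup_lock so they cannot interleave */
    reg_low = (uint32_t)desc;
    reg_high = (uint32_t)(desc >> 32);
}

static void post_atomic(uint32_t desc)
{
    reg_atomic = desc;      /* single 32-bit write, no lock needed */
}

int main(void)
{
    post_writeq(0x001000030000aa06ull);     /* made-up descriptor */
    post_atomic(0x0000aa06u);
    printf("low=0x%08x high=0x%08x atomic=0x%08x\n",
           reg_low, reg_high, reg_atomic);
    return 0;
}
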
4405 * @ioc: per adapter object
4408 _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
4410 if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
4413 switch (ioc->pdev->subsystem_vendor) {
4415 switch (ioc->pdev->device) {
4417 switch (ioc->pdev->subsystem_device) {
4419 ioc_info(ioc, "%s\n",
4423 ioc_info(ioc, "%s\n",
4427 ioc_info(ioc, "%s\n",
4431 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4432 ioc->pdev->subsystem_device);
4437 switch (ioc->pdev->subsystem_device) {
4439 ioc_info(ioc, "%s\n",
4443 ioc_info(ioc, "%s\n",
4447 ioc_info(ioc, "%s\n",
4451 ioc_info(ioc, "%s\n",
4455 ioc_info(ioc, "%s\n",
4459 ioc_info(ioc, "%s\n",
4463 ioc_info(ioc, "%s\n",
4467 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4468 ioc->pdev->subsystem_device);
4473 switch (ioc->pdev->subsystem_device) {
4475 ioc_info(ioc, "%s\n",
4480 ioc_info(ioc, "%s\n",
4484 ioc_info(ioc, "%s\n",
4488 ioc_info(ioc, "%s\n",
4492 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4493 ioc->pdev->subsystem_device);
4498 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4499 ioc->pdev->subsystem_device);
4504 switch (ioc->pdev->device) {
4506 switch (ioc->pdev->subsystem_device) {
4508 ioc_info(ioc, "%s\n",
4512 ioc_info(ioc, "%s\n",
4516 ioc_info(ioc, "%s\n",
4520 ioc_info(ioc, "%s\n",
4524 ioc_info(ioc, "%s\n",
4528 ioc_info(ioc, "%s\n",
4532 ioc_info(ioc, "%s\n",
4536 ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
4537 ioc->pdev->subsystem_device);
4542 switch (ioc->pdev->subsystem_device) {
4544 ioc_info(ioc, "%s\n",
4548 ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
4549 ioc->pdev->subsystem_device);
4554 ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
4555 ioc->pdev->subsystem_device);
4560 switch (ioc->pdev->device) {
4562 switch (ioc->pdev->subsystem_device) {
4564 ioc_info(ioc, "%s\n",
4568 ioc_info(ioc, "%s\n",
4572 ioc_info(ioc, "%s\n",
4576 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4577 ioc->pdev->subsystem_device);
4582 switch (ioc->pdev->subsystem_device) {
4584 ioc_info(ioc, "%s\n",
4588 ioc_info(ioc, "%s\n",
4592 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4593 ioc->pdev->subsystem_device);
4598 ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
4599 ioc->pdev->subsystem_device);
4604 switch (ioc->pdev->device) {
4606 switch (ioc->pdev->subsystem_device) {
4608 ioc_info(ioc, "%s\n",
4612 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4613 ioc->pdev->subsystem_device);
4618 switch (ioc->pdev->subsystem_device) {
4620 ioc_info(ioc, "%s\n",
4624 ioc_info(ioc, "%s\n",
4628 ioc_info(ioc, "%s\n",
4632 ioc_info(ioc, "%s\n",
4636 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4637 ioc->pdev->subsystem_device);
4642 ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
4643 ioc->pdev->subsystem_device);
4655 * @ioc: per adapter object
4660 _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
4673 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4675 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4676 ioc_err(ioc, "%s: internal command already in use\n", __func__);
4681 fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
4684 ioc_err(ioc,
4690 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4692 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4697 ioc->base_cmds.status = MPT3_CMD_PENDING;
4698 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4699 ioc->base_cmds.smid = smid;
4704 ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
4706 init_completion(&ioc->base_cmds.done);
4707 ioc->put_smid_default(ioc, smid);
4709 wait_for_completion_timeout(&ioc->base_cmds.done,
4711 ioc_info(ioc, "%s: complete\n", __func__);
4712 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4713 ioc_err(ioc, "%s: timeout\n", __func__);
4719 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
4720 memcpy(&mpi_reply, ioc->base_cmds.reply,
4739 ioc_info(ioc,
4751 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4754 dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
4757 if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
4759 if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
4768 * @ioc: per adapter object
4771 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
4777 strncpy(desc, ioc->manu_pg0.ChipName, 16);
4778 ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x)\n",
4780 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
4781 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
4782 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
4783 ioc->facts.FWVersion.Word & 0x000000FF,
4784 ioc->pdev->revision);
4786 _base_display_OEMs_branding(ioc);
4788 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4793 ioc_info(ioc, "Protocol=(");
4795 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
4800 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
4808 if (!ioc->hide_ir_msg) {
4809 if (ioc->facts.IOCCapabilities &
4816 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4821 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4826 if (ioc->facts.IOCCapabilities &
4832 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4837 if (ioc->facts.IOCCapabilities &
4843 if (ioc->facts.IOCCapabilities &
4849 if (ioc->facts.IOCCapabilities &
4855 if (ioc->facts.IOCCapabilities &
4861 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4872 * @ioc: per adapter object
4881 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4892 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4899 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4903 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4905 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4912 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4937 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4945 ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4947 ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
4950 ioc->device_missing_delay = dmd_new;
4951 ioc->io_missing_delay = io_missing_delay;
4961 * @ioc : per adapter object
4967 _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
4973 rc = mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
4976 memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
4981 if (ioc->high_iops_queues) {
4982 ioc_info(ioc,
4997 rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
5000 ioc_info(ioc, "performance mode: balanced\n");
5012 rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
5015 ioc_info(ioc, "performance mode: latency\n");
5021 ioc_info(ioc,
5026 rc = mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
5037 * @ioc : per adapter object
5042 _base_get_event_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5052 r = mpt3sas_config_get_driver_trigger_pg2(ioc, &mpi_reply,
5060 dinitprintk(ioc,
5061 ioc_err(ioc,
5070 ioc->diag_trigger_event.ValidEntries = count;
5072 event_tg = &ioc->diag_trigger_event.EventTriggerEntry[0];
5089 * @ioc : per adapter object
5094 _base_get_scsi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5104 r = mpt3sas_config_get_driver_trigger_pg3(ioc, &mpi_reply,
5112 dinitprintk(ioc,
5113 ioc_err(ioc,
5122 ioc->diag_trigger_scsi.ValidEntries = count;
5124 scsi_tg = &ioc->diag_trigger_scsi.SCSITriggerEntry[0];
5141 * @ioc : per adapter object
5146 _base_get_mpi_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5156 r = mpt3sas_config_get_driver_trigger_pg4(ioc, &mpi_reply,
5164 dinitprintk(ioc,
5165 ioc_err(ioc,
5174 ioc->diag_trigger_mpi.ValidEntries = count;
5176 status_tg = &ioc->diag_trigger_mpi.MPITriggerEntry[0];
5195 * @ioc : per adapter object
5200 _base_get_master_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5207 r = mpt3sas_config_get_driver_trigger_pg1(ioc, &mpi_reply,
5215 dinitprintk(ioc,
5216 ioc_err(ioc,
5223 ioc->diag_trigger_master.MasterData |=
5232 * @ioc : per adapter object
5242 _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc, u32 *trigger_flags)
5249 r = mpt3sas_config_get_driver_trigger_pg0(ioc, &mpi_reply,
5266 * @ioc : per adapter object
5272 _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
5280 ioc->diag_trigger_master.MasterData =
5283 r = _base_check_for_trigger_pages_support(ioc, &trigger_flags);
5294 ioc->supports_trigger_pages = 1;
5302 r = _base_get_master_diag_triggers(ioc);
5313 r = _base_get_event_diag_triggers(ioc);
5324 r = _base_get_scsi_diag_triggers(ioc);
5334 r = _base_get_mpi_diag_triggers(ioc);
5345 * @ioc : per adapter object
5350 _base_update_diag_trigger_pages(struct MPT3SAS_ADAPTER *ioc)
5353 if (ioc->diag_trigger_master.MasterData)
5354 mpt3sas_config_update_driver_trigger_pg1(ioc,
5355 &ioc->diag_trigger_master, 1);
5357 if (ioc->diag_trigger_event.ValidEntries)
5358 mpt3sas_config_update_driver_trigger_pg2(ioc,
5359 &ioc->diag_trigger_event, 1);
5361 if (ioc->diag_trigger_scsi.ValidEntries)
5362 mpt3sas_config_update_driver_trigger_pg3(ioc,
5363 &ioc->diag_trigger_scsi, 1);
5365 if (ioc->diag_trigger_mpi.ValidEntries)
5366 mpt3sas_config_update_driver_trigger_pg4(ioc,
5367 &ioc->diag_trigger_mpi, 1);
5373 * @ioc : per adapter object
5378 static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc)
5386 ioc->max_wideport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
5387 ioc->max_narrowport_qd = MPT3SAS_SAS_QUEUE_DEPTH;
5388 ioc->max_sata_qd = MPT3SAS_SATA_QUEUE_DEPTH;
5389 ioc->max_nvme_qd = MPT3SAS_NVME_QUEUE_DEPTH;
5390 if (!ioc->is_gen35_ioc)
5393 rc = mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
5397 ioc->name, __FILE__, __LINE__, __func__);
5402 ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
5405 ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH);
5408 ioc->max_sata_qd = (depth ? depth : MPT3SAS_SATA_QUEUE_DEPTH);
5411 rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply,
5415 ioc->name, __FILE__, __LINE__, __func__);
5418 ioc->max_nvme_qd = (le16_to_cpu(pcie_iounit_pg1.NVMeMaxQueueDepth)) ?
5422 dinitprintk(ioc, pr_err(
5424 ioc->max_wideport_qd, ioc->max_narrowport_qd,
5425 ioc->max_sata_qd, ioc->max_nvme_qd));
5432 * @ioc : per adapter object
5437 mpt3sas_atto_validate_nvram(struct MPT3SAS_ADAPTER *ioc,
5455 ioc_err(ioc, "Invalid ATTO NVRAM checksum\n");
5465 ioc_err(ioc, "Invalid ATTO NVRAM signature\n");
5467 ioc_info(ioc, "Invalid ATTO NVRAM version");
5474 ioc_err(ioc, "Invalid ATTO SAS address\n");
5483 * @ioc : per adapter object
5488 mpt3sas_atto_get_sas_addr(struct MPT3SAS_ADAPTER *ioc, union ATTO_SAS_ADDRESS *sas_addr)
5496 r = mpt3sas_config_get_manufacturing_pg1(ioc, &mpi_reply, &mfg_pg1);
5498 ioc_err(ioc, "Failed to read manufacturing page 1\n");
5504 r = mpt3sas_atto_validate_nvram(ioc, nvram);
5516 * @ioc : per adapter object
5521 mpt3sas_atto_init(struct MPT3SAS_ADAPTER *ioc)
5532 r = mpt3sas_atto_get_sas_addr(ioc, &sas_addr);
5537 r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, NULL, 0);
5539 ioc_err(ioc, "Failed to read ATTO bios page 4 header.\n");
5546 ioc_err(ioc, "Failed to allocate memory for ATTO bios page.\n");
5551 r = mpt3sas_config_get_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
5553 ioc_err(ioc, "Failed to read ATTO bios page 4\n");
5567 r = mpt3sas_config_set_bios_pg4(ioc, &mpi_reply, bios_pg4, sz);
5576 * @ioc: per adapter object
5579 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
5586 ioc->nvme_abort_timeout = 30;
5588 rc = mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply,
5589 &ioc->manu_pg0);
5592 if (ioc->ir_firmware) {
5593 rc = mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
5594 &ioc->manu_pg10);
5599 if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO) {
5600 rc = mpt3sas_atto_init(ioc);
5609 rc = mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply,
5610 &ioc->manu_pg11);
5613 if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
5615 ioc->name);
5616 ioc->manu_pg11.EEDPTagMode &= ~0x3;
5617 ioc->manu_pg11.EEDPTagMode |= 0x1;
5618 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
5619 &ioc->manu_pg11);
5621 if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
5622 ioc->tm_custom_handling = 1;
5624 ioc->tm_custom_handling = 0;
5625 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
5626 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
5627 else if (ioc->manu_pg11.NVMeAbortTO >
5629 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
5631 ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
5633 ioc->time_sync_interval =
5634 ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_MASK;
5635 if (ioc->time_sync_interval) {
5636 if (ioc->manu_pg11.TimeSyncInterval & MPT3SAS_TIMESYNC_UNIT_MASK)
5637 ioc->time_sync_interval =
5638 ioc->time_sync_interval * SECONDS_PER_HOUR;
5640 ioc->time_sync_interval =
5641 ioc->time_sync_interval * SECONDS_PER_MIN;
5642 dinitprintk(ioc, ioc_info(ioc,
5644 ioc->time_sync_interval, (ioc->manu_pg11.TimeSyncInterval &
5647 if (ioc->is_gen35_ioc)
5648 ioc_warn(ioc,
5651 rc = _base_assign_fw_reported_qd(ioc);
5658 if (ioc->pdev->vendor == MPI2_MFGPAGE_VENDORID_ATTO)
5659 ioc->bios_pg3.BiosVersion = 0;
5661 rc = mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
5664 rc = mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
5669 rc = mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
5672 rc = mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
5675 rc = mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
5678 rc = mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &iounit_pg8);
5681 _base_display_ioc_capabilities(ioc);
5687 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
5688 if ((ioc->facts.IOCCapabilities &
5695 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
5696 rc = mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
5701 ioc->temp_sensors_count = iounit_pg8.NumSensors;
5702 if (ioc->is_aero_ioc) {
5703 rc = _base_update_ioc_page1_inlinewith_perf_mode(ioc);
5707 if (ioc->is_gen35_ioc) {
5708 if (ioc->is_driver_loading) {
5709 rc = _base_get_diag_triggers(ioc);
5724 _base_check_for_trigger_pages_support(ioc, &tg_flags);
5725 if (!ioc->supports_trigger_pages && tg_flags != -EFAULT)
5726 _base_update_diag_trigger_pages(ioc);
5727 else if (ioc->supports_trigger_pages &&
5729 ioc->supports_trigger_pages = 0;
5737 * @ioc: per adapter object
5742 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
5748 enclosure_dev_next, &ioc->enclosure_list, list) {
5756 * @ioc: per adapter object
5761 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
5767 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
5769 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5771 if (ioc->request) {
5772 dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
5773 ioc->request, ioc->request_dma);
5774 dexitprintk(ioc,
5775 ioc_info(ioc, "request_pool(0x%p): free\n",
5776 ioc->request));
5777 ioc->request = NULL;
5780 if (ioc->sense) {
5781 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
5782 dma_pool_destroy(ioc->sense_dma_pool);
5783 dexitprintk(ioc,
5784 ioc_info(ioc, "sense_pool(0x%p): free\n",
5785 ioc->sense));
5786 ioc->sense = NULL;
5789 if (ioc->reply) {
5790 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
5791 dma_pool_destroy(ioc->reply_dma_pool);
5792 dexitprintk(ioc,
5793 ioc_info(ioc, "reply_pool(0x%p): free\n",
5794 ioc->reply));
5795 ioc->reply = NULL;
5798 if (ioc->reply_free) {
5799 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
5800 ioc->reply_free_dma);
5801 dma_pool_destroy(ioc->reply_free_dma_pool);
5802 dexitprintk(ioc,
5803 ioc_info(ioc, "reply_free_pool(0x%p): free\n",
5804 ioc->reply_free));
5805 ioc->reply_free = NULL;
5808 if (ioc->reply_post) {
5814 if (ioc->reply_post[i].reply_post_free) {
5816 ioc->reply_post_free_dma_pool,
5817 ioc->reply_post[i].reply_post_free,
5818 ioc->reply_post[i].reply_post_free_dma);
5819 dexitprintk(ioc, ioc_info(ioc,
5821 ioc->reply_post[i].reply_post_free));
5822 ioc->reply_post[i].reply_post_free =
5828 dma_pool_destroy(ioc->reply_post_free_dma_pool);
5829 if (ioc->reply_post_free_array &&
5830 ioc->rdpq_array_enable) {
5831 dma_pool_free(ioc->reply_post_free_array_dma_pool,
5832 ioc->reply_post_free_array,
5833 ioc->reply_post_free_array_dma);
5834 ioc->reply_post_free_array = NULL;
5836 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
5837 kfree(ioc->reply_post);
5840 if (ioc->pcie_sgl_dma_pool) {
5841 for (i = 0; i < ioc->scsiio_depth; i++) {
5842 dma_pool_free(ioc->pcie_sgl_dma_pool,
5843 ioc->pcie_sg_lookup[i].pcie_sgl,
5844 ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5845 ioc->pcie_sg_lookup[i].pcie_sgl = NULL;
5847 dma_pool_destroy(ioc->pcie_sgl_dma_pool);
5849 kfree(ioc->pcie_sg_lookup);
5850 ioc->pcie_sg_lookup = NULL;
5852 if (ioc->config_page) {
5853 dexitprintk(ioc,
5854 ioc_info(ioc, "config_page(0x%p): free\n",
5855 ioc->config_page));
5856 dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
5857 ioc->config_page, ioc->config_page_dma);
5860 kfree(ioc->hpr_lookup);
5861 ioc->hpr_lookup = NULL;
5862 kfree(ioc->internal_lookup);
5863 ioc->internal_lookup = NULL;
5864 if (ioc->chain_lookup) {
5865 for (i = 0; i < ioc->scsiio_depth; i++) {
5866 for (j = ioc->chains_per_prp_buffer;
5867 j < ioc->chains_needed_per_io; j++) {
5868 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5870 dma_pool_free(ioc->chain_dma_pool,
5874 kfree(ioc->chain_lookup[i].chains_per_smid);
5876 dma_pool_destroy(ioc->chain_dma_pool);
5877 kfree(ioc->chain_lookup);
5878 ioc->chain_lookup = NULL;
5881 kfree(ioc->io_queue_num);
5882 ioc->io_queue_num = NULL;
5909 * @ioc: Adapter object
5914 _base_reduce_hba_queue_depth(struct MPT3SAS_ADAPTER *ioc)
5918 if ((ioc->hba_queue_depth - reduce_sz) >
5919 (ioc->internal_depth + INTERNAL_SCSIIO_CMDS_COUNT)) {
5920 ioc->hba_queue_depth -= reduce_sz;
5929 * @ioc: Adapter object
5936 _base_allocate_pcie_sgl_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
5941 ioc->pcie_sgl_dma_pool =
5942 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz,
5943 ioc->page_size, 0);
5944 if (!ioc->pcie_sgl_dma_pool) {
5945 ioc_err(ioc, "PCIe SGL pool: dma_pool_create failed\n");
5949 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
5950 ioc->chains_per_prp_buffer =
5951 min(ioc->chains_per_prp_buffer, ioc->chains_needed_per_io);
5952 for (i = 0; i < ioc->scsiio_depth; i++) {
5953 ioc->pcie_sg_lookup[i].pcie_sgl =
5954 dma_pool_alloc(ioc->pcie_sgl_dma_pool, GFP_KERNEL,
5955 &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5956 if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
5957 ioc_err(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
5962 ioc->pcie_sg_lookup[i].pcie_sgl_dma, sz)) {
5963 ioc_err(ioc, "PCIE SGLs are not in same 4G !! pcie sgl (0x%p) dma = (0x%llx)\n",
5964 ioc->pcie_sg_lookup[i].pcie_sgl,
5966 ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5967 ioc->use_32bit_dma = true;
5971 for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
5972 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5974 ioc->pcie_sg_lookup[i].pcie_sgl +
5975 (j * ioc->chain_segment_sz);
5977 ioc->pcie_sg_lookup[i].pcie_sgl_dma +
5978 (j * ioc->chain_segment_sz);
5981 dinitprintk(ioc, ioc_info(ioc,
5983 ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
5984 dinitprintk(ioc, ioc_info(ioc,
5986 ioc->chains_per_prp_buffer));
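
Two techniques above recur in the allocators that follow. First, spare room in each PRP page is carved into chain segments (the j * chain_segment_sz offsets), so those chains need no separate pool. Second, every dma_pool_alloc() is checked against a 4GB boundary: the HBA programs a single upper-32-bit base for some queues, so an element that straddles a 4GB line is unusable, and the driver falls back to a 32-bit DMA mask and retries. The boundary test reduces to comparing the upper halves of the first and last byte addresses, in the spirit of mpt3sas_check_same_4gb_region():

#include <linux/kernel.h>
#include <linux/types.h>

/* Sketch of the same-4GB-region test applied after each allocation. */
static bool demo_same_4gb_region(dma_addr_t start, u32 sz)
{
	dma_addr_t end = start + sz - 1;

	return upper_32_bits(start) == upper_32_bits(end);
}
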
5993 * @ioc: Adapter object
5999 _base_allocate_chain_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
6004 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
6005 ioc->chain_segment_sz, 16, 0);
6006 if (!ioc->chain_dma_pool)
6009 for (i = 0; i < ioc->scsiio_depth; i++) {
6010 for (j = ioc->chains_per_prp_buffer;
6011 j < ioc->chains_needed_per_io; j++) {
6012 ctr = &ioc->chain_lookup[i].chains_per_smid[j];
6013 ctr->chain_buffer = dma_pool_alloc(ioc->chain_dma_pool,
6018 ctr->chain_buffer_dma, ioc->chain_segment_sz)) {
6019 ioc_err(ioc,
6023 ioc->use_32bit_dma = true;
6028 dinitprintk(ioc, ioc_info(ioc,
6030 ioc->scsiio_depth, ioc->chain_segment_sz, ((ioc->scsiio_depth *
6031 (ioc->chains_needed_per_io - ioc->chains_per_prp_buffer) *
6032 ioc->chain_segment_sz))/1024));
6039 * @ioc: Adapter object
6044 _base_allocate_sense_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
6046 ioc->sense_dma_pool =
6047 dma_pool_create("sense pool", &ioc->pdev->dev, sz, 4, 0);
6048 if (!ioc->sense_dma_pool)
6050 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool,
6051 GFP_KERNEL, &ioc->sense_dma);
6052 if (!ioc->sense)
6054 if (!mpt3sas_check_same_4gb_region(ioc->sense_dma, sz)) {
6055 dinitprintk(ioc, pr_err(
6057 ioc->sense, (unsigned long long) ioc->sense_dma));
6058 ioc->use_32bit_dma = true;
6061 ioc_info(ioc,
6063 ioc->sense, (unsigned long long)ioc->sense_dma,
6064 ioc->scsiio_depth, SCSI_SENSE_BUFFERSIZE, sz/1024);
6071 * @ioc: Adapter object
6076 _base_allocate_reply_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
6079 ioc->reply_dma_pool = dma_pool_create("reply pool",
6080 &ioc->pdev->dev, sz, 4, 0);
6081 if (!ioc->reply_dma_pool)
6083 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
6084 &ioc->reply_dma);
6085 if (!ioc->reply)
6087 if (!mpt3sas_check_same_4gb_region(ioc->reply_dma, sz)) {
6088 dinitprintk(ioc, pr_err(
6090 ioc->reply, (unsigned long long) ioc->reply_dma));
6091 ioc->use_32bit_dma = true;
6094 ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
6095 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
6096 ioc_info(ioc,
6098 ioc->reply, (unsigned long long)ioc->reply_dma,
6099 ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024);
6106 * @ioc: Adapter object
6111 _base_allocate_reply_free_dma_pool(struct MPT3SAS_ADAPTER *ioc, u32 sz)
6114 ioc->reply_free_dma_pool = dma_pool_create(
6115 "reply_free pool", &ioc->pdev->dev, sz, 16, 0);
6116 if (!ioc->reply_free_dma_pool)
6118 ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool,
6119 GFP_KERNEL, &ioc->reply_free_dma);
6120 if (!ioc->reply_free)
6122 if (!mpt3sas_check_same_4gb_region(ioc->reply_free_dma, sz)) {
6123 dinitprintk(ioc,
6125 ioc->reply_free, (unsigned long long) ioc->reply_free_dma));
6126 ioc->use_32bit_dma = true;
6129 memset(ioc->reply_free, 0, sz);
6130 dinitprintk(ioc, ioc_info(ioc,
6132 ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
6133 dinitprintk(ioc, ioc_info(ioc,
6135 (unsigned long long)ioc->reply_free_dma));
6142 * @ioc: Adapter object
6148 _base_allocate_reply_post_free_array(struct MPT3SAS_ADAPTER *ioc,
6151 ioc->reply_post_free_array_dma_pool =
6153 &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
6154 if (!ioc->reply_post_free_array_dma_pool)
6156 ioc->reply_post_free_array =
6157 dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
6158 GFP_KERNEL, &ioc->reply_post_free_array_dma);
6159 if (!ioc->reply_post_free_array)
6161 if (!mpt3sas_check_same_4gb_region(ioc->reply_post_free_array_dma,
6163 dinitprintk(ioc, pr_err(
6165 ioc->reply_free,
6166 (unsigned long long) ioc->reply_free_dma));
6167 ioc->use_32bit_dma = true;
6175 * @ioc: per adapter object
6180 base_alloc_rdpq_dma_pool(struct MPT3SAS_ADAPTER *ioc, int sz)
6184 int reply_post_free_sz = ioc->reply_post_queue_depth *
6186 int count = ioc->rdpq_array_enable ? ioc->reply_queue_count : 1;
6188 ioc->reply_post = kcalloc(count, sizeof(struct reply_post_struct),
6190 if (!ioc->reply_post)
6203 ioc->reply_post_free_dma_pool =
6205 &ioc->pdev->dev, sz, 16, 0);
6206 if (!ioc->reply_post_free_dma_pool)
6210 ioc->reply_post[i].reply_post_free =
6211 dma_pool_zalloc(ioc->reply_post_free_dma_pool,
6213 &ioc->reply_post[i].reply_post_free_dma);
6214 if (!ioc->reply_post[i].reply_post_free)
6226 ioc->reply_post[i].reply_post_free_dma, sz)) {
6227 dinitprintk(ioc,
6228 ioc_err(ioc, "bad Replypost free pool(0x%p)"
6230 ioc->reply_post[i].reply_post_free,
6232 ioc->reply_post[i].reply_post_free_dma));
6238 ioc->reply_post[i].reply_post_free =
6240 ((long)ioc->reply_post[i-1].reply_post_free
6242 ioc->reply_post[i].reply_post_free_dma =
6244 (ioc->reply_post[i-1].reply_post_free_dma +
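
The RDPQ pool above allocates reply post queues in chunks of up to RDPQ_MAX_INDEX_IN_ONE_CHUNK (16) so that each chunk sits inside one 4GB region; within a chunk, queue i is carved from queue i-1 by plain pointer arithmetic (lines 6238-6244). A sketch of the carving for a single chunk:

#include <linux/types.h>

struct demo_rdpq {
	void *virt;
	dma_addr_t dma;
};

/* Carve per-queue bases out of one zeroed chunk; assumes count <= 16,
 * i.e. everything fits in the single chunk passed in.
 */
static void demo_carve_rdpq(struct demo_rdpq *q, int count,
			    void *chunk_virt, dma_addr_t chunk_dma,
			    size_t queue_sz)
{
	int i;

	q[0].virt = chunk_virt;
	q[0].dma  = chunk_dma;
	for (i = 1; i < count; i++) {
		q[i].virt = (char *)q[i - 1].virt + queue_sz;
		q[i].dma  = q[i - 1].dma + queue_sz;
	}
}
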
6253 * @ioc: per adapter object
6258 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
6272 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6276 facts = &ioc->facts;
6282 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
6293 if (ioc->is_mcpu_endpoint)
6294 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
6301 ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
6304 ioc->shost->sg_tablesize = sg_tablesize;
6307 ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
6309 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
6312 ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
6316 ioc->internal_depth = 10;
6319 ioc->hi_priority_depth = ioc->internal_depth - (5);
6323 ioc->internal_depth, facts->RequestCredit);
6328 (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
6337 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
6340 ioc->request_sz = facts->IOCRequestFrameSize * 4;
6343 ioc->reply_sz = facts->ReplyFrameSize * 4;
6346 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
6348 ioc->chain_segment_sz =
6353 ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
6356 ioc->chain_segment_sz = ioc->request_sz;
6359 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
6364 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
6366 ioc->max_sges_in_main_message = max_sge_elements/sge_size;
6369 max_sge_elements = ioc->chain_segment_sz - sge_size;
6370 ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
6375 chains_needed_per_io = ((ioc->shost->sg_tablesize -
6376 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
6380 ioc->shost->sg_tablesize = min_t(u16,
6381 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
6382 * chains_needed_per_io), ioc->shost->sg_tablesize);
6384 ioc->chains_needed_per_io = chains_needed_per_io;
6387 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
6390 if (ioc->is_mcpu_endpoint)
6391 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
6394 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
6395 ioc->reply_free_queue_depth + 1;
6397 if (ioc->reply_post_queue_depth % 16)
6398 ioc->reply_post_queue_depth += 16 -
6399 (ioc->reply_post_queue_depth % 16);
6402 if (ioc->reply_post_queue_depth >
6404 ioc->reply_post_queue_depth =
6407 ioc->hba_queue_depth =
6408 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
6409 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
6412 ioc_info(ioc,
6415 ioc->max_sges_in_main_message,
6416 ioc->max_sges_in_chain_message,
6417 ioc->shost->sg_tablesize,
6418 ioc->chains_needed_per_io);
6421 reply_post_free_sz = ioc->reply_post_queue_depth *
6424 if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
6425 || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK))
6426 rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
6427 ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
6433 _base_release_memory_pools(ioc);
6434 ioc->use_32bit_dma = true;
6435 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
6436 ioc_err(ioc,
6437 "32 DMA mask failed %s\n", pci_name(ioc->pdev));
6440 if (base_alloc_rdpq_dma_pool(ioc, rdpq_sz))
6444 total_sz = rdpq_sz * (!ioc->rdpq_array_enable ? 1 :
6445 DIV_ROUND_UP(ioc->reply_queue_count, RDPQ_MAX_INDEX_IN_ONE_CHUNK));
6446 ioc->scsiio_depth = ioc->hba_queue_depth -
6447 ioc->hi_priority_depth - ioc->internal_depth;
6452 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
6453 dinitprintk(ioc,
6454 ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
6455 ioc->shost->can_queue));
6460 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
6461 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
6464 sz += (ioc->hi_priority_depth * ioc->request_sz);
6467 sz += (ioc->internal_depth * ioc->request_sz);
6469 ioc->request_dma_sz = sz;
6470 ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
6471 &ioc->request_dma, GFP_KERNEL);
6472 if (!ioc->request) {
6473 ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
6474 ioc->hba_queue_depth, ioc->chains_needed_per_io,
6475 ioc->request_sz, sz / 1024);
6476 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
6479 ioc->hba_queue_depth -= retry_sz;
6480 _base_release_memory_pools(ioc);
6485 ioc_err(ioc, "request pool: dma_alloc_coherent succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
6486 ioc->hba_queue_depth, ioc->chains_needed_per_io,
6487 ioc->request_sz, sz / 1024);
6490 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
6491 ioc->request_sz);
6492 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
6493 ioc->request_sz);
6496 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
6497 ioc->request_sz);
6498 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
6499 ioc->request_sz);
6501 ioc_info(ioc,
6504 ioc->request, (unsigned long long) ioc->request_dma,
6505 ioc->hba_queue_depth, ioc->request_sz,
6506 (ioc->hba_queue_depth * ioc->request_sz) / 1024);
6510 dinitprintk(ioc,
6511 ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
6512 ioc->request, ioc->scsiio_depth));
6514 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
6515 sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
6516 ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
6517 if (!ioc->chain_lookup) {
6518 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
6522 sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
6523 for (i = 0; i < ioc->scsiio_depth; i++) {
6524 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
6525 if (!ioc->chain_lookup[i].chains_per_smid) {
6526 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
6532 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
6534 if (!ioc->hpr_lookup) {
6535 ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
6538 ioc->hi_priority_smid = ioc->scsiio_depth + 1;
6539 dinitprintk(ioc,
6540 ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
6541 ioc->hi_priority,
6542 ioc->hi_priority_depth, ioc->hi_priority_smid));
6545 ioc->internal_lookup = kcalloc(ioc->internal_depth,
6547 if (!ioc->internal_lookup) {
6548 ioc_err(ioc, "internal_lookup: kcalloc failed\n");
6551 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
6552 dinitprintk(ioc,
6553 ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
6554 ioc->internal,
6555 ioc->internal_depth, ioc->internal_smid));
6557 ioc->io_queue_num = kcalloc(ioc->scsiio_depth,
6559 if (!ioc->io_queue_num)
6575 ioc->chains_per_prp_buffer = 0;
6576 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
6578 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
6579 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
6582 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
6583 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
6584 if (!ioc->pcie_sg_lookup) {
6585 ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
6588 sz = nvme_blocks_needed * ioc->page_size;
6589 rc = _base_allocate_pcie_sgl_pool(ioc, sz);
6594 total_sz += sz * ioc->scsiio_depth;
6597 rc = _base_allocate_chain_dma_pool(ioc, ioc->chain_segment_sz);
6602 total_sz += ioc->chain_segment_sz * ((ioc->chains_needed_per_io -
6603 ioc->chains_per_prp_buffer) * ioc->scsiio_depth);
6604 dinitprintk(ioc,
6605 ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
6606 ioc->chain_depth, ioc->chain_segment_sz,
6607 (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
6609 sense_sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
6610 rc = _base_allocate_sense_dma_pool(ioc, sense_sz);
6617 sz = ioc->reply_free_queue_depth * ioc->reply_sz;
6618 rc = _base_allocate_reply_pool(ioc, sz);
6626 sz = ioc->reply_free_queue_depth * 4;
6627 rc = _base_allocate_reply_free_dma_pool(ioc, sz);
6632 dinitprintk(ioc,
6633 ioc_info(ioc, "reply_free_dma (0x%llx)\n",
6634 (unsigned long long)ioc->reply_free_dma));
6636 if (ioc->rdpq_array_enable) {
6637 reply_post_free_array_sz = ioc->reply_queue_count *
6639 rc = _base_allocate_reply_post_free_array(ioc,
6646 ioc->config_page_sz = 512;
6647 ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
6648 ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
6649 if (!ioc->config_page) {
6650 ioc_err(ioc, "config page: dma_alloc_coherent failed\n");
6654 ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n",
6655 ioc->config_page, (unsigned long long)ioc->config_page_dma,
6656 ioc->config_page_sz);
6657 total_sz += ioc->config_page_sz;
6659 ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
6661 ioc_info(ioc, "Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n",
6662 ioc->shost->can_queue, facts->RequestCredit);
6663 ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
6664 ioc->shost->sg_tablesize);
6668 _base_release_memory_pools(ioc);
6669 if (ioc->use_32bit_dma && (ioc->dma_mask > 32)) {
6671 if (_base_config_dma_addressing(ioc, ioc->pdev) != 0) {
6673 pci_name(ioc->pdev));
6676 } else if (_base_reduce_hba_queue_depth(ioc) != 0)
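
The sizing pass above pins down a few fixed relationships: the reply free queue holds 64 more entries than hba_queue_depth; the reply descriptor post queue holds both of those plus one and is rounded up to a multiple of 16, as the MPI specification requires; and scsiio_depth is what remains of hba_queue_depth after the high-priority and internal slices. A worked sketch with illustrative numbers:

#include <stdio.h>

int main(void)
{
	/* Illustrative inputs, not from any particular HBA. */
	unsigned int hba_queue_depth = 1000;
	unsigned int hi_priority_depth = 5, internal_depth = 10;
	unsigned int reply_free, reply_post, scsiio;

	reply_free = hba_queue_depth + 64;
	reply_post = hba_queue_depth + reply_free + 1;
	if (reply_post % 16)			/* round up to multiple of 16 */
		reply_post += 16 - (reply_post % 16);
	scsiio = hba_queue_depth - hi_priority_depth - internal_depth;

	printf("reply_free=%u reply_post=%u scsiio=%u\n",
	       reply_free, reply_post, scsiio);	/* 1064 2080 985 */
	return 0;
}
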
6686 * @ioc: Pointer to MPT_ADAPTER structure
6693 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
6697 s = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
6703 * _base_wait_on_iocstate - waiting on a particular ioc state
6704 * @ioc: per adapter object
6711 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
6719 current_state = mpt3sas_base_get_iocstate(ioc, 1);
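
The wait helper polls the cooked IOC state (the Doorbell register masked with MPI2_IOC_STATE_MASK) until it matches the wanted value or the timeout lapses, sleeping roughly a millisecond per iteration. A sketch of that bounded poll, with read_state() standing in for the masked Doorbell read:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int demo_wait_on_state(u32 (*read_state)(void), u32 wanted,
			      int timeout_secs)
{
	int cntdn = 1000 * timeout_secs;	/* ~1 ms per poll */

	do {
		if (read_state() == wanted)
			return 0;
		usleep_range(1000, 1500);
	} while (--cntdn);
	return -ETIMEDOUT;	/* caller typically escalates to a reset */
}
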
6736 * @ioc: per adapter object
6741 _base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
6744 u32 __iomem *reg = (u32 __iomem *)ioc->chip;
6746 ioc_info(ioc, "System Register set:\n");
6754 * @ioc: per adapter object
6763 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
6771 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6773 dhsprintk(ioc,
6774 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6783 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6789 _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
6797 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6799 dhsprintk(ioc,
6800 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6809 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6817 * @ioc: per adapter object
6826 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
6835 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
6837 dhsprintk(ioc,
6838 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6842 doorbell = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
6845 mpt3sas_print_fault_code(ioc, doorbell);
6850 mpt3sas_print_coredump_info(ioc, doorbell);
6861 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
6868 * @ioc: per adapter object
6874 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
6882 doorbell_reg = ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
6884 dhsprintk(ioc,
6885 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
6894 ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
6901 * @ioc: per adapter object
6908 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
6915 ioc_err(ioc, "%s: unknown reset_type\n", __func__);
6919 if (!(ioc->facts.IOCCapabilities &
6923 ioc_info(ioc, "sending message unit reset !!\n");
6926 &ioc->chip->Doorbell);
6927 if ((_base_wait_for_doorbell_ack(ioc, 15))) {
6932 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
6934 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6941 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6942 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
6948 MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 ||
6949 ioc->fault_reset_work_q == NULL)) {
6951 &ioc->ioc_reset_in_progress_lock, flags);
6952 mpt3sas_print_coredump_info(ioc, ioc_state);
6953 mpt3sas_base_wait_for_coredump_completion(ioc,
6956 &ioc->ioc_reset_in_progress_lock, flags);
6958 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
6960 ioc_info(ioc, "message unit reset: %s\n",
6967 * @ioc: per adapter object
6976 mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
6982 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
6993 if (ioc->is_driver_loading)
6997 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
7001 ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
7005 ioc_info(ioc, "ioc is operational\n");
7011 * @ioc: per adapter object
7021 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
7030 if ((ioc->base_readl_ext_retry(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
7031 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
7036 if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
7038 writel(0, &ioc->chip->HostInterruptStatus);
7040 /* send message to ioc */
7043 &ioc->chip->Doorbell);
7045 if ((_base_spin_on_doorbell_int(ioc, 5))) {
7046 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
7050 writel(0, &ioc->chip->HostInterruptStatus);
7052 if ((_base_wait_for_doorbell_ack(ioc, 5))) {
7053 ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
7060 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
7061 if ((_base_wait_for_doorbell_ack(ioc, 5)))
7066 ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
7072 if ((_base_wait_for_doorbell_int(ioc, timeout))) {
7073 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
7079 reply[0] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
7081 writel(0, &ioc->chip->HostInterruptStatus);
7082 if ((_base_wait_for_doorbell_int(ioc, 5))) {
7083 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
7087 reply[1] = le16_to_cpu(ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
7089 writel(0, &ioc->chip->HostInterruptStatus);
7092 if ((_base_wait_for_doorbell_int(ioc, 5))) {
7093 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
7098 ioc->base_readl_ext_retry(&ioc->chip->Doorbell);
7101 ioc->base_readl_ext_retry(&ioc->chip->Doorbell)
7103 writel(0, &ioc->chip->HostInterruptStatus);
7106 _base_wait_for_doorbell_int(ioc, 5);
7107 if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
7108 dhsprintk(ioc,
7109 ioc_info(ioc, "doorbell is in use (line=%d)\n",
7112 writel(0, &ioc->chip->HostInterruptStatus);
7114 if (ioc->logging_level & MPT_DEBUG_INIT) {
7118 ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
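
The handshake routine above drives the whole request/reply exchange through the one Doorbell register: check that MPI2_DOORBELL_USED is clear, clear HostInterruptStatus, post the handshake function with the request length in dwords, then feed the request one dword per ack and read the reply back 16 bits at a time, clearing the interrupt status between reads. A condensed sketch with stubbed accessors: demo_read/demo_write and the wait helpers stand in for readl()/writel() and the _base_wait_for_doorbell_* routines, and the bit values are placeholders, not the MPI definitions:

#include <linux/errno.h>
#include <linux/types.h>

enum demo_reg { DEMO_DOORBELL, DEMO_HIS };

extern u32 demo_read(enum demo_reg r);		/* stub for readl() */
extern void demo_write(enum demo_reg r, u32 v);	/* stub for writel() */
extern int demo_wait_db_int(void);	/* doorbell interrupt asserted */
extern int demo_wait_db_ack(void);	/* IOC acknowledged last write */

#define DEMO_DOORBELL_USED	(1u << 27)	/* placeholder bit */
#define DEMO_FN_HANDSHAKE	0x42u		/* placeholder function */

static int demo_handshake(const u32 *req, int req_dwords,
			  u16 *reply, int reply_words16)
{
	int i;

	if (demo_read(DEMO_DOORBELL) & DEMO_DOORBELL_USED)
		return -EBUSY;			/* doorbell already in use */
	demo_write(DEMO_HIS, 0);		/* clear stale status */
	demo_write(DEMO_DOORBELL,
		   (DEMO_FN_HANDSHAKE << 24) | (req_dwords << 16));
	if (demo_wait_db_int() || demo_wait_db_ack())
		return -EFAULT;
	demo_write(DEMO_HIS, 0);
	for (i = 0; i < req_dwords; i++) {	/* request, one dword per ack */
		demo_write(DEMO_DOORBELL, req[i]);
		if (demo_wait_db_ack())
			return -EFAULT;
	}
	for (i = 0; i < reply_words16; i++) {	/* reply, 16 bits per read */
		if (demo_wait_db_int())
			return -EFAULT;
		reply[i] = demo_read(DEMO_DOORBELL) & 0xffff;
		demo_write(DEMO_HIS, 0);
	}
	return 0;
}
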
7126 * @ioc: per adapter object
7139 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
7148 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7150 mutex_lock(&ioc->base_cmds.mutex);
7152 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
7153 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
7158 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
7162 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7164 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7170 ioc->base_cmds.status = MPT3_CMD_PENDING;
7171 request = mpt3sas_base_get_msg_frame(ioc, smid);
7172 ioc->base_cmds.smid = smid;
7176 ioc->ioc_link_reset_in_progress = 1;
7177 init_completion(&ioc->base_cmds.done);
7178 ioc->put_smid_default(ioc, smid);
7179 wait_for_completion_timeout(&ioc->base_cmds.done,
7183 ioc->ioc_link_reset_in_progress)
7184 ioc->ioc_link_reset_in_progress = 0;
7185 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
7186 mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status,
7191 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
7192 memcpy(mpi_reply, ioc->base_cmds.reply,
7196 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7201 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7202 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7205 mutex_unlock(&ioc->base_cmds.mutex);
7211 * @ioc: per adapter object
7221 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
7229 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7231 mutex_lock(&ioc->base_cmds.mutex);
7233 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
7234 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
7239 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
7243 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7245 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7251 ioc->base_cmds.status = MPT3_CMD_PENDING;
7252 request = mpt3sas_base_get_msg_frame(ioc, smid);
7253 ioc->base_cmds.smid = smid;
7254 memset(request, 0, ioc->request_sz);
7256 init_completion(&ioc->base_cmds.done);
7257 ioc->put_smid_default(ioc, smid);
7258 wait_for_completion_timeout(&ioc->base_cmds.done,
7260 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
7261 mpt3sas_check_cmd_timeout(ioc,
7262 ioc->base_cmds.status, mpi_request,
7266 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
7267 memcpy(mpi_reply, ioc->base_cmds.reply,
7271 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7276 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
7277 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7280 mutex_unlock(&ioc->base_cmds.mutex);
7285 * _base_get_port_facts - obtain port facts reply and save in ioc
7286 * @ioc: per adapter object
7292 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
7299 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7306 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
7310 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7314 pfacts = &ioc->pfacts[port];
7327 * @ioc: per adapter object
7333 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
7338 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7340 if (ioc->pci_error_recovery) {
7341 dfailprintk(ioc,
7342 ioc_info(ioc, "%s: host in pci error recovery\n",
7347 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
7348 dhsprintk(ioc,
7349 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
7357 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
7362 mpt3sas_print_fault_code(ioc, ioc_state &
7367 ioc_info(ioc,
7373 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
7375 dfailprintk(ioc,
7376 ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
7384 rc = _base_diag_reset(ioc);
7389 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
7390 * @ioc: per adapter object
7395 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
7402 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7404 r = _base_wait_for_iocstate(ioc, 10);
7406 dfailprintk(ioc,
7407 ioc_info(ioc, "%s: failed getting to correct state\n",
7415 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
7419 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7423 facts = &ioc->facts;
7434 if (ioc->msix_enable && (facts->MaxMSIxVectors <=
7435 MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
7436 ioc->combined_reply_queue = 0;
7443 ioc->ir_firmware = 1;
7446 ioc->rdpq_array_capable = 1;
7448 && ioc->is_aero_ioc)
7449 ioc->atomic_desc_capable = 1;
7453 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
7459 ioc->shost->max_id = -1;
7472 ioc->page_size = 1 << facts->CurrentHostPageSize;
7473 if (ioc->page_size == 1) {
7474 ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
7475 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
7477 dinitprintk(ioc,
7478 ioc_info(ioc, "CurrentHostPageSize(%d)\n",
7481 dinitprintk(ioc,
7482 ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
7484 dinitprintk(ioc,
7485 ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
7493 * @ioc: per adapter object
7498 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
7507 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7514 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
7518 if (_base_is_controller_msix_enabled(ioc))
7519 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
7520 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
7522 cpu_to_le16(ioc->reply_post_queue_depth);
7524 cpu_to_le16(ioc->reply_free_queue_depth);
7527 cpu_to_le32((u64)ioc->sense_dma >> 32);
7529 cpu_to_le32((u64)ioc->reply_dma >> 32);
7531 cpu_to_le64((u64)ioc->request_dma);
7533 cpu_to_le64((u64)ioc->reply_free_dma);
7535 if (ioc->rdpq_array_enable) {
7536 reply_post_free_array_sz = ioc->reply_queue_count *
7538 memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
7539 for (i = 0; i < ioc->reply_queue_count; i++)
7540 ioc->reply_post_free_array[i].RDPQBaseAddress =
7542 (u64)ioc->reply_post[i].reply_post_free_dma);
7545 cpu_to_le64((u64)ioc->reply_post_free_array_dma);
7548 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
7563 if (ioc->logging_level & MPT_DEBUG_INIT) {
7568 ioc_info(ioc, "\toffset:data\n");
7570 ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4,
7574 r = _base_handshake_req_reply_wait(ioc,
7579 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
7586 ioc_err(ioc, "%s: failed\n", __func__);
7591 ioc->timestamp_update_count = 0;
7597 * @ioc: per adapter object
7606 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
7612 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
7615 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
7622 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
7623 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
7624 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
7625 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
7628 ioc->port_enable_failed = 1;
7630 if (ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE_ASYNC) {
7631 ioc->port_enable_cmds.status &= ~MPT3_CMD_COMPLETE_ASYNC;
7633 mpt3sas_port_enable_complete(ioc);
7636 ioc->start_scan_failed = ioc_status;
7637 ioc->start_scan = 0;
7641 complete(&ioc->port_enable_cmds.done);
7647 * @ioc: per adapter object
7652 _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
7660 ioc_info(ioc, "sending port enable !!\n");
7662 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7663 ioc_err(ioc, "%s: internal command already in use\n", __func__);
7667 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7669 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7673 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
7674 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7675 ioc->port_enable_cmds.smid = smid;
7679 init_completion(&ioc->port_enable_cmds.done);
7680 ioc->put_smid_default(ioc, smid);
7681 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
7682 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
7683 ioc_err(ioc, "%s: timeout\n", __func__);
7686 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
7693 mpi_reply = ioc->port_enable_cmds.reply;
7696 ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
7703 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
7704 ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
7710 * @ioc: per adapter object
7715 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
7720 ioc_info(ioc, "sending port enable !!\n");
7722 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
7723 ioc_err(ioc, "%s: internal command already in use\n", __func__);
7727 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
7729 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7732 ioc->drv_internal_flags |= MPT_DRV_INTERNAL_FIRST_PE_ISSUED;
7733 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
7734 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE_ASYNC;
7735 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7736 ioc->port_enable_cmds.smid = smid;
7740 ioc->put_smid_default(ioc, smid);
7746 * @ioc: per adapter object
7754 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
7758 * turn on the bit in ioc->pd_handles to indicate PD
7762 if (ioc->ir_firmware)
7766 if (!ioc->bios_pg3.BiosVersion)
7776 if ((ioc->bios_pg2.CurrentBootDeviceForm &
7780 (ioc->bios_pg2.ReqBootDeviceForm &
7784 (ioc->bios_pg2.ReqAltBootDeviceForm &
7794 * @ioc: per adapter object
7797 * The mask is stored in ioc->event_masks.
7800 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
7810 ioc->event_masks[0] &= ~desired_event;
7812 ioc->event_masks[1] &= ~desired_event;
7814 ioc->event_masks[2] &= ~desired_event;
7816 ioc->event_masks[3] &= ~desired_event;
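
The unrolled if/else ladder above maintains a 128-bit event mask split across four 32-bit words, with a cleared bit meaning the event is unmasked. The equivalent index arithmetic is one line:

#include <linux/types.h>

/* Equivalent index math for the four-word event mask above. */
static void demo_unmask_event(u32 masks[4], u16 event)
{
	masks[event / 32] &= ~(1u << (event % 32));
}
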
7821 * @ioc: per adapter object
7826 _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
7833 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
7835 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
7836 ioc_err(ioc, "%s: internal command already in use\n", __func__);
7840 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
7842 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
7845 ioc->base_cmds.status = MPT3_CMD_PENDING;
7846 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
7847 ioc->base_cmds.smid = smid;
7854 cpu_to_le32(ioc->event_masks[i]);
7855 init_completion(&ioc->base_cmds.done);
7856 ioc->put_smid_default(ioc, smid);
7857 wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
7858 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
7859 ioc_err(ioc, "%s: timeout\n", __func__);
7862 if (ioc->base_cmds.status & MPT3_CMD_RESET)
7868 dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
7869 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
7872 if (ioc->drv_internal_flags & MPT_DRV_INTERNAL_FIRST_PE_ISSUED)
7874 if (mpt3sas_base_check_for_fault_and_issue_reset(ioc))
7883 * @ioc: per adapter object
7890 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
7902 (ioc->event_masks[i] & desired_event)) {
7903 ioc->event_masks[i] &= ~desired_event;
7913 mutex_lock(&ioc->base_cmds.mutex);
7914 _base_event_notification(ioc);
7915 mutex_unlock(&ioc->base_cmds.mutex);
7920 * @ioc: per adapter object
7927 mpt3sas_base_unlock_and_get_host_diagnostic(struct MPT3SAS_ADAPTER *ioc,
7939 drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
7940 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
7941 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
7942 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
7943 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
7944 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
7945 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
7946 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
7952 ioc_info(ioc,
7954 _base_dump_reg_set(ioc);
7958 *host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
7959 drsprintk(ioc,
7960 ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
7969 * @ioc: per adapter object
7973 mpt3sas_base_lock_host_diagnostic(struct MPT3SAS_ADAPTER *ioc)
7975 drsprintk(ioc, ioc_info(ioc, "disable writes to the diagnostic register\n"));
7976 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
7981 * @ioc: per adapter object
7986 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
7993 ioc_info(ioc, "sending diag reset !!\n");
7995 pci_cfg_access_lock(ioc->pdev);
7997 drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
7999 mutex_lock(&ioc->hostdiag_unlock_mutex);
8000 if (mpt3sas_base_unlock_and_get_host_diagnostic(ioc, &host_diagnostic))
8003 hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
8004 drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
8006 &ioc->chip->HostDiagnostic);
8015 host_diagnostic = ioc->base_readl_ext_retry(&ioc->chip->HostDiagnostic);
8018 ioc_info(ioc,
8020 _base_dump_reg_set(ioc);
8032 drsprintk(ioc,
8033 ioc_info(ioc, "restart the adapter assuming the\n"
8037 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
8039 drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
8041 &ioc->chip->HCBSize);
8044 drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
8046 &ioc->chip->HostDiagnostic);
8048 mpt3sas_base_lock_host_diagnostic(ioc);
8049 mutex_unlock(&ioc->hostdiag_unlock_mutex);
8051 drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
8052 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
8054 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
8056 _base_dump_reg_set(ioc);
8060 pci_cfg_access_unlock(ioc->pdev);
8061 ioc_info(ioc, "diag reset: SUCCESS\n");
8065 pci_cfg_access_unlock(ioc->pdev);
8066 ioc_err(ioc, "diag reset: FAILED\n");
8067 mutex_unlock(&ioc->hostdiag_unlock_mutex);
8073 * @ioc: per adapter object
8079 mpt3sas_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
8085 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8087 if (ioc->pci_error_recovery)
8090 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8091 dhsprintk(ioc,
8092 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
8101 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
8106 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8114 ioc_info(ioc, "unexpected doorbell active!\n");
8119 mpt3sas_print_fault_code(ioc, ioc_state &
8132 if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) {
8133 mpt3sas_print_coredump_info(ioc, ioc_state &
8135 mpt3sas_base_wait_for_coredump_completion(ioc,
8145 if (!(_base_send_ioc_reset(ioc,
8151 rc = _base_diag_reset(ioc);
8157 * @ioc: per adapter object
8162 _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
8175 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8179 &ioc->delayed_tr_list, list) {
8186 &ioc->delayed_tr_volume_list, list) {
8192 &ioc->delayed_sc_list, list) {
8198 &ioc->delayed_event_ack_list, list) {
8203 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
8206 INIT_LIST_HEAD(&ioc->hpr_free_list);
8207 smid = ioc->hi_priority_smid;
8208 for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
8209 ioc->hpr_lookup[i].cb_idx = 0xFF;
8210 ioc->hpr_lookup[i].smid = smid;
8211 list_add_tail(&ioc->hpr_lookup[i].tracker_list,
8212 &ioc->hpr_free_list);
8216 INIT_LIST_HEAD(&ioc->internal_free_list);
8217 smid = ioc->internal_smid;
8218 for (i = 0; i < ioc->internal_depth; i++, smid++) {
8219 ioc->internal_lookup[i].cb_idx = 0xFF;
8220 ioc->internal_lookup[i].smid = smid;
8221 list_add_tail(&ioc->internal_lookup[i].tracker_list,
8222 &ioc->internal_free_list);
8225 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
8228 for (i = 0, reply_address = (u32)ioc->reply_dma ;
8229 i < ioc->reply_free_queue_depth ; i++, reply_address +=
8230 ioc->reply_sz) {
8231 ioc->reply_free[i] = cpu_to_le32(reply_address);
8232 if (ioc->is_mcpu_endpoint)
8233 _base_clone_reply_to_sys_mem(ioc,
8238 if (ioc->is_driver_loading)
8239 _base_assign_reply_queues(ioc);
8243 reply_post_free_contig = ioc->reply_post[0].reply_post_free;
8244 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
8249 if (ioc->rdpq_array_enable) {
8251 ioc->reply_post[index++].reply_post_free;
8254 reply_post_free_contig += ioc->reply_post_queue_depth;
8258 for (i = 0; i < ioc->reply_post_queue_depth; i++)
8261 if (!_base_is_controller_msix_enabled(ioc))
8266 r = _base_send_ioc_init(ioc);
8273 if (!ioc->is_driver_loading)
8276 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
8277 if (rc || (_base_send_ioc_init(ioc)))
8282 ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
8283 writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
8286 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
8287 if (ioc->combined_reply_queue)
8290 ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
8294 &ioc->chip->ReplyPostHostIndex);
8296 if (!_base_is_controller_msix_enabled(ioc))
8302 mpt3sas_base_unmask_interrupts(ioc);
8304 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
8305 r = _base_display_fwpkg_version(ioc);
8310 r = _base_static_config_pages(ioc);
8314 r = _base_event_notification(ioc);
8318 if (!ioc->shost_recovery) {
8320 if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
8323 le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
8326 ioc->mfg_pg10_hide_flag = hide_flag;
8329 ioc->wait_for_discovery_to_complete =
8330 _base_determine_wait_on_discovery(ioc);
8335 r = _base_send_port_enable(ioc);
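
The reply free queue seeding at lines 8228-8231 writes the bus address of reply frame i into 4-byte entry i; the frames are laid out back to back, so successive entries step by reply_sz. A sketch of that loop:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Seed the reply free queue: one little-endian 32-bit frame address
 * per entry, frames contiguous starting at reply_dma.
 */
static void demo_seed_reply_free(__le32 *reply_free, u32 reply_dma,
				 int depth, u16 reply_sz)
{
	u32 addr = reply_dma;
	int i;

	for (i = 0; i < depth; i++, addr += reply_sz)
		reply_free[i] = cpu_to_le32(addr);
}
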
8344 * @ioc: per adapter object
8347 mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
8349 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8352 mutex_lock(&ioc->pci_access_mutex);
8353 if (ioc->chip_phys && ioc->chip) {
8354 mpt3sas_base_mask_interrupts(ioc);
8355 ioc->shost_recovery = 1;
8356 mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
8357 ioc->shost_recovery = 0;
8360 mpt3sas_base_unmap_resources(ioc);
8361 mutex_unlock(&ioc->pci_access_mutex);
8367 * @ioc: per adapter object
8372 mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
8377 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8380 ioc->cpu_count = num_online_cpus();
8383 ioc->cpu_msix_table_sz = last_cpu_id + 1;
8384 ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
8385 ioc->reply_queue_count = 1;
8386 if (!ioc->cpu_msix_table) {
8387 ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n");
8392 if (ioc->is_warpdrive) {
8393 ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
8395 if (!ioc->reply_post_host_index) {
8396 ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n");
8402 ioc->smp_affinity_enable = smp_affinity_enable;
8404 ioc->rdpq_array_enable_assigned = 0;
8405 ioc->use_32bit_dma = false;
8406 ioc->dma_mask = 64;
8407 if (ioc->is_aero_ioc) {
8408 ioc->base_readl = &_base_readl_aero;
8409 ioc->base_readl_ext_retry = &_base_readl_ext_retry;
8411 ioc->base_readl = &_base_readl;
8412 ioc->base_readl_ext_retry = &_base_readl;
8414 r = mpt3sas_base_map_resources(ioc);
8418 pci_set_drvdata(ioc->pdev, ioc->shost);
8419 r = _base_get_ioc_facts(ioc);
8421 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
8422 if (rc || (_base_get_ioc_facts(ioc)))
8426 switch (ioc->hba_mpi_version_belonged) {
8428 ioc->build_sg_scmd = &_base_build_sg_scmd;
8429 ioc->build_sg = &_base_build_sg;
8430 ioc->build_zero_len_sge = &_base_build_zero_len_sge;
8431 ioc->get_msix_index_for_smlio = &_base_get_msix_index;
8441 ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
8442 ioc->build_sg = &_base_build_sg_ieee;
8443 ioc->build_nvme_prp = &_base_build_nvme_prp;
8444 ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
8445 ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
8446 if (ioc->high_iops_queues)
8447 ioc->get_msix_index_for_smlio =
8450 ioc->get_msix_index_for_smlio = &_base_get_msix_index;
8453 if (ioc->atomic_desc_capable) {
8454 ioc->put_smid_default = &_base_put_smid_default_atomic;
8455 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
8456 ioc->put_smid_fast_path =
8458 ioc->put_smid_hi_priority =
8461 ioc->put_smid_default = &_base_put_smid_default;
8462 ioc->put_smid_fast_path = &_base_put_smid_fast_path;
8463 ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
8464 if (ioc->is_mcpu_endpoint)
8465 ioc->put_smid_scsi_io =
8468 ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
8476 ioc->build_sg_mpi = &_base_build_sg;
8477 ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
8479 r = mpt3sas_base_make_ioc_ready(ioc, SOFT_RESET);
8483 ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
8485 if (!ioc->pfacts) {
8490 for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
8491 r = _base_get_port_facts(ioc, i);
8493 rc = mpt3sas_base_check_for_fault_and_issue_reset(ioc);
8494 if (rc || (_base_get_port_facts(ioc, i)))
8499 r = _base_allocate_memory_pools(ioc);
8504 ioc->thresh_hold = irqpoll_weight;
8506 ioc->thresh_hold = ioc->hba_queue_depth/4;
8508 _base_init_irqpolls(ioc);
8509 init_waitqueue_head(&ioc->reset_wq);
8512 ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
8513 if (ioc->facts.MaxDevHandle % 8)
8514 ioc->pd_handles_sz++;
8515 ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
8517 if (!ioc->pd_handles) {
8521 ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
8523 if (!ioc->blocking_handles) {
8529 ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
8530 if (ioc->facts.MaxDevHandle % 8)
8531 ioc->pend_os_device_add_sz++;
8532 ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
8534 if (!ioc->pend_os_device_add) {
8539 ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
8540 ioc->device_remove_in_progress =
8541 kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
8542 if (!ioc->device_remove_in_progress) {
8547 ioc->fwfault_debug = mpt3sas_fwfault_debug;
8550 mutex_init(&ioc->base_cmds.mutex);
8551 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8552 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
8555 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8556 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
8559 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8560 ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
8561 mutex_init(&ioc->transport_cmds.mutex);
8564 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8565 ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
8566 mutex_init(&ioc->scsih_cmds.mutex);
8569 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8570 ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
8571 mutex_init(&ioc->tm_cmds.mutex);
8574 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8575 ioc->config_cmds.status = MPT3_CMD_NOT_USED;
8576 mutex_init(&ioc->config_cmds.mutex);
8579 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
8580 ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
8581 ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
8582 mutex_init(&ioc->ctl_cmds.mutex);
8584 if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
8585 !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
8586 !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
8587 !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
8593 ioc->event_masks[i] = -1;
8596 _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
8597 _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
8598 _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
8599 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
8600 _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
8601 _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
8602 _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
8603 _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
8604 _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
8605 _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
8606 _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
8607 _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
8608 _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
8609 if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
8610 if (ioc->is_gen35_ioc) {
8611 _base_unmask_events(ioc,
8613 _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
8614 _base_unmask_events(ioc,
8618 r = _base_make_ioc_operational(ioc);
8620 r = _base_make_ioc_operational(ioc);
8629 memcpy(&ioc->prev_fw_facts, &ioc->facts,
8632 ioc->non_operational_loop = 0;
8633 ioc->ioc_coredump_loop = 0;
8634 ioc->got_task_abort_from_ioctl = 0;
8639 ioc->remove_host = 1;
8641 mpt3sas_base_free_resources(ioc);
8642 _base_release_memory_pools(ioc);
8643 pci_set_drvdata(ioc->pdev, NULL);
8644 kfree(ioc->cpu_msix_table);
8645 if (ioc->is_warpdrive)
8646 kfree(ioc->reply_post_host_index);
8647 kfree(ioc->pd_handles);
8648 kfree(ioc->blocking_handles);
8649 kfree(ioc->device_remove_in_progress);
8650 kfree(ioc->pend_os_device_add);
8651 kfree(ioc->tm_cmds.reply);
8652 kfree(ioc->transport_cmds.reply);
8653 kfree(ioc->scsih_cmds.reply);
8654 kfree(ioc->config_cmds.reply);
8655 kfree(ioc->base_cmds.reply);
8656 kfree(ioc->port_enable_cmds.reply);
8657 kfree(ioc->ctl_cmds.reply);
8658 kfree(ioc->ctl_cmds.sense);
8659 kfree(ioc->pfacts);
8660 ioc->ctl_cmds.reply = NULL;
8661 ioc->base_cmds.reply = NULL;
8662 ioc->tm_cmds.reply = NULL;
8663 ioc->scsih_cmds.reply = NULL;
8664 ioc->transport_cmds.reply = NULL;
8665 ioc->config_cmds.reply = NULL;
8666 ioc->pfacts = NULL;
8673 * @ioc: per adapter object
8676 mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
8678 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
8680 mpt3sas_base_stop_watchdog(ioc);
8681 mpt3sas_base_free_resources(ioc);
8682 _base_release_memory_pools(ioc);
8683 mpt3sas_free_enclosure_list(ioc);
8684 pci_set_drvdata(ioc->pdev, NULL);
8685 kfree(ioc->cpu_msix_table);
8686 if (ioc->is_warpdrive)
8687 kfree(ioc->reply_post_host_index);
8688 kfree(ioc->pd_handles);
8689 kfree(ioc->blocking_handles);
8690 kfree(ioc->device_remove_in_progress);
8691 kfree(ioc->pend_os_device_add);
8692 kfree(ioc->pfacts);
8693 kfree(ioc->ctl_cmds.reply);
8694 kfree(ioc->ctl_cmds.sense);
8695 kfree(ioc->base_cmds.reply);
8696 kfree(ioc->port_enable_cmds.reply);
8697 kfree(ioc->tm_cmds.reply);
8698 kfree(ioc->transport_cmds.reply);
8699 kfree(ioc->scsih_cmds.reply);
8700 kfree(ioc->config_cmds.reply);
8705 * @ioc: per adapter object
8707 static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
8709 mpt3sas_scsih_pre_reset_handler(ioc);
8710 mpt3sas_ctl_pre_reset_handler(ioc);
8711 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
8716 * @ioc: per adapter object
8719 _base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc)
8721 dtmprintk(ioc,
8722 ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__));
8723 if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
8724 ioc->transport_cmds.status |= MPT3_CMD_RESET;
8725 mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
8726 complete(&ioc->transport_cmds.done);
8728 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
8729 ioc->base_cmds.status |= MPT3_CMD_RESET;
8730 mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
8731 complete(&ioc->base_cmds.done);
8733 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
8734 ioc->port_enable_failed = 1;
8735 ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
8736 mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
8737 if (ioc->is_driver_loading) {
8738 ioc->start_scan_failed =
8740 ioc->start_scan = 0;
8742 complete(&ioc->port_enable_cmds.done);
8745 if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
8746 ioc->config_cmds.status |= MPT3_CMD_RESET;
8747 mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
8748 ioc->config_cmds.smid = USHRT_MAX;
8749 complete(&ioc->config_cmds.done);
8755 * @ioc: per adapter object
8757 static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc)
8759 mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc);
8760 mpt3sas_ctl_clear_outstanding_ioctls(ioc);
8761 _base_clear_outstanding_mpt_commands(ioc);
8766 * @ioc: per adapter object
8768 static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
8770 mpt3sas_scsih_reset_done_handler(ioc);
8771 mpt3sas_ctl_reset_done_handler(ioc);
8772 dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
8777 * @ioc: Pointer to MPT_ADAPTER structure
8783 mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
8787 ioc->pending_io_count = 0;
8789 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8794 ioc->pending_io_count = scsi_host_busy(ioc->shost);
8796 if (!ioc->pending_io_count)
8800 wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
8808 * @ioc: Pointer to MPT_ADAPTER structure
8811 _base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
8816 struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;
8818 if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
8819 pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
8820 if (ioc->facts.MaxDevHandle % 8)
8823 pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
8826 ioc_info(ioc,
8831 memset(pd_handles + ioc->pd_handles_sz, 0,
8832 (pd_handles_sz - ioc->pd_handles_sz));
8833 ioc->pd_handles = pd_handles;
8835 blocking_handles = krealloc(ioc->blocking_handles,
8838 ioc_info(ioc,
8844 memset(blocking_handles + ioc->pd_handles_sz, 0,
8845 (pd_handles_sz - ioc->pd_handles_sz));
8846 ioc->blocking_handles = blocking_handles;
8847 ioc->pd_handles_sz = pd_handles_sz;
8849 pend_os_device_add = krealloc(ioc->pend_os_device_add,
8852 ioc_info(ioc,
8857 memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
8858 (pd_handles_sz - ioc->pend_os_device_add_sz));
8859 ioc->pend_os_device_add = pend_os_device_add;
8860 ioc->pend_os_device_add_sz = pd_handles_sz;
8863 ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
8865 ioc_info(ioc,
8872 ioc->device_remove_in_progress_sz, 0,
8873 (pd_handles_sz - ioc->device_remove_in_progress_sz));
8874 ioc->device_remove_in_progress = device_remove_in_progress;
8875 ioc->device_remove_in_progress_sz = pd_handles_sz;
8878 memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
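
When a reset raises MaxDevHandle, each per-handle bitmap above is grown with krealloc(), which preserves the old bytes but leaves the tail uninitialized; hence every grow is paired with a memset() over just the newly added region. The recurring pattern, sketched:

#include <linux/slab.h>
#include <linux/string.h>

/* Grow-and-zero-tail pattern used for each handle bitmap above. */
static void *demo_grow_bitmap(void *old, u16 old_sz, u16 new_sz)
{
	void *p = krealloc(old, new_sz, GFP_KERNEL);

	if (!p)
		return NULL;	/* caller keeps its old buffer on failure */
	memset((char *)p + old_sz, 0, new_sz - old_sz);
	return p;
}
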
8884 * @ioc: Pointer to MPT_ADAPTER structure
8890 mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
8898 dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));
8900 if (ioc->pci_error_recovery) {
8901 ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
8907 mpt3sas_halt_firmware(ioc);
8910 mutex_lock(&ioc->reset_in_progress_mutex);
8912 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
8913 ioc->shost_recovery = 1;
8914 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
8916 if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
8918 (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
8921 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
8926 ioc->htb_rel.trigger_info_dwords[1] =
8930 _base_pre_reset_handler(ioc);
8931 mpt3sas_wait_for_commands_to_complete(ioc);
8932 mpt3sas_base_mask_interrupts(ioc);
8933 mpt3sas_base_pause_mq_polling(ioc);
8934 r = mpt3sas_base_make_ioc_ready(ioc, type);
8937 _base_clear_outstanding_commands(ioc);
8942 if (ioc->is_driver_loading && ioc->port_enable_failed) {
8943 ioc->remove_host = 1;
8947 r = _base_get_ioc_facts(ioc);
8951 r = _base_check_ioc_facts_changes(ioc);
8953 ioc_info(ioc,
8958 if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
8961 " firmware version is running\n", ioc->name);
8963 r = _base_make_ioc_operational(ioc);
8965 _base_reset_done_handler(ioc);
8968 ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED");
8970 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
8971 ioc->shost_recovery = 0;
8972 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
8973 ioc->ioc_reset_count++;
8974 mutex_unlock(&ioc->reset_in_progress_mutex);
8975 mpt3sas_base_resume_mq_polling(ioc);
8980 mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
8982 mpt3sas_trigger_master(ioc,
8985 dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));