Lines matching refs:ha (identifier references to ha, the adapter block pointer; the matched lines appear to come from qla_mr.c, the ISPFx00 part of the Linux qla2xxx driver)

23  *	ha = adapter block pointer.
52 struct qla_hw_data *ha = vha->hw;
53 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
55 if (ha->pdev->error_state == pci_channel_io_perm_failure) {
67 reg = ha->iobase;
73 if (ha->flags.pci_channel_io_perm_failure) {
79 if (ha->flags.isp82xx_fw_hung) {
83 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
93 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
101 ha->flags.mbox_busy = 1;
103 ha->mcp32 = mcp;
108 spin_lock_irqsave(&ha->hardware_lock, flags);
117 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
127 ha->flags.mbox_int = 0;
128 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
143 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
144 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
146 QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
147 spin_unlock_irqrestore(&ha->hardware_lock, flags);
149 WARN_ON_ONCE(wait_for_completion_timeout(&ha->mbx_intr_comp,
155 QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code);
156 spin_unlock_irqrestore(&ha->hardware_lock, flags);
159 while (!ha->flags.mbox_int) {
164 qla2x00_poll(ha->rsp_q_map[0]);
166 if (!ha->flags.mbox_int &&
167 !(IS_QLA2200(ha) &&
177 if (ha->flags.mbox_int) {
184 ha->flags.mbox_int = 0;
185 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
187 if (ha->mailbox_out32[0] != MBS_COMMAND_COMPLETE)
192 iptr = (uint32_t *)&ha->mailbox_out32[0];
194 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
207 ha->flags.mbox_busy = 0;
210 ha->mcp32 = NULL;
212 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
217 qla2x00_poll(ha->rsp_q_map[0]);
223 ha->flags.eeh_busy) {
236 ha->flags.eeh_busy);
256 if (ha->isp_ops->abort_isp(vha)) {
270 complete(&ha->mbx_cmd_comp);
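The cluster above (file lines 52-270) is the mailbox command path: serialize on mbx_cmd_comp, load the outbound mailboxes under hardware_lock, ring the host interrupt (QLAFX00_SET_HST_INTR), then either sleep on mbx_intr_comp or poll the mbox_int flag, escalating to isp_ops->abort_isp on timeout (file line 256). A minimal sketch of the wait-or-poll half, with hypothetical names (struct fx00_mbx, ring_doorbell) standing in for the driver's own:

```c
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Stand-ins for the qla_hw_data fields used on this path. */
struct fx00_mbx {
	spinlock_t hardware_lock;
	struct completion mbx_intr_comp;
	bool mbox_int;		/* set by the ISR on mailbox completion */
	bool polling_mode;	/* true where the driver would poll */
};

static void ring_doorbell(struct fx00_mbx *mbx)
{
	/* stand-in for QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code) */
}

static int mbx_wait(struct fx00_mbx *mbx, unsigned long tov_jiffies,
		    int max_polls)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&mbx->hardware_lock, flags);
	ring_doorbell(mbx);		/* kick the firmware */
	spin_unlock_irqrestore(&mbx->hardware_lock, flags);

	if (!mbx->polling_mode)
		/* Interrupt mode: the ISR calls complete(&mbx_intr_comp). */
		return wait_for_completion_timeout(&mbx->mbx_intr_comp,
						   tov_jiffies) ? 0 : -ETIMEDOUT;

	/* Polled mode: spin, qla2x00_poll()-style, until mbox_int is set. */
	for (i = 0; !mbx->mbox_int; i++) {
		if (i >= max_polls)
			return -ETIMEDOUT;
		usleep_range(1000, 2000);
	}
	return 0;
}
```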
289 * ha = adapter block pointer.
333 * ha = adapter block pointer.
378 * ha = adapter block pointer.
396 struct qla_hw_data *ha = vha->hw;
404 mcp->mb[2] = MSD(ha->init_cb_dma);
405 mcp->mb[3] = LSD(ha->init_cb_dma);
499 struct qla_hw_data *ha = vha->hw;
501 pci_set_master(ha->pdev);
502 pci_try_set_mwi(ha->pdev);
504 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
507 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
510 if (pci_is_pcie(ha->pdev))
511 pcie_set_readrq(ha->pdev, 2048);
513 ha->chip_revision = ha->pdev->revision;
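File lines 499-513 are a conventional PCI bring-up. A sketch using only stock PCI core calls; the exact PCI_COMMAND bits are an assumption (parity/SERR# is the usual qla2xxx choice), everything else mirrors the listing:

```c
#include <linux/pci.h>

/* PCI bring-up sketch; error handling and logging elided. */
static void fx00_pci_config(struct pci_dev *pdev)
{
	u16 w;

	pci_set_master(pdev);		/* enable DMA bus mastering */
	pci_try_set_mwi(pdev);		/* Memory-Write-Invalidate, best effort */

	/* Read-modify-write of the command register; the bits set here
	 * (parity error response, SERR#) are an assumption. */
	pci_read_config_word(pdev, PCI_COMMAND, &w);
	w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
	pci_write_config_word(pdev, PCI_COMMAND, w);

	/* 2048-byte maximum read request size on PCIe parts (file line 511). */
	if (pci_is_pcie(pdev))
		pcie_set_readrq(pdev, 2048);
}
```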
527 struct qla_hw_data *ha = vha->hw;
532 spin_lock_irqsave(&ha->hardware_lock, flags);
534 QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0);
535 QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0);
538 QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02);
539 QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02);
540 QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02);
541 QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02);
544 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840);
546 QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val);
548 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844);
550 QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val);
552 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848);
554 QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val);
556 reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C);
558 QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val);
561 if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 &&
562 (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0)
569 QLAFX00_SET_HBA_SOC_REG(ha,
571 QLAFX00_SET_HBA_SOC_REG(ha,
576 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101));
579 QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1);
580 QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0);
584 QLAFX00_SET_HBA_SOC_REG(ha,
590 QLAFX00_SET_HBA_SOC_REG(ha,
597 QLAFX00_SET_HBA_SOC_REG(ha,
602 QLAFX00_SET_HBA_SOC_REG(ha,
606 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
607 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
610 QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
613 QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
615 spin_unlock_irqrestore(&ha->hardware_lock, flags);
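The SoC reset cluster (file lines 527-615) pokes fabric and reset-control registers inside one long hardware_lock critical section. The recurring shape at file lines 544-558 is get, modify, set; the modify step does not match "ha" and so is absent from the listing, which makes the OR below purely illustrative:

```c
#include <linux/io.h>
#include <linux/spinlock.h>

/* Read-modify-write analogue of QLAFX00_GET/SET_HBA_SOC_REG; whether the
 * hidden modify step sets or clears bits is not visible in this listing. */
static void soc_rmw(spinlock_t *hardware_lock, void __iomem *base,
		    u32 off, u32 bits)
{
	unsigned long flags;
	u32 v;

	spin_lock_irqsave(hardware_lock, flags);
	v = readl(base + off);		/* QLAFX00_GET_HBA_SOC_REG analogue */
	writel(v | bits, base + off);	/* QLAFX00_SET_HBA_SOC_REG analogue */
	spin_unlock_irqrestore(hardware_lock, flags);
}
```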
633 struct qla_hw_data *ha = vha->hw;
636 if (unlikely(pci_channel_offline(ha->pdev) &&
637 ha->flags.pci_channel_io_perm_failure))
640 ha->isp_ops->disable_intrs(ha);
656 struct qla_hw_data *ha = vha->hw;
657 struct req_que *req = ha->req_q_map[0];
659 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
675 struct qla_hw_data *ha = vha->hw;
676 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
691 struct qla_hw_data *ha = vha->hw;
693 if (pci_is_pcie(ha->pdev))
701 struct qla_hw_data *ha = vha->hw;
703 snprintf(str, size, "%s", ha->mr.fw_version);
708 qlafx00_enable_intrs(struct qla_hw_data *ha)
712 spin_lock_irqsave(&ha->hardware_lock, flags);
713 ha->interrupts_on = 1;
714 QLAFX00_ENABLE_ICNTRL_REG(ha);
715 spin_unlock_irqrestore(&ha->hardware_lock, flags);
719 qlafx00_disable_intrs(struct qla_hw_data *ha)
723 spin_lock_irqsave(&ha->hardware_lock, flags);
724 ha->interrupts_on = 0;
725 QLAFX00_DISABLE_ICNTRL_REG(ha);
726 spin_unlock_irqrestore(&ha->hardware_lock, flags);
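File lines 708-726 show the interrupt gate: the interrupts_on software flag and the hardware interrupt-control register are flipped together under hardware_lock, so the ISR never sees them out of sync. A compact sketch; the register value written is illustrative, since the driver hides it behind QLAFX00_ENABLE/DISABLE_ICNTRL_REG:

```c
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Flag and hardware register change atomically w.r.t. the ISR. */
static void fx00_set_intrs(spinlock_t *hardware_lock, bool *interrupts_on,
			   void __iomem *icntrl_reg, bool on)
{
	unsigned long flags;

	spin_lock_irqsave(hardware_lock, flags);
	*interrupts_on = on;
	writel(on ? ~0U : 0U, icntrl_reg);	/* illustrative values */
	spin_unlock_irqrestore(hardware_lock, flags);
}
```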
742 qlafx00_iospace_config(struct qla_hw_data *ha)
744 if (pci_request_selected_regions(ha->pdev, ha->bars,
746 ql_log_pci(ql_log_fatal, ha->pdev, 0x014e,
748 pci_name(ha->pdev));
753 if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) {
754 ql_log_pci(ql_log_warn, ha->pdev, 0x014f,
756 pci_name(ha->pdev));
759 if (pci_resource_len(ha->pdev, 0) < BAR0_LEN_FX00) {
760 ql_log_pci(ql_log_warn, ha->pdev, 0x0127,
762 pci_name(ha->pdev));
766 ha->cregbase =
767 ioremap(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00);
768 if (!ha->cregbase) {
769 ql_log_pci(ql_log_fatal, ha->pdev, 0x0128,
770 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
774 if (!(pci_resource_flags(ha->pdev, 2) & IORESOURCE_MEM)) {
775 ql_log_pci(ql_log_warn, ha->pdev, 0x0129,
777 pci_name(ha->pdev));
780 if (pci_resource_len(ha->pdev, 2) < BAR2_LEN_FX00) {
781 ql_log_pci(ql_log_warn, ha->pdev, 0x012a,
783 pci_name(ha->pdev));
787 ha->iobase =
788 ioremap(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00);
789 if (!ha->iobase) {
790 ql_log_pci(ql_log_fatal, ha->pdev, 0x012b,
791 "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev));
796 ha->max_req_queues = ha->max_rsp_queues = 1;
798 ql_log_pci(ql_log_info, ha->pdev, 0x012c,
800 ha->bars, ha->cregbase, ha->iobase);
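The iospace cluster (file lines 742-800) claims the PCI regions, verifies BAR0 and BAR2 are memory BARs of sufficient length, and ioremaps both (cregbase and iobase). A hedged reconstruction of that shape; the BAR lengths and region name are assumptions, and the ql_log_pci error reporting is elided:

```c
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/sizes.h>

/* BAR validation helper: memory BAR, large enough, then ioremap. */
static void __iomem *map_bar(struct pci_dev *pdev, int bar,
			     resource_size_t len)
{
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return NULL;		/* not an MMIO BAR */
	if (pci_resource_len(pdev, bar) < len)
		return NULL;		/* region too small */
	return ioremap(pci_resource_start(pdev, bar), len);
}

static int fx00_iospace_config(struct pci_dev *pdev, int bars,
			       void __iomem **creg, void __iomem **ioreg)
{
	if (pci_request_selected_regions(pdev, bars, "fx00-sketch"))
		return -ENODEV;

	*creg = map_bar(pdev, 0, SZ_1M);	/* BAR0_LEN_FX00: assumed */
	*ioreg = map_bar(pdev, 2, SZ_16K);	/* BAR2_LEN_FX00: assumed */
	if (*creg && *ioreg)
		return 0;

	if (*creg)
		iounmap(*creg);
	if (*ioreg)
		iounmap(*ioreg);
	pci_release_selected_regions(pdev, bars);
	return -ENODEV;
}
```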
811 struct qla_hw_data *ha = vha->hw;
812 struct req_que *req = ha->req_q_map[0];
813 struct rsp_que *rsp = ha->rsp_q_map[0];
837 struct qla_hw_data *ha = vha->hw;
838 struct req_que *req = ha->req_q_map[0];
839 struct rsp_que *rsp = ha->rsp_q_map[0];
840 dma_addr_t bar2_hdl = pci_resource_start(ha->pdev, 2);
842 req->length = ha->req_que_len;
843 req->ring = (void __force *)ha->iobase + ha->req_que_off;
844 req->dma = bar2_hdl + ha->req_que_off;
846 ql_log_pci(ql_log_info, ha->pdev, 0x012f,
855 ha->req_que_off, (u64)req->dma);
857 rsp->length = ha->rsp_que_len;
858 rsp->ring = (void __force *)ha->iobase + ha->rsp_que_off;
859 rsp->dma = bar2_hdl + ha->rsp_que_off;
861 ql_log_pci(ql_log_info, ha->pdev, 0x0131,
870 ha->rsp_que_off, (u64)rsp->dma);
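The queue mapping at file lines 837-870 is unusual: the request/response rings live inside BAR2 itself, so the CPU pointer is an offset into the ioremapped BAR and the device-visible address is the same offset from the BAR's bus address (pci_resource_start(pdev, 2)). A sketch with struct fx00_ring standing in for the driver's req_que/rsp_que bookkeeping:

```c
#include <linux/io.h>
#include <linux/types.h>

struct fx00_ring {
	void *ring;		/* CPU view, __force-cast away from __iomem */
	dma_addr_t dma;		/* device view */
	u32 length;		/* entry count (req_que_len/rsp_que_len) */
};

/* off and len come from the firmware's AEN/init mailboxes. */
static void map_ring(struct fx00_ring *q, void __iomem *iobase,
		     dma_addr_t bar2_bus, u32 off, u32 len)
{
	q->length = len;
	q->ring = (void __force *)(iobase + off);
	q->dma = bar2_bus + off;
}
```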
881 struct qla_hw_data *ha = vha->hw;
882 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
894 ha->mbx_intr_code = MSW(aenmbx7);
895 ha->rqstq_intr_code = LSW(aenmbx7);
924 ha->mbx_intr_code = MSW(aenmbx7);
925 ha->rqstq_intr_code = LSW(aenmbx7);
926 ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
927 ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
928 ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
929 ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
935 ha->mbx_intr_code, ha->rqstq_intr_code);
936 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
962 ha->mbx_intr_code = MSW(aenmbx7);
963 ha->rqstq_intr_code = LSW(aenmbx7);
964 ha->req_que_off = rd_reg_dword(&reg->initval1);
965 ha->rsp_que_off = rd_reg_dword(&reg->initval3);
966 ha->req_que_len = rd_reg_dword(&reg->initval5);
967 ha->rsp_que_len = rd_reg_dword(&reg->initval6);
971 ha->mbx_intr_code, ha->rqstq_intr_code);
972 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1035 * @ha: HA context
1101 struct qla_hw_data *ha = vha->hw;
1118 ha->gid_list, 32);
1125 for_each_set_bit(tgt_id, (void *)ha->gid_list,
1217 * ha = adapter block pointer.
1287 * ha = adapter block pointer.
1332 struct qla_hw_data *ha = vha->hw;
1336 ha->mr.fw_hbt_en = 0;
1339 ha->flags.chip_reset_done = 0;
1343 "Performing ISP error recovery - ha = %p.\n", ha);
1344 ha->isp_ops->reset_chip(vha);
1364 if (!ha->flags.eeh_busy) {
1380 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1383 "%s Done done - ha=%p.\n", __func__, ha);
1417 struct qla_hw_data *ha = vha->hw;
1418 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
1421 qla2x00_request_irqs(ha, ha->rsp_q_map[0]);
1424 ha->mbx_intr_code = MSW(aenmbx7);
1425 ha->rqstq_intr_code = LSW(aenmbx7);
1426 ha->req_que_off = rd_reg_dword(&reg->aenmailbox1);
1427 ha->rsp_que_off = rd_reg_dword(&reg->aenmailbox3);
1428 ha->req_que_len = rd_reg_dword(&reg->aenmailbox5);
1429 ha->rsp_que_len = rd_reg_dword(&reg->aenmailbox6);
1434 ha->mbx_intr_code, ha->rqstq_intr_code,
1435 ha->req_que_off, ha->rsp_que_len);
1438 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1460 struct qla_hw_data *ha = vha->hw;
1463 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
1467 if (ha->mr.fw_hbt_cnt)
1468 ha->mr.fw_hbt_cnt--;
1470 if ((!ha->flags.mr_reset_hdlr_active) &&
1473 (ha->mr.fw_hbt_en)) {
1475 if (fw_heart_beat != ha->mr.old_fw_hbt_cnt) {
1476 ha->mr.old_fw_hbt_cnt = fw_heart_beat;
1477 ha->mr.fw_hbt_miss_cnt = 0;
1479 ha->mr.fw_hbt_miss_cnt++;
1480 if (ha->mr.fw_hbt_miss_cnt ==
1485 ha->mr.fw_hbt_miss_cnt = 0;
1489 ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
1495 if (ha->mr.fw_reset_timer_exp) {
1498 ha->mr.fw_reset_timer_exp = 0;
1504 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
1506 (!ha->mr.fw_hbt_en)) {
1507 ha->mr.fw_hbt_en = 1;
1508 } else if (!ha->mr.fw_reset_timer_tick) {
1509 if (aenmbx0 == ha->mr.old_aenmbx0_state)
1510 ha->mr.fw_reset_timer_exp = 1;
1511 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
1515 data0 = QLAFX00_RD_REG(ha,
1517 data1 = QLAFX00_RD_REG(ha,
1523 QLAFX00_WR_REG(ha,
1527 ha->mr.fw_reset_timer_tick =
1530 ha->mr.fw_reset_timer_tick =
1533 if (ha->mr.old_aenmbx0_state != aenmbx0) {
1534 ha->mr.old_aenmbx0_state = aenmbx0;
1535 ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
1537 ha->mr.fw_reset_timer_tick--;
1544 if (ha->mr.fw_critemp_timer_tick == 0) {
1545 tempc = QLAFX00_GET_TEMPERATURE(ha);
1550 if (tempc < ha->mr.critical_temperature) {
1556 ha->mr.fw_critemp_timer_tick =
1559 ha->mr.fw_critemp_timer_tick--;
1562 if (ha->mr.host_info_resend) {
1567 if (ha->mr.hinfo_resend_timer_tick == 0) {
1568 ha->mr.host_info_resend = false;
1570 ha->mr.hinfo_resend_timer_tick =
1574 ha->mr.hinfo_resend_timer_tick--;
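The timer cluster above (file lines 1460-1574) is the firmware watchdog: each tick it decrements interval counters, samples the firmware heartbeat counter, and counts consecutive misses before scheduling recovery; a parallel counter watches aenmbx0 for a stuck reset state. A compact sketch of just the heartbeat-miss logic, with the interval and threshold values assumed rather than taken from the driver:

```c
#include <linux/types.h>

#define HBT_INTERVAL	6	/* ticks between checks: assumed */
#define HBT_MISS_MAX	3	/* misses before recovery: assumed */

struct hbt_state {
	u32 old_cnt;		/* last heartbeat value seen */
	u32 miss_cnt;		/* consecutive stuck samples */
	u32 ticks_left;		/* countdown to next check */
};

/* Returns true when the firmware heartbeat has been stuck long enough
 * that the caller should schedule ISP recovery. */
static bool fx00_heartbeat_dead(struct hbt_state *s, u32 fw_heart_beat)
{
	if (s->ticks_left) {
		s->ticks_left--;
		return false;
	}
	s->ticks_left = HBT_INTERVAL;

	if (fw_heart_beat != s->old_cnt) {
		s->old_cnt = fw_heart_beat;	/* firmware is alive */
		s->miss_cnt = 0;
		return false;
	}
	return ++s->miss_cnt >= HBT_MISS_MAX;	/* counter stuck: dead */
}
```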
1585 * ha = adapter block pointer.
1593 struct qla_hw_data *ha = vha->hw;
1601 ha->flags.mr_reset_hdlr_active = 1;
1610 ha->flags.mr_reset_hdlr_active = 0;
1619 * ha = adapter block pointer.
1627 struct qla_hw_data *ha = vha->hw;
1630 if (unlikely(pci_channel_offline(ha->pdev) &&
1631 ha->flags.pci_channel_io_perm_failure)) {
1642 ha->isp_ops->reset_chip(vha);
1645 QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
1785 struct qla_hw_data *ha = vha->hw;
1841 fdisc->u.fxiocb.req_addr = dma_alloc_coherent(&ha->pdev->dev,
1858 ha->mr.host_info_resend = true;
1872 ha->pdev->device);
1894 fdisc->u.fxiocb.rsp_addr = dma_alloc_coherent(&ha->pdev->dev,
1930 ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
1967 dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.rsp_len,
1972 dma_free_coherent(&ha->pdev->dev, fdisc->u.fxiocb.req_len,
1986 * ha = adapter block pointer.
1995 struct qla_hw_data *ha = vha->hw;
2000 ha->flags.chip_reset_done = 0;
2002 ha->flags.pci_channel_io_perm_failure = 0;
2003 ha->flags.eeh_busy = 0;
2009 ha->isp_abort_cnt = 0;
2010 ha->beacon_blink_led = 0;
2012 set_bit(0, ha->req_qid_map);
2013 set_bit(0, ha->rsp_qid_map);
2018 rval = ha->isp_ops->pci_config(vha);
2039 rval = qla2x00_alloc_outstanding_cmds(ha, vha->req);
2044 ha->flags.chip_reset_done = 1;
2046 tempc = QLAFX00_GET_TEMPERATURE(ha);
2077 struct qla_hw_data *ha = ((struct scsi_qla_host *)
2081 switch (ha->link_data_rate) {
2261 struct qla_hw_data *ha = vha->hw;
2276 req = ha->req_q_map[que];
2386 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
2392 if ((IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
2528 struct qla_hw_data *ha = rsp->hw;
2529 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
2609 struct qla_hw_data *ha = vha->hw;
2633 req = ha->req_q_map[que];
2664 struct qla_hw_data *ha = vha->hw;
2670 req = ha->req_q_map[que];
2768 struct qla_hw_data *ha = vha->hw;
2772 reg = &ha->iobase->ispfx00;
2774 switch (ha->aenmb[0]) {
2777 "ISP System Error - mbx1=%x\n", ha->aenmb[0]);
2789 ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
2790 ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
2791 ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
2795 ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
2803 ha->aenmb[0]);
2810 ha->aenmb[0]);
2817 ha->aenmb[0]);
2821 ha->aenmb[1] = rd_reg_dword(&reg->aenmailbox1);
2822 ha->aenmb[2] = rd_reg_dword(&reg->aenmailbox2);
2823 ha->aenmb[3] = rd_reg_dword(&reg->aenmailbox3);
2824 ha->aenmb[4] = rd_reg_dword(&reg->aenmailbox4);
2825 ha->aenmb[5] = rd_reg_dword(&reg->aenmailbox5);
2826 ha->aenmb[6] = rd_reg_dword(&reg->aenmailbox6);
2827 ha->aenmb[7] = rd_reg_dword(&reg->aenmailbox7);
2830 ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3],
2831 ha->aenmb[4], ha->aenmb[5], ha->aenmb[6], ha->aenmb[7]);
2834 qlafx00_post_aenfx_work(vha, ha->aenmb[0],
2835 (uint32_t *)ha->aenmb, data_size);
2848 struct qla_hw_data *ha = vha->hw;
2849 struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
2851 if (!ha->mcp32)
2855 ha->flags.mbox_int = 1;
2856 ha->mailbox_out32[0] = mb0;
2859 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
2860 ha->mailbox_out32[cnt] = rd_reg_dword(wptr);
2878 struct qla_hw_data *ha;
2896 ha = rsp->hw;
2897 reg = &ha->iobase->ispfx00;
2900 if (unlikely(pci_channel_offline(ha->pdev)))
2903 spin_lock_irqsave(&ha->hardware_lock, flags);
2904 vha = pci_get_drvdata(ha->pdev);
2906 stat = QLAFX00_RD_INTR_REG(ha);
2920 ha->aenmb[0] = rd_reg_dword(&reg->aenmailbox0);
2929 QLAFX00_CLR_INTR_REG(ha, clr_intr);
2930 QLAFX00_RD_INTR_REG(ha);
2933 qla2x00_handle_mbx_completion(ha, status);
2934 spin_unlock_irqrestore(&ha->hardware_lock, flags);
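File lines 2878-2934 give the shape of the ISR: bail out if the PCI channel is offline, take hardware_lock, read the interrupt status, dispatch (mailbox completion, AEN mailboxes, response queue), then clear the source and flush the posted write with a readback. A sketch with stand-in register helpers for QLAFX00_RD/CLR_INTR_REG:

```c
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

struct fx00_hw {
	spinlock_t hardware_lock;
	struct pci_dev *pdev;
	void __iomem *iobase;
};

static u32 rd_intr_status(struct fx00_hw *hw)
{
	return readl(hw->iobase);	/* stand-in for QLAFX00_RD_INTR_REG */
}

static void clr_intr_status(struct fx00_hw *hw, u32 bits)
{
	writel(bits, hw->iobase);	/* stand-in for QLAFX00_CLR_INTR_REG */
}

static irqreturn_t fx00_intr_handler(int irq, void *dev_id)
{
	struct fx00_hw *hw = dev_id;
	unsigned long flags;
	u32 stat;

	if (unlikely(pci_channel_offline(hw->pdev)))
		return IRQ_HANDLED;	/* EEH recovery in progress */

	spin_lock_irqsave(&hw->hardware_lock, flags);
	stat = rd_intr_status(hw);
	/* ... decode stat: mailbox completion, AEN, response queue ... */
	clr_intr_status(hw, stat);
	rd_intr_status(hw);		/* readback flushes the posted write */
	spin_unlock_irqrestore(&hw->hardware_lock, flags);

	return IRQ_HANDLED;
}
```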
3057 struct qla_hw_data *ha = vha->hw;
3063 rsp = ha->rsp_q_map[0];
3070 spin_lock_irqsave(&ha->hardware_lock, flags);
3078 nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
3154 QLAFX00_SET_HST_INTR(ha, ha->rqstq_intr_code);
3156 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3163 spin_unlock_irqrestore(&ha->hardware_lock, flags);
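The final cluster (file lines 3057-3163) is the queuecommand path: build under hardware_lock, dma_map_sg the command's scatterlist (file line 3078), construct the IOCB, then kick the request queue with QLAFX00_SET_HST_INTR and rqstq_intr_code. A hedged fragment of just the mapping step; the IOCB build itself is not shown:

```c
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <scsi/scsi_cmnd.h>

/* Per-command scatterlist mapping; zero mapped segments means failure and
 * the mid-layer gets the command back for retry. */
static int fx00_map_cmd(struct pci_dev *pdev, struct scsi_cmnd *cmd)
{
	int nseg = 0;

	if (scsi_sg_count(cmd)) {
		nseg = dma_map_sg(&pdev->dev, scsi_sglist(cmd),
				  scsi_sg_count(cmd),
				  cmd->sc_data_direction);
		if (unlikely(!nseg))
			return -EIO;
	}
	return nseg;	/* segment count feeds the IOCB build */
}
```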