Lines Matching defs:ha

80 struct qla_hw_data *ha = vha->hw;
98 rsp_els = dma_alloc_coherent(&ha->pdev->dev, sizeof(*rsp_els), &dma,
185 dma_free_coherent(&ha->pdev->dev, sizeof(*rsp_els), rsp_els, dma);
344 struct qla_hw_data *ha;
360 ha = rsp->hw;
361 reg = &ha->iobase->isp;
364 spin_lock_irqsave(&ha->hardware_lock, flags);
365 vha = pci_get_drvdata(ha->pdev);
371 if (pci_channel_offline(ha->pdev))
382 ha->isp_ops->fw_dump(vha);
393 mb[0] = RD_MAILBOX_REG(ha, reg, 0);
398 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
399 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
400 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
418 qla2x00_handle_mbx_completion(ha, status);
419 spin_unlock_irqrestore(&ha->hardware_lock, flags);
465 struct qla_hw_data *ha;
475 ha = rsp->hw;
476 reg = &ha->iobase->isp;
479 spin_lock_irqsave(&ha->hardware_lock, flags);
480 vha = pci_get_drvdata(ha->pdev);
486 if (unlikely(pci_channel_offline(ha->pdev)))
508 ha->isp_ops->fw_dump(vha);
527 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
528 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
529 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
543 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
554 qla2x00_handle_mbx_completion(ha, status);
555 spin_unlock_irqrestore(&ha->hardware_lock, flags);
571 struct qla_hw_data *ha = vha->hw;
572 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
575 WARN_ON_ONCE(ha->mbx_count > 32);
576 mboxes = (1ULL << ha->mbx_count) - 1;
577 if (!ha->mcp)
580 mboxes = ha->mcp->in_mb;
583 ha->flags.mbox_int = 1;
584 ha->mailbox_out[0] = mb0;
586 wptr = MAILBOX_REG(ha, reg, 1);
588 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
589 if (IS_QLA2200(ha) && cnt == 8)
590 wptr = MAILBOX_REG(ha, reg, 8);
592 ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
594 ha->mailbox_out[cnt] = rd_reg_word(wptr);
664 qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
671 if (IS_QLA2100(ha) || IS_QLA2200(ha))
684 struct qla_hw_data *ha = vha->hw;
706 ha->flags.nic_core_hung = 1;
836 if (ha->flags.nic_core_reset_owner)
974 struct qla_hw_data *ha = vha->hw;
980 if (!ha->num_vhosts)
983 spin_lock_irqsave(&ha->vport_slock, flags);
984 list_for_each_entry(vp, &ha->vp_list, list) {
991 spin_unlock_irqrestore(&ha->vport_slock, flags);
1047 struct qla_hw_data *ha = vha->hw;
1063 ha->isp_ops->fw_dump(vha);
1067 ha->isp_ops->mpi_fw_dump(vha, 1);
1261 struct qla_hw_data *ha = vha->hw;
1262 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
1263 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
1264 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
1278 if (IS_CNA_CAPABLE(ha))
1307 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1315 handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
1316 handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
1322 handles[1] = make_handle(RD_MAILBOX_REG(ha, reg, 7),
1323 RD_MAILBOX_REG(ha, reg, 6));
1353 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
1354 IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1370 if ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&
1372 ha->isp_ops->mpi_fw_dump(vha, 1);
1373 ha->isp_ops->fw_dump(vha);
1374 ha->flags.fw_init_done = 0;
1375 QLA_FW_STOPPED(ha);
1377 if (IS_FWI2_CAPABLE(ha)) {
1386 if ((mbx & MBX_3) && (ha->port_no == 0))
1432 ha->flags.lip_ae = 1;
1456 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1457 ha->link_data_rate = PORT_SPEED_1GB;
1459 ha->link_data_rate = mb[1];
1463 qla2x00_get_link_speed_str(ha, ha->link_data_rate));
1465 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1472 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
1482 SAVE_TOPO(ha);
1483 ha->flags.lip_ae = 0;
1484 ha->current_topology = 0;
1487 mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
1489 mbx = (IS_P3P_TYPE(ha)) ? rd_reg_word(&reg82->mailbox_out[4])
1504 if (ha->flags.fawwpn_enabled &&
1505 (ha->current_topology == ISP_CFG_F)) {
1506 memcpy(vha->port_name, ha->port_name, WWN_SIZE);
1528 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1549 ha->operating_mode = LOOP;
1556 ha->flags.lip_ae = 0;
1558 if (IS_QLA2100(ha))
1561 if (IS_CNA_CAPABLE(ha)) {
1565 if (ha->notify_dcbx_comp && !vha->vp_idx)
1566 complete(&ha->dcbx_comp);
1581 if (!N2N_TOPO(ha))
1600 if (IS_QLA2100(ha))
1639 if (IS_QLA2XXX_MIDTYPE(ha) &&
1653 if (mb[1] == NPH_SNS_LID(ha)) {
1660 if (IS_FWI2_CAPABLE(ha))
1702 ha->link_data_rate = PORT_SPEED_UNKNOWN;
1713 !ha->flags.n2n_ae &&
1741 if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
1778 if (!ha->flags.scm_enabled ||
1795 if (IS_FWI2_CAPABLE(ha))
1818 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
1826 ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
1829 ha->cs84xx->op_fw_version);
1832 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1835 ha->cs84xx->diag_fw_version);
1838 ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
1839 ha->cs84xx->fw_update = 1;
1842 ha->cs84xx->gold_fw_version);
1849 spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
1867 if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1884 if (ha->notify_lb_portup_comp && !vha->vp_idx)
1885 complete(&ha->lb_portup_comp);
1889 IS_QLA8044(ha))
1894 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1897 } else if (IS_QLA83XX(ha)) {
1918 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1946 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1970 if (!vha->vp_idx && ha->num_vhosts)
1985 struct qla_hw_data *ha = vha->hw;
1992 if (IS_P3P_TYPE(ha))
2009 if (IS_P3P_TYPE(ha))
2020 struct qla_hw_data *ha = vha->hw;
2033 if (IS_P3P_TYPE(ha))
2163 struct qla_hw_data *ha = vha->hw;
2178 if (IS_P3P_TYPE(ha))
2942 struct qla_hw_data *ha = rsp->hw;
2943 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2946 vha = pci_get_drvdata(ha->pdev);
2975 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
3146 struct qla_hw_data *ha = vha->hw;
3182 if (IS_FWI2_CAPABLE(ha)) {
3312 struct qla_hw_data *ha = vha->hw;
3321 if (IS_FWI2_CAPABLE(ha)) {
3331 req = ha->req_q_map[que];
3335 que >= find_first_zero_bit(ha->req_qid_map, ha->max_req_queues)) {
3357 if (IS_P3P_TYPE(ha))
3420 if (IS_FWI2_CAPABLE(ha)) {
3449 if (IS_FWI2_CAPABLE(ha)) {
3464 if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
3522 resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
3525 if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
3609 if (IS_FWI2_CAPABLE(ha))
3642 if (!IS_PI_SPLIT_DET_CAPABLE(ha))
3696 struct qla_hw_data *ha = rsp->hw;
3697 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
3724 if (IS_FWI2_CAPABLE(ha))
3754 struct qla_hw_data *ha = vha->hw;
3765 if (que >= ha->max_req_queues || !ha->req_q_map[que])
3768 req = ha->req_q_map[que];
3823 struct qla_hw_data *ha = vha->hw;
3824 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3827 WARN_ON_ONCE(ha->mbx_count > 32);
3828 mboxes = (1ULL << ha->mbx_count) - 1;
3829 if (!ha->mcp)
3832 mboxes = ha->mcp->in_mb;
3835 ha->flags.mbox_int = 1;
3836 ha->mailbox_out[0] = mb0;
3840 for (cnt = 1; cnt < ha->mbx_count; cnt++) {
3842 ha->mailbox_out[cnt] = rd_reg_word(wptr);
3958 struct qla_hw_data *ha = vha->hw;
3965 if (!ha->flags.fw_started)
3981 is_shadow_hba = IS_SHADOW_REG_CAPABLE(ha);
4038 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4039 IS_QLA28XX(ha)) {
4159 if (IS_P3P_TYPE(ha)) {
4160 struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
4173 struct qla_hw_data *ha = vha->hw;
4174 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4176 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4177 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4231 struct qla_hw_data *ha;
4249 ha = rsp->hw;
4250 reg = &ha->iobase->isp24;
4253 if (unlikely(pci_channel_offline(ha->pdev)))
4256 spin_lock_irqsave(&ha->hardware_lock, flags);
4257 vha = pci_get_drvdata(ha->pdev);
4263 if (unlikely(pci_channel_offline(ha->pdev)))
4274 ha->isp_ops->fw_dump(vha);
4315 if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
4318 qla2x00_handle_mbx_completion(ha, status);
4319 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4322 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4324 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4333 struct qla_hw_data *ha;
4345 ha = rsp->hw;
4346 reg = &ha->iobase->isp24;
4348 spin_lock_irqsave(&ha->hardware_lock, flags);
4350 vha = pci_get_drvdata(ha->pdev);
4352 if (!ha->flags.disable_msix_handshake) {
4356 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4365 struct qla_hw_data *ha;
4381 ha = rsp->hw;
4382 reg = &ha->iobase->isp24;
4385 spin_lock_irqsave(&ha->hardware_lock, flags);
4386 vha = pci_get_drvdata(ha->pdev);
4392 if (unlikely(pci_channel_offline(ha->pdev)))
4404 ha->isp_ops->fw_dump(vha);
4445 qla2x00_handle_mbx_completion(ha, status);
4446 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4449 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
4451 spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
4460 struct qla_hw_data *ha;
4469 ha = qpair->hw;
4471 queue_work(ha->wq, &qpair->q_work);
4479 struct qla_hw_data *ha;
4490 ha = qpair->hw;
4492 reg = &ha->iobase->isp24;
4493 spin_lock_irqsave(&ha->hardware_lock, flags);
4495 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4497 queue_work(ha->wq, &qpair->q_work);
4523 qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
4527 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4534 IS_ATIO_MSIX_CAPABLE(ha)) {
4539 if (USER_CTRL_IRQ(ha) || !ha->mqiobase) {
4541 ret = pci_alloc_irq_vectors(ha->pdev, min_vecs,
4542 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
4545 ret = pci_alloc_irq_vectors_affinity(ha->pdev, min_vecs,
4546 min((u16)ha->msix_count, (u16)(num_online_cpus() + min_vecs)),
4554 ha->msix_count, ret);
4556 } else if (ret < ha->msix_count) {
4559 ha->msix_count = ret;
4561 if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) {
4562 ha->max_req_queues = ha->msix_count - 1;
4566 ha->max_req_queues--;
4568 ha->max_rsp_queues = ha->max_req_queues;
4570 ha->max_qpairs = ha->max_req_queues - 1;
4571 ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0190,
4572 "Adjusted Max no of queues pairs: %d.\n", ha->max_qpairs);
4576 ha->msix_entries = kcalloc(ha->msix_count,
4579 if (!ha->msix_entries) {
4581 "Failed to allocate memory for ha->msix_entries.\n");
4585 ha->flags.msix_enabled = 1;
4587 for (i = 0; i < ha->msix_count; i++) {
4588 qentry = &ha->msix_entries[i];
4589 qentry->vector = pci_irq_vector(ha->pdev, i);
4599 qentry = &ha->msix_entries[i];
4604 if (IS_P3P_TYPE(ha))
4623 IS_ATIO_MSIX_CAPABLE(ha)) {
4624 qentry = &ha->msix_entries[QLA_ATIO_VECTOR];
4643 ha->mqenable = 0;
4648 if (IS_MQUE_CAPABLE(ha) &&
4649 (ha->msixbase && ha->mqiobase && ha->max_qpairs))
4650 ha->mqenable = 1;
4652 ha->mqenable = 0;
4656 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4659 ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
4665 pci_free_irq_vectors(ha->pdev);
4670 qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
4673 device_reg_t *reg = ha->iobase;
4674 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4677 if (ql2xenablemsix == 0 || (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
4678 !IS_QLA8432(ha) && !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) &&
4679 !IS_QLAFX00(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha)))
4685 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
4686 (ha->pdev->subsystem_device == 0x7040 ||
4687 ha->pdev->subsystem_device == 0x7041 ||
4688 ha->pdev->subsystem_device == 0x1705)) {
4691 ha->pdev->subsystem_vendor,
4692 ha->pdev->subsystem_device);
4696 if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
4699 ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
4703 ret = qla24xx_enable_msix(ha, rsp);
4707 ha->chip_revision, ha->fw_attributes);
4716 if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
4717 !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) &&
4718 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4721 ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI);
4725 ha->flags.msi_enabled = 1;
4732 if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
4735 ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
4736 ha->flags.msi_enabled ? 0 : IRQF_SHARED,
4741 ha->pdev->irq);
4743 } else if (!ha->flags.msi_enabled) {
4746 ha->flags.mr_intr_valid = 1;
4748 ha->max_qpairs = 0;
4752 if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
4755 spin_lock_irq(&ha->hardware_lock);
4757 spin_unlock_irq(&ha->hardware_lock);
4766 struct qla_hw_data *ha = vha->hw;
4772 * We need to check that ha->rsp_q_map is valid in case we are called
4775 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
4777 rsp = ha->rsp_q_map[0];
4779 if (ha->flags.msix_enabled) {
4780 for (i = 0; i < ha->msix_count; i++) {
4781 qentry = &ha->msix_entries[i];
4784 free_irq(pci_irq_vector(ha->pdev, i), qentry->handle);
4787 kfree(ha->msix_entries);
4788 ha->msix_entries = NULL;
4789 ha->flags.msix_enabled = 0;
4793 free_irq(pci_irq_vector(ha->pdev, 0), rsp);
4797 pci_free_irq_vectors(ha->pdev);
4800 int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
4804 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
4818 qla_mapq_init_qp_cpu_map(ha, msix, qpair);
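
Taken together, the matches above follow one recurring idiom: each entry point derives the shared qla_hw_data pointer from its context object (vha->hw, rsp->hw, or qpair->hw) and serializes register and mailbox access under ha->hardware_lock, bailing out early when the PCI channel is offline. The fragment below is a minimal sketch of that idiom for orientation only, not a line from the driver; example_intr() and its trimmed-down body are hypothetical, while the field names (hw, pdev, hardware_lock) mirror the listing.

/*
 * Hypothetical, simplified interrupt handler illustrating the
 * ha-derivation and hardware_lock pattern seen in the lines above.
 */
static irqreturn_t example_intr(int irq, void *dev_id)
{
	struct rsp_que *rsp = dev_id;
	struct qla_hw_data *ha;
	struct scsi_qla_host *vha;
	unsigned long flags;

	if (!rsp)
		return IRQ_NONE;
	ha = rsp->hw;

	/* Skip work if the PCI channel is down (EEH/AER recovery). */
	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	/* Register and mailbox access is guarded by hardware_lock. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	/* ... read ISR status and complete mailbox commands for vha ... */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}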