Lines matching refs:ha (all references to the local ha pointer, struct qla_hw_data *)

42 	struct qla_hw_data *ha = sp->vha->hw;
52 dma_unmap_sg(&ha->pdev->dev,
57 dma_unmap_sg(&ha->pdev->dev,
63 dma_pool_free(ha->purex_dma_pool, sp->remap.rsp.buf,
65 dma_pool_free(ha->purex_dma_pool, sp->remap.req.buf,
68 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
71 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
80 queue_work(ha->wq, &sp->fcport->free_work);
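The cluster above (source lines 42-80) is the teardown path for a BSG command: every dma_map_sg() done at setup on the bsg_job request/reply scatterlists is undone with dma_unmap_sg(), and buffers taken from a DMA pool go back via dma_pool_free() with the same virtual/DMA address pair. A minimal sketch of that shape, assuming a hypothetical context struct (bsg_ctx) rather than the driver's srb_t:

#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/bsg-lib.h>

/* Hypothetical per-command context; the real driver keeps this in srb_t. */
struct bsg_ctx {
	struct device *dev;		/* &pdev->dev used for the mappings */
	struct bsg_job *job;		/* job whose sg lists were mapped */
	struct dma_pool *pool;		/* small-buffer pool (cf. s_dma_pool) */
	void *pool_buf;
	dma_addr_t pool_dma;
};

static void bsg_ctx_teardown(struct bsg_ctx *ctx)
{
	/* Undo the dma_map_sg() calls made at setup, same direction each. */
	dma_unmap_sg(ctx->dev, ctx->job->request_payload.sg_list,
		     ctx->job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(ctx->dev, ctx->job->reply_payload.sg_list,
		     ctx->job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	/* Pool memory is returned with the same (vaddr, dma) pair it came with. */
	if (ctx->pool_buf)
		dma_pool_free(ctx->pool, ctx->pool_buf, ctx->pool_dma);
}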
147 struct qla_hw_data *ha = vha->hw;
152 if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
161 if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
167 if (ha->flags.fcp_prio_enabled) {
168 ha->flags.fcp_prio_enabled = 0;
169 ha->fcp_prio_cfg->attributes &=
181 if (!ha->flags.fcp_prio_enabled) {
182 if (ha->fcp_prio_cfg) {
183 ha->flags.fcp_prio_enabled = 1;
184 ha->fcp_prio_cfg->attributes |=
208 bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
221 if (!ha->fcp_prio_cfg) {
222 ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
223 if (!ha->fcp_prio_cfg) {
233 memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
235 bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
240 if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1)) {
246 vfree(ha->fcp_prio_cfg);
247 ha->fcp_prio_cfg = NULL;
251 ha->flags.fcp_prio_enabled = 0;
252 if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
253 ha->flags.fcp_prio_enabled = 1;
276 struct qla_hw_data *ha;
293 ha = vha->hw;
298 ha = vha->hw;
312 if (!IS_FWI2_CAPABLE(ha)) {
370 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
373 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
379 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
382 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
433 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
435 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
467 struct qla_hw_data *ha = vha->hw;
475 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
484 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
583 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
585 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
599 struct qla_hw_data *ha = vha->hw;
601 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
614 ha->notify_dcbx_comp = wait;
615 ha->notify_lb_portup_comp = wait2;
621 ha->notify_dcbx_comp = 0;
622 ha->notify_lb_portup_comp = 0;
628 if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
632 ha->notify_dcbx_comp = 0;
633 ha->notify_lb_portup_comp = 0;
641 !wait_for_completion_timeout(&ha->lb_portup_comp,
645 ha->notify_lb_portup_comp = 0;
652 ha->notify_dcbx_comp = 0;
653 ha->notify_lb_portup_comp = 0;
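Source lines 599-653 arm notify flags and then block on completions with a timeout; on timeout the flags are cleared so a late interrupt completion is ignored. A rough sketch of that wait-with-timeout idiom, with placeholder names (hw_state, notify_event, EVENT_TMO) rather than the driver's:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define EVENT_TMO	(30 * HZ)	/* assumed timeout, not the driver's value */

/* Placeholder state; the driver keeps these fields in qla_hw_data. */
struct hw_state {
	struct completion event_comp;	/* completed from interrupt context */
	unsigned int notify_event;	/* ISR completes event_comp only when set */
};

static int wait_for_hw_event(struct hw_state *hw)
{
	hw->notify_event = 1;

	/* ... issue the mailbox/loopback command that triggers the event ... */

	if (!wait_for_completion_timeout(&hw->event_comp, EVENT_TMO)) {
		/* Timed out: disarm so a late interrupt is ignored. */
		hw->notify_event = 0;
		return -ETIMEDOUT;
	}

	hw->notify_event = 0;
	return 0;
}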
670 struct qla_hw_data *ha = vha->hw;
672 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
684 ha->notify_dcbx_comp = 1;
689 ha->notify_dcbx_comp = 0;
697 rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
699 if (!ha->idc_extend_tmo || rem_tmo) {
700 ha->idc_extend_tmo = 0;
703 current_tmo = ha->idc_extend_tmo * HZ;
704 ha->idc_extend_tmo = 0;
721 if (ha->flags.idc_compl_status) {
725 ha->flags.idc_compl_status = 0;
731 ha->notify_dcbx_comp = 0;
732 ha->idc_extend_tmo = 0;
745 struct qla_hw_data *ha = vha->hw;
767 elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
777 elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
800 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
809 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
831 ((ha->current_topology == ISP_CFG_F && (elreq.options & 7) >= 2) ||
832 ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
842 if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
865 if (IS_QLA8031(ha) || IS_QLA8044(ha))
895 if (IS_QLA81XX(ha)) {
962 dma_free_coherent(&ha->pdev->dev, rsp_data_len,
965 dma_free_coherent(&ha->pdev->dev, req_data_len,
968 dma_unmap_sg(&ha->pdev->dev,
972 dma_unmap_sg(&ha->pdev->dev,
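In the loopback path (source lines 745-972) the request/reply scatterlists are DMA-mapped and the payload is staged into dma_alloc_coherent() bounce buffers, which are released in reverse order on exit. A simplified sketch of the setup half, with invented helper and parameter names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/*
 * Hypothetical bounce-buffer setup: map the caller's request scatterlist,
 * allocate a coherent staging buffer, and copy the payload into it for the
 * firmware. Cleanup runs in reverse order (free coherent, then unmap).
 */
static int stage_request(struct device *dev, struct scatterlist *sgl,
			 int sg_cnt, size_t len, void **buf, dma_addr_t *dma)
{
	if (!dma_map_sg(dev, sgl, sg_cnt, DMA_TO_DEVICE))
		return -ENOMEM;

	*buf = dma_alloc_coherent(dev, len, dma, GFP_KERNEL);
	if (!*buf) {
		dma_unmap_sg(dev, sgl, sg_cnt, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	sg_copy_to_buffer(sgl, sg_cnt, *buf, len);
	return 0;
}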
988 struct qla_hw_data *ha = vha->hw;
992 if (!IS_QLA84XX(ha)) {
1024 struct qla_hw_data *ha = vha->hw;
1035 if (!IS_QLA84XX(ha)) {
1041 sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1059 fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
1071 mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1112 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1115 dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);
1118 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1134 struct qla_hw_data *ha = vha->hw;
1144 if (!IS_QLA84XX(ha)) {
1150 mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
1163 sg_cnt = dma_map_sg(&ha->pdev->dev,
1186 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1213 sg_cnt = dma_map_sg(&ha->pdev->dev,
1236 mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
1305 dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
1308 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1311 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1315 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
1419 struct qla_hw_data *ha = vha->hw;
1421 if (unlikely(pci_channel_offline(ha->pdev)))
1425 if (start > ha->optrom_size) {
1427 "start %d > optrom_size %d.\n", start, ha->optrom_size);
1431 if (ha->optrom_state != QLA_SWAITING) {
1433 "optrom_state %d.\n", ha->optrom_state);
1437 ha->optrom_region_start = start;
1440 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
1442 else if (start == (ha->flt_region_boot * 4) ||
1443 start == (ha->flt_region_fw * 4))
1445 else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
1446 IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
1447 IS_QLA28XX(ha))
1456 ha->optrom_region_size = start +
1457 bsg_job->request_payload.payload_len > ha->optrom_size ?
1458 ha->optrom_size - start :
1460 ha->optrom_state = QLA_SWRITING;
1462 ha->optrom_region_size = start +
1463 bsg_job->reply_payload.payload_len > ha->optrom_size ?
1464 ha->optrom_size - start :
1466 ha->optrom_state = QLA_SREADING;
1469 ha->optrom_buffer = vzalloc(ha->optrom_region_size);
1470 if (!ha->optrom_buffer) {
1473 "(%x)\n", ha->optrom_region_size);
1475 ha->optrom_state = QLA_SWAITING;
1488 struct qla_hw_data *ha = vha->hw;
1491 if (ha->flags.nic_core_reset_hdlr_active)
1494 mutex_lock(&ha->optrom_mutex);
1497 mutex_unlock(&ha->optrom_mutex);
1501 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1502 ha->optrom_region_start, ha->optrom_region_size);
1505 bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1506 ha->optrom_region_size);
1508 bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
1510 vfree(ha->optrom_buffer);
1511 ha->optrom_buffer = NULL;
1512 ha->optrom_state = QLA_SWAITING;
1513 mutex_unlock(&ha->optrom_mutex);
1525 struct qla_hw_data *ha = vha->hw;
1528 mutex_lock(&ha->optrom_mutex);
1531 mutex_unlock(&ha->optrom_mutex);
1536 ha->flags.isp82xx_no_md_cap = 1;
1539 bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1540 ha->optrom_region_size);
1542 rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1543 ha->optrom_region_start, ha->optrom_region_size);
1551 vfree(ha->optrom_buffer);
1552 ha->optrom_buffer = NULL;
1553 ha->optrom_state = QLA_SWAITING;
1554 mutex_unlock(&ha->optrom_mutex);
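Source lines 1419-1554 are the flash (optrom) access paths: the state moves from QLA_SWAITING to QLA_SREADING or QLA_SWRITING under optrom_mutex, the region is staged in a vzalloc'd buffer, and everything returns to QLA_SWAITING when done. A compressed sketch of that state handling; the context struct and helper are invented for illustration, the state names mirror the matches:

#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/types.h>
#include <linux/errno.h>

enum optrom_state { QLA_SWAITING, QLA_SREADING, QLA_SWRITING };

/* Invented context; the driver keeps these fields directly in qla_hw_data. */
struct optrom_ctx {
	struct mutex lock;		/* cf. optrom_mutex */
	enum optrom_state state;
	void *buffer;			/* vzalloc'd staging area */
	u32 region_start;
	u32 region_size;
};

static int optrom_read_region(struct optrom_ctx *op, u32 start, u32 size)
{
	int rval = 0;

	mutex_lock(&op->lock);
	if (op->state != QLA_SWAITING) {	/* another flash access in flight */
		mutex_unlock(&op->lock);
		return -EBUSY;
	}
	op->state = QLA_SREADING;
	op->region_start = start;
	op->region_size = size;

	op->buffer = vzalloc(size);
	if (!op->buffer) {
		rval = -ENOMEM;
		goto done;
	}

	/* ... read the flash region into op->buffer, copy to the reply sg list ... */

	vfree(op->buffer);
	op->buffer = NULL;
done:
	op->state = QLA_SWAITING;
	mutex_unlock(&op->lock);
	return rval;
}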
1566 struct qla_hw_data *ha = vha->hw;
1573 void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1602 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1619 struct qla_hw_data *ha = vha->hw;
1624 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1652 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1670 struct qla_hw_data *ha = vha->hw;
1675 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1700 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1717 struct qla_hw_data *ha = vha->hw;
1722 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1746 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1763 struct qla_hw_data *ha = vha->hw;
1768 uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1795 dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1813 struct qla_hw_data *ha = vha->hw;
1824 if (!IS_BIDI_CAPABLE(ha)) {
1855 if (ha->current_topology != ISP_CFG_F) {
1863 if (ha->operating_mode != P2P) {
1870 mutex_lock(&ha->selflogin_lock);
1883 mutex_unlock(&ha->selflogin_lock);
1891 mutex_unlock(&ha->selflogin_lock);
1895 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
1905 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
1961 mempool_free(sp, ha->srb_mempool);
1963 dma_unmap_sg(&ha->pdev->dev,
1967 dma_unmap_sg(&ha->pdev->dev,
1991 struct qla_hw_data *ha = vha->hw;
2015 req_sg_cnt = dma_map_sg(&ha->pdev->dev,
2027 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
2084 mempool_free(sp, ha->srb_mempool);
2095 dma_unmap_sg(&ha->pdev->dev,
2100 dma_unmap_sg(&ha->pdev->dev,
2198 struct qla_hw_data *ha = vha->hw;
2201 if (!(IS_QLA27XX(ha)) && !IS_QLA28XX(ha))
2205 cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2206 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2207 (uint64_t)ha->fw_attributes_h << 16 |
2208 (uint64_t)ha->fw_attributes;
2230 struct qla_hw_data *ha = vha->hw;
2234 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2241 online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2242 (uint64_t)ha->fw_attributes_ext[0] << 32 |
2243 (uint64_t)ha->fw_attributes_h << 16 |
2244 (uint64_t)ha->fw_attributes;
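Source lines 2205-2208 and 2241-2244 assemble one 64-bit capability value from four 16-bit firmware attribute fields, highest 16 bits first. The same packing in isolation (the helper name is made up):

#include <linux/types.h>

/*
 * Pack four 16-bit firmware attribute words into the 64-bit capability
 * value reported over BSG, using the same bit ordering as the matches above.
 */
static u64 pack_fw_capabilities(u16 ext1, u16 ext0, u16 attr_h, u16 attr)
{
	return (u64)ext1 << 48 | (u64)ext0 << 32 |
	       (u64)attr_h << 16 | (u64)attr;
}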
2276 struct qla_hw_data *ha = vha->hw;
2282 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
2336 struct qla_hw_data *ha = vha->hw;
2337 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2347 if (unlikely(pci_channel_offline(ha->pdev)))
2353 if (!IS_FWI2_CAPABLE(ha))
2356 stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats), &stats_dma,
2382 dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
2520 struct qla_hw_data *ha = vha->hw;
2527 if (IS_QLA27XX(ha))
2530 if (IS_QLA28XX(ha)) {
3067 struct qla_hw_data *ha = vha->hw;
3076 if (qla2x00_isp_reg_stat(ha)) {
3083 spin_lock_irqsave(&ha->hardware_lock, flags);
3084 for (que = 0; que < ha->max_req_queues; que++) {
3085 req = ha->req_q_map[que];
3098 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3100 if (!ha->flags.eeh_busy && ha->isp_ops->abort_command(sp)) {
3109 spin_lock_irqsave(&ha->hardware_lock, flags);
3115 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3121 spin_unlock_irqrestore(&ha->hardware_lock, flags);
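The final cluster (source lines 3067-3121, the BSG timeout/abort handler) takes hardware_lock, scans every request queue's outstanding-command slots for the command that owns the timed-out job, and issues the actual abort only after dropping the lock and checking that EEH recovery is not in progress. A structural sketch of the locked search, using simplified placeholder types:

#include <linux/spinlock.h>

#define MAX_OUTSTANDING	1024	/* placeholder; the driver sizes this per queue */

struct fake_srb {
	void *bsg_job;			/* the job this command is servicing */
};

struct fake_req_q {
	struct fake_srb *outstanding[MAX_OUTSTANDING];
};

/* Find the outstanding command servicing 'job' across all request queues. */
static struct fake_srb *find_cmd_for_job(spinlock_t *hw_lock,
					 struct fake_req_q **queues,
					 int nqueues, void *job)
{
	struct fake_srb *found = NULL;
	unsigned long flags;
	int q, slot;

	spin_lock_irqsave(hw_lock, flags);
	for (q = 0; q < nqueues && !found; q++) {
		struct fake_req_q *req = queues[q];

		if (!req)
			continue;
		for (slot = 0; slot < MAX_OUTSTANDING; slot++) {
			struct fake_srb *sp = req->outstanding[slot];

			if (sp && sp->bsg_job == job) {
				found = sp;
				break;
			}
		}
	}
	spin_unlock_irqrestore(hw_lock, flags);

	/* The caller aborts the command outside the lock, as the driver does. */
	return found;
}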