Lines Matching defs:ha — hits for the adapter hardware-state pointer ha (struct qla_hw_data *); the number opening each hit is its line in the source file, which from the function shapes appears to be the qla2xxx mailbox-command code (qla_mbx.c).

91  *	ha = adapter block pointer.
119 struct qla_hw_data *ha = vha->hw;
120 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
126 if (ha->pdev->error_state == pci_channel_io_perm_failure) {
146 reg = ha->iobase;
151 chip_reset = ha->chip_reset;
153 if (ha->flags.pci_channel_io_perm_failure) {
159 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
163 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
171 !is_rom_cmd(mcp->mb[0])) || ha->flags.eeh_busy) {
178 atomic_inc(&ha->num_pend_mbx_stage1);
184 if (!wait_for_completion_timeout(&ha->mbx_cmd_comp, mcp->tov * HZ)) {
190 atomic_dec(&ha->num_pend_mbx_stage1);
193 atomic_dec(&ha->num_pend_mbx_stage1);
194 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
195 ha->flags.eeh_busy) {
198 ha->flags.purge_mbox, ha->flags.eeh_busy, mcp->mb[0]);
205 ha->mcp = mcp;
210 spin_lock_irqsave(&ha->hardware_lock, flags);
212 if (ha->flags.purge_mbox || chip_reset != ha->chip_reset ||
213 ha->flags.mbox_busy) {
215 spin_unlock_irqrestore(&ha->hardware_lock, flags);
218 ha->flags.mbox_busy = 1;
221 if (IS_P3P_TYPE(ha))
223 else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
226 optr = MAILBOX_REG(ha, &reg->isp, 0);
234 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
235 if (IS_QLA2200(ha) && cnt == 8)
236 optr = MAILBOX_REG(ha, &reg->isp, 8);
254 ha->flags.mbox_int = 0;
255 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
263 atomic_inc(&ha->num_pend_mbx_stage2);
264 if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
265 set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
267 if (IS_P3P_TYPE(ha))
269 else if (IS_FWI2_CAPABLE(ha))
273 spin_unlock_irqrestore(&ha->hardware_lock, flags);
276 if (!wait_for_completion_timeout(&ha->mbx_intr_comp,
280 spin_lock_irqsave(&ha->hardware_lock, flags);
281 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
282 spin_unlock_irqrestore(&ha->hardware_lock, flags);
284 if (chip_reset != ha->chip_reset) {
285 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
287 spin_lock_irqsave(&ha->hardware_lock, flags);
288 ha->flags.mbox_busy = 0;
289 spin_unlock_irqrestore(&ha->hardware_lock,
291 atomic_dec(&ha->num_pend_mbx_stage2);
295 } else if (ha->flags.purge_mbox ||
296 chip_reset != ha->chip_reset) {
297 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
299 spin_lock_irqsave(&ha->hardware_lock, flags);
300 ha->flags.mbox_busy = 0;
301 spin_unlock_irqrestore(&ha->hardware_lock, flags);
302 atomic_dec(&ha->num_pend_mbx_stage2);
314 if (IS_P3P_TYPE(ha)) {
317 ha->flags.mbox_busy = 0;
318 spin_unlock_irqrestore(&ha->hardware_lock,
320 atomic_dec(&ha->num_pend_mbx_stage2);
328 } else if (IS_FWI2_CAPABLE(ha))
332 spin_unlock_irqrestore(&ha->hardware_lock, flags);
335 while (!ha->flags.mbox_int) {
336 if (ha->flags.purge_mbox ||
337 chip_reset != ha->chip_reset) {
338 eeh_delay = ha->flags.eeh_busy ? 1 : 0;
340 spin_lock_irqsave(&ha->hardware_lock, flags);
341 ha->flags.mbox_busy = 0;
342 spin_unlock_irqrestore(&ha->hardware_lock,
344 atomic_dec(&ha->num_pend_mbx_stage2);
353 qla2x00_poll(ha->rsp_q_map[0]);
355 if (!ha->flags.mbox_int &&
356 !(IS_QLA2200(ha) &&
364 atomic_dec(&ha->num_pend_mbx_stage2);
367 if (ha->flags.mbox_int) {
374 ha->flags.mbox_int = 0;
375 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
377 if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
378 spin_lock_irqsave(&ha->hardware_lock, flags);
379 ha->flags.mbox_busy = 0;
380 spin_unlock_irqrestore(&ha->hardware_lock, flags);
384 ha->mcp = NULL;
387 "FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
391 if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) {
393 "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0],
400 iptr = (uint16_t *)&ha->mailbox_out[0];
405 for (cnt = 0; cnt < ha->mbx_count; cnt++) {
422 if (IS_FWI2_CAPABLE(ha)) {
440 mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
451 pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
453 (chip_reset != ha->chip_reset)) {
460 spin_lock_irqsave(&ha->hardware_lock, flags);
461 ha->flags.mbox_busy = 0;
462 spin_unlock_irqrestore(&ha->hardware_lock,
478 spin_lock_irqsave(&ha->hardware_lock, flags);
479 ha->flags.mbox_busy = 0;
480 spin_unlock_irqrestore(&ha->hardware_lock, flags);
483 ha->mcp = NULL;
485 if ((abort_active || !io_lock_on) && !IS_NOPOLLING_TYPE(ha)) {
490 qla2x00_poll(ha->rsp_q_map[0]);
496 ha->flags.eeh_busy) {
504 if (IS_QLA82XX(ha)) {
508 qla82xx_wr_32(ha,
517 ha->flags.eeh_busy);
522 } else if (current == ha->dpc_thread) {
530 if (IS_QLA82XX(ha)) {
534 qla82xx_wr_32(ha,
547 complete(&ha->mbx_cmd_comp);
548 if (ha->isp_ops->abort_isp(vha) &&
549 !ha->flags.eeh_busy) {
564 complete(&ha->mbx_cmd_comp);
574 dev_name(&ha->pdev->dev), 0x1020+0x800,
578 for (i = 0; i < ha->mbx_count && cnt; i++, mboxes >>= 1)
585 if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha))) {
603 while (i && eeh_delay && (ha->pci_error_state < QLA_PCI_SLOT_RESET)) {
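Every hit up to this point falls in the core mailbox dispatcher (apparently qla2x00_mailbox_command). Its skeleton: take ha->hardware_lock, claim the single mailbox slot via flags.mbox_busy, load the outgoing registers, then either sleep on ha->mbx_intr_comp (signalled by the interrupt handler) or spin on flags.mbox_int, clearing mbox_busy on every exit path. A minimal sketch of that handshake; the EEH, chip-reset, purge and polling-fallback recovery the real routine layers on top is omitted:

    /* Minimal sketch of the busy-flag/completion handshake above. */
    static int mbx_handshake_sketch(struct qla_hw_data *ha, unsigned long tov)
    {
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (ha->flags.mbox_busy) {              /* one command at a time */
            spin_unlock_irqrestore(&ha->hardware_lock, flags);
            return -EBUSY;
        }
        ha->flags.mbox_busy = 1;
        /* ...load mcp->mb[] into the chip mailbox registers and set
         * the host-interrupt bit to start the command... */
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        /* The ISR calls complete(&ha->mbx_intr_comp) on MBX_INTERRUPT. */
        if (!wait_for_completion_timeout(&ha->mbx_intr_comp, tov * HZ))
            rc = -ETIMEDOUT;

        spin_lock_irqsave(&ha->hardware_lock, flags);
        ha->flags.mbox_busy = 0;                /* cleared on every exit path */
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return rc;
    }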
621 struct qla_hw_data *ha = vha->hw;
628 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
642 if (IS_FWI2_CAPABLE(ha)) {
677 * ha = adapter block pointer.
691 struct qla_hw_data *ha = vha->hw;
705 if (IS_FWI2_CAPABLE(ha)) {
713 if (ha->flags.lr_detected) {
715 if (IS_BPM_RANGE_CAPABLE(ha))
717 ha->lr_distance << LR_DIST_FW_POS;
720 if (ql2xnvmeenable && (IS_QLA27XX(ha) || IS_QLA28XX(ha)))
723 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
724 struct nvram_81xx *nv = ha->nvram;
740 if (ha->flags.exlogins_enabled)
743 if (ha->flags.exchoffld_enabled)
754 if (IS_QLA2322(ha) || IS_QLA6322(ha)) {
765 if (IS_QLA28XX(ha) && rval == QLA_COMMAND_ERROR &&
786 if (!IS_FWI2_CAPABLE(ha))
789 ha->fw_ability_mask = mcp->mb[3] << 16 | mcp->mb[2];
791 "fw_ability_mask=%x.\n", ha->fw_ability_mask);
793 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
794 ha->max_supported_speed = mcp->mb[2] & (BIT_0|BIT_1);
796 ha->max_supported_speed == 0 ? "16Gps" :
797 ha->max_supported_speed == 1 ? "32Gps" :
798 ha->max_supported_speed == 2 ? "64Gps" : "unknown");
800 ha->min_supported_speed = mcp->mb[5] &
804 ha->min_supported_speed == 6 ? "64Gps" :
805 ha->min_supported_speed == 5 ? "32Gps" :
806 ha->min_supported_speed == 4 ? "16Gps" :
807 ha->min_supported_speed == 3 ? "8Gps" :
808 ha->min_supported_speed == 2 ? "4Gps" : "unknown");
812 if (IS_QLA28XX(ha) && (mcp->mb[5] & EDIF_HW_SUPPORT)) {
813 ha->flags.edif_hw = 1;
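The mb[2]/mb[5] readback just above decodes link-speed capability codes through chained ternaries (mb[5] carries the min-speed code in a wider 2..6 range). Written as a table the max-speed encoding is easier to scan; a sketch, with the "Gps" spellings reproduced from the source strings:

    /* Max-speed code from the two low bits of mb[2], per the hits above. */
    static const char *max_speed_name(uint16_t mb2)
    {
        switch (mb2 & 0x3) {    /* BIT_0 | BIT_1 */
        case 0:  return "16Gps";
        case 1:  return "32Gps";
        case 2:  return "64Gps";
        default: return "unknown";
        }
    }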
831 * ha: adapter state pointer.
884 * ha: adapter state pointer.
904 struct qla_hw_data *ha = vha->hw;
916 mcp->mb[8] = MSW(ha->exlogin_size);
917 mcp->mb[9] = LSW(ha->exlogin_size);
941 * ha: adapter state pointer.
994 * ha: adapter state pointer.
1014 struct qla_hw_data *ha = vha->hw;
1022 mcp->mb[2] = MSW(ha->exchoffld_buf_dma);
1023 mcp->mb[3] = LSW(ha->exchoffld_buf_dma);
1024 mcp->mb[6] = MSW(MSD(ha->exchoffld_buf_dma));
1025 mcp->mb[7] = LSW(MSD(ha->exchoffld_buf_dma));
1026 mcp->mb[8] = MSW(ha->exchoffld_size);
1027 mcp->mb[9] = LSW(ha->exchoffld_size);
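Both the extended-login hits (mb[8,9]) and the exchange-offload hits (mb[2,3,6,7] plus mb[8,9]) load a 64-bit DMA address and a 32-bit size into 16-bit mailbox registers with the driver's word-split macros. A self-contained sketch of that convention; the macro definitions below are assumptions modeled on qla_def.h:

    #include <stdint.h>

    /* Word-split helpers (assumed; see qla_def.h for the driver's own). */
    #define LSW(x)  ((uint16_t)(x))
    #define MSW(x)  ((uint16_t)(((uint32_t)(x)) >> 16))
    #define LSD(x)  ((uint32_t)(x))
    #define MSD(x)  ((uint32_t)(((uint64_t)(x)) >> 32))

    /* Mirror of the mb[2,3,6,7] address + mb[8,9] size layout above. */
    static void load_buf(uint16_t mb[16], uint64_t dma, uint32_t size)
    {
        mb[2] = MSW(dma);       /* address bits 31..16 */
        mb[3] = LSW(dma);       /* address bits 15..0  */
        mb[6] = MSW(MSD(dma));  /* address bits 63..48 */
        mb[7] = LSW(MSD(dma));  /* address bits 47..32 */
        mb[8] = MSW(size);      /* size bits 31..16    */
        mb[9] = LSW(size);      /* size bits 15..0     */
    }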
1049 * ha: adapter state pointer.
1066 struct qla_hw_data *ha = vha->hw;
1074 if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
1076 if (IS_FWI2_CAPABLE(ha))
1078 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
1090 ha->fw_major_version = mcp->mb[1];
1091 ha->fw_minor_version = mcp->mb[2];
1092 ha->fw_subminor_version = mcp->mb[3];
1093 ha->fw_attributes = mcp->mb[6];
1095 ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
1097 ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
1099 if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
1100 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1101 ha->mpi_version[1] = mcp->mb[11] >> 8;
1102 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1103 ha->mpi_capabilities = (mcp->mb[12] << 16) | mcp->mb[13];
1104 ha->phy_version[0] = mcp->mb[8] & 0xff;
1105 ha->phy_version[1] = mcp->mb[9] >> 8;
1106 ha->phy_version[2] = mcp->mb[9] & 0xff;
1109 if (IS_FWI2_CAPABLE(ha)) {
1110 ha->fw_attributes_h = mcp->mb[15];
1111 ha->fw_attributes_ext[0] = mcp->mb[16];
1112 ha->fw_attributes_ext[1] = mcp->mb[17];
1120 if (ha->fw_attributes_h & 0x4)
1123 __func__, ha->fw_attributes_h);
1125 if (ha->fw_attributes_h & 0x8)
1128 __func__, ha->fw_attributes_h);
1134 if ((ha->fw_attributes_h &
1137 if (ha->fw_attributes_h & FW_ATTR_H_NVME_FBURST)
1143 __func__, ha->fw_attributes_h);
1147 if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
1150 ha->fw_attributes_ext[0]);
1154 if (IS_QLA28XX(ha) && ha->flags.edif_hw && ql2xsecenable &&
1155 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_EDIF)) {
1156 ha->flags.edif_enabled = 1;
1162 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
1163 ha->serdes_version[0] = mcp->mb[7] & 0xff;
1164 ha->serdes_version[1] = mcp->mb[8] >> 8;
1165 ha->serdes_version[2] = mcp->mb[8] & 0xff;
1166 ha->mpi_version[0] = mcp->mb[10] & 0xff;
1167 ha->mpi_version[1] = mcp->mb[11] >> 8;
1168 ha->mpi_version[2] = mcp->mb[11] & 0xff;
1169 ha->pep_version[0] = mcp->mb[13] & 0xff;
1170 ha->pep_version[1] = mcp->mb[14] >> 8;
1171 ha->pep_version[2] = mcp->mb[14] & 0xff;
1172 ha->fw_shared_ram_start = (mcp->mb[19] << 16) | mcp->mb[18];
1173 ha->fw_shared_ram_end = (mcp->mb[21] << 16) | mcp->mb[20];
1174 ha->fw_ddr_ram_start = (mcp->mb[23] << 16) | mcp->mb[22];
1175 ha->fw_ddr_ram_end = (mcp->mb[25] << 16) | mcp->mb[24];
1176 if (IS_QLA28XX(ha)) {
1178 ha->flags.secure_fw = 1;
1182 (ha->flags.secure_fw) ? "Supported" :
1186 if (ha->flags.scm_supported_a &&
1187 (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
1188 ha->flags.scm_supported_f = 1;
1189 ha->sf_init_cb->flags |= cpu_to_le16(BIT_13);
1192 (ha->flags.scm_supported_f) ? "Supported" :
1197 ha->sf_init_cb->flags |= cpu_to_le16(BIT_15);
1199 ha->sf_init_cb->flags |= cpu_to_le16(BIT_14);
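The get-firmware-version hits pack three version bytes into two 16-bit registers (one byte in the low register, two in the high) and 32-bit RAM ranges into register pairs. A sketch of both unpackings:

    /* How the 3-byte versions (mpi, phy, serdes, pep) and the 32-bit
     * shared/DDR RAM bounds above come out of 16-bit mailbox registers. */
    static void unpack_ver3(uint8_t v[3], uint16_t lo, uint16_t hi)
    {
        v[0] = lo & 0xff;       /* e.g. mb[10] low byte  */
        v[1] = hi >> 8;         /* e.g. mb[11] high byte */
        v[2] = hi & 0xff;       /* e.g. mb[11] low byte  */
    }

    static uint32_t unpack_u32(uint16_t hi, uint16_t lo)
    {
        return ((uint32_t)hi << 16) | lo;   /* e.g. (mb[19] << 16) | mb[18] */
    }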
1220 * ha = adapter block pointer.
1268 * ha = adapter block pointer.
1327 * ha = adapter block pointer.
1388 * ha = adapter block pointer.
1443 * ha = adapter state pointer.
1512 * ha = adapter block pointer.
1531 struct qla_hw_data *ha = vha->hw;
1543 spin_lock_irqsave(&ha->hardware_lock, flags);
1548 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1556 if (HAS_EXTENDED_IDS(ha))
1677 * ha = adapter block pointer.
1792 * ha = adapter block pointer.
1847 * ha = adapter block pointer.
1865 struct qla_hw_data *ha = vha->hw;
1870 if (IS_P3P_TYPE(ha) && ql2xdbwr)
1871 qla82xx_wr_32(ha, (uintptr_t __force)ha->nxdb_wr_ptr,
1872 (0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
1874 if (ha->flags.npiv_supported)
1880 mcp->mb[2] = MSW(ha->init_cb_dma);
1881 mcp->mb[3] = LSW(ha->init_cb_dma);
1882 mcp->mb[6] = MSW(MSD(ha->init_cb_dma));
1883 mcp->mb[7] = LSW(MSD(ha->init_cb_dma));
1885 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1887 mcp->mb[10] = MSW(ha->ex_init_cb_dma);
1888 mcp->mb[11] = LSW(ha->ex_init_cb_dma);
1889 mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
1890 mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
1891 mcp->mb[14] = sizeof(*ha->ex_init_cb);
1895 if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
1897 mcp->mb[16] = MSW(ha->sf_init_cb_dma);
1898 mcp->mb[17] = LSW(ha->sf_init_cb_dma);
1899 mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
1900 mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
1901 mcp->mb[15] = sizeof(*ha->sf_init_cb);
1907 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
1920 if (ha->init_cb) {
1923 0x0104d, ha->init_cb, sizeof(*ha->init_cb));
1925 if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
1928 0x0104d, ha->ex_init_cb, sizeof(*ha->ex_init_cb));
1931 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
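Condensed from the init-firmware hits above: the base init_cb always rides in mb[2,3,6,7], while the extended and secure-features control blocks are handed to the firmware only when present, each as a 64-bit address plus size:

    if (ha->ex_init_cb && ha->ex_init_cb->ex_version) {
        mcp->mb[10] = MSW(ha->ex_init_cb_dma);
        mcp->mb[11] = LSW(ha->ex_init_cb_dma);
        mcp->mb[12] = MSW(MSD(ha->ex_init_cb_dma));
        mcp->mb[13] = LSW(MSD(ha->ex_init_cb_dma));
        mcp->mb[14] = sizeof(*ha->ex_init_cb);
    }
    if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
        mcp->mb[16] = MSW(ha->sf_init_cb_dma);
        mcp->mb[17] = LSW(ha->sf_init_cb_dma);
        mcp->mb[18] = MSW(MSD(ha->sf_init_cb_dma));
        mcp->mb[19] = LSW(MSD(ha->sf_init_cb_dma));
        mcp->mb[15] = sizeof(*ha->sf_init_cb);
    }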
1950 * ha = adapter state pointer.
1969 struct qla_hw_data *ha = vha->hw;
1975 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1984 if (opt != 0 && !IS_FWI2_CAPABLE(ha))
1993 if (IS_FWI2_CAPABLE(ha)) {
1998 } else if (HAS_EXTENDED_IDS(ha)) {
2006 mcp->buf_size = IS_FWI2_CAPABLE(ha) ?
2009 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2014 if (IS_FWI2_CAPABLE(ha)) {
2021 if (NVME_TARGET(ha, fcport)) {
2119 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
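The port-database hits show the recurring scratch-buffer discipline: a zeroed buffer from ha->s_dma_pool, its DMA handle handed to the firmware, and the buffer returned to the pool on every exit; the command timeout is 2.5x the firmware login timeout, written in integer math. A simplified sketch (the actual mailbox issue and error paths are elided):

    static int get_pd_sketch(struct qla_hw_data *ha)
    {
        unsigned int tov;
        dma_addr_t pd_dma;
        void *pd;

        pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
        if (!pd)
            return QLA_MEMORY_ALLOC_FAILED;

        /* tov = 2.5 * login_timeout in integer math: 20s -> 50s */
        tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);

        /* ...issue GET PORT DATABASE with pd_dma, waiting up to tov... */
        (void)tov;

        dma_pool_free(ha->s_dma_pool, pd, pd_dma);
        return QLA_SUCCESS;
    }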
2190 * ha = adapter block pointer.
2207 struct qla_hw_data *ha = vha->hw;
2212 if (!ha->flags.fw_started)
2239 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
2257 * ha = adapter block pointer.
2324 * ha = adapter block pointer.
2376 * ha = adapter block pointer.
2443 * ha = adapter block pointer.
2507 struct qla_hw_data *ha = vha->hw;
2516 req = ha->req_q_map[0];
2518 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2539 (ha->r_a_tov / 10 * 2) + 2);
2608 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2618 * ha = adapter block pointer.
2641 struct qla_hw_data *ha = vha->hw;
2648 if (HAS_EXTENDED_IDS(ha)) {
2659 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2704 * ha = adapter block pointer.
2722 struct qla_hw_data *ha = vha->hw;
2727 if (IS_FWI2_CAPABLE(ha))
2733 if (HAS_EXTENDED_IDS(ha))
2740 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
2781 struct qla_hw_data *ha = vha->hw;
2787 lg = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
2807 (ha->r_a_tov / 10 * 2) + 2);
2828 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
2838 * ha = adapter block pointer.
2893 * ha = adapter block pointer.
2939 * ha = adapter block pointer.
3000 * ha = adapter block pointer.
3011 struct qla_hw_data *ha = vha->hw;
3022 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
3023 IS_QLA27XX(ha) || IS_QLA28XX(ha))
3040 ha->orig_fw_tgt_xcb_count = mcp->mb[1];
3041 ha->cur_fw_tgt_xcb_count = mcp->mb[2];
3042 ha->cur_fw_xcb_count = mcp->mb[3];
3043 ha->orig_fw_xcb_count = mcp->mb[6];
3044 ha->cur_fw_iocb_count = mcp->mb[7];
3045 ha->orig_fw_iocb_count = mcp->mb[10];
3046 if (ha->flags.npiv_supported)
3047 ha->max_npiv_vports = mcp->mb[11];
3048 if (IS_QLA81XX(ha) || IS_QLA83XX(ha))
3049 ha->fw_max_fcf_count = mcp->mb[12];
3060 * ha = adapter state pointer.
3078 struct qla_hw_data *ha = vha->hw;
3083 pmap = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
3099 mcp->tov = (ha->login_timeout * 2) + (ha->login_timeout / 2);
3114 dma_pool_free(ha->s_dma_pool, pmap, pmap_dma);
3130 * ha = adapter block pointer.
3148 struct qla_hw_data *ha = vha->hw;
3160 if (IS_FWI2_CAPABLE(ha)) {
3166 } else if (HAS_EXTENDED_IDS(ha)) {
3254 struct qla_hw_data *ha = vha->hw;
3280 abt = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &abt_dma);
3327 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
3348 struct qla_hw_data *ha;
3353 ha = vha->hw;
3365 tsk = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
3376 tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
3419 rval2 = qla2x00_marker(vha, ha->base_qpair, fcport->loop_id, l,
3429 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
3437 struct qla_hw_data *ha = fcport->vha->hw;
3439 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
3448 struct qla_hw_data *ha = fcport->vha->hw;
3450 if ((ql2xasynctmfenable) && IS_FWI2_CAPABLE(ha))
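The two wrappers above share a dispatch shape: prefer the asynchronous task-management path on FWI2-capable parts when the ql2xasynctmfenable module parameter allows it, otherwise fall back to the synchronous mailbox variant. A sketch assuming the driver's async helper; the fallback name is illustrative, not the driver's:

    int abort_target_sketch(struct fc_port *fcport, uint64_t l, int tag)
    {
        struct qla_hw_data *ha = fcport->vha->hw;

        if (ql2xasynctmfenable && IS_FWI2_CAPABLE(ha))
            return qla2x00_async_tm_cmd(fcport, TCF_TARGET_RESET, l, tag);

        return sync_tm_cmd_sketch(fcport, l, tag);  /* hypothetical fallback */
    }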
3462 struct qla_hw_data *ha = vha->hw;
3464 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
3982 struct qla_hw_data *ha = vha->hw;
3999 ha->flags.n2n_ae = 0;
4011 ha->current_topology = ISP_CFG_NL;
4032 ha->current_topology = ISP_CFG_N;
4043 ha->flags.n2n_bigger = 1;
4053 ha->flags.n2n_bigger = 0;
4102 ha->current_topology = ISP_CFG_FL;
4105 ha->current_topology = ISP_CFG_F;
4111 ha->flags.gpsc_supported = 1;
4112 ha->current_topology = ISP_CFG_F;
4120 ha->flags.fawwpn_enabled &&
4143 spin_lock_irqsave(&ha->vport_slock, flags);
4144 list_for_each_entry(vp, &ha->vp_list, list) {
4150 spin_unlock_irqrestore(&ha->vport_slock, flags);
4178 ha->current_topology = ISP_CFG_N;
4179 ha->flags.rida_fmt2 = 1;
4184 ha->flags.n2n_ae = 1;
4185 spin_lock_irqsave(&ha->vport_slock, flags);
4187 spin_unlock_irqrestore(&ha->vport_slock, flags);
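The topology hits end with two walks of the virtual-port list under ha->vport_slock. The per-vport work is elided in this sketch of the locking pattern:

    unsigned long flags;
    struct scsi_qla_host *vp;

    spin_lock_irqsave(&ha->vport_slock, flags);
    list_for_each_entry(vp, &ha->vp_list, list) {
        /* ...propagate the topology/loop event to each vport... */
    }
    spin_unlock_irqrestore(&ha->vport_slock, flags);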
4242 struct qla_hw_data *ha = vha->hw;
4243 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
4250 vpmod = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
4290 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
4300 * ha = adapter block pointer
4413 struct qla_hw_data *ha = vha->hw;
4418 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
4424 options = ha->cs84xx->fw_update ? VCO_FORCE_UPDATE : 0;
4476 spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
4477 ha->cs84xx->op_fw_version =
4479 spin_unlock_irqrestore(&ha->cs84xx->access_lock,
4485 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
4505 struct qla_hw_data *ha = vha->hw;
4507 if (!ha->flags.fw_started)
4513 if (IS_SHADOW_REG_CAPABLE(ha))
4528 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4542 if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4543 IS_QLA28XX(ha))
4545 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4551 spin_lock_irqsave(&ha->hardware_lock, flags);
4554 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4557 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4578 struct qla_hw_data *ha = vha->hw;
4580 if (!ha->flags.fw_started)
4586 if (IS_SHADOW_REG_CAPABLE(ha))
4598 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
4612 if (IS_QLA81XX(ha)) {
4615 } else if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
4622 spin_lock_irqsave(&ha->hardware_lock, flags);
4625 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4629 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4786 struct qla_hw_data *ha = vha->hw;
4788 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
4789 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
4854 struct qla_hw_data *ha = vha->hw;
4856 if (!IS_P3P_TYPE(ha))
4902 struct qla_hw_data *ha = vha->hw;
4904 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
4905 IS_P3P_TYPE(ha))
4911 str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
4946 dma_pool_free(ha->s_dma_pool, str, str_dma);
5005 struct qla_hw_data *ha = vha->hw;
5007 if (!IS_QLA25XX(ha) && !IS_QLA2031(ha) &&
5008 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5014 els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
5027 if (ha->flags.scm_supported_f) {
5031 if (ha->flags.edif_enabled) {
5063 dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
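The ELS command-map hits use a one-off coherent allocation rather than the dma_pool scratch buffers seen elsewhere. The pattern, with the bitmap contents elided:

    void *els_cmd_map;
    dma_addr_t els_cmd_map_dma;

    els_cmd_map = dma_alloc_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
                                     &els_cmd_map_dma, GFP_KERNEL);
    if (!els_cmd_map)
        return QLA_MEMORY_ALLOC_FAILED;
    /* ...set the bits for ELS opcodes the firmware should deliver
     * (the SCM and EDIF cases per the flags checked above)... */
    dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
                      els_cmd_map, els_cmd_map_dma);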
5109 struct qla_hw_data *ha = vha->hw;
5114 if (!IS_FWI2_CAPABLE(ha))
5160 struct qla_hw_data *ha = vha->hw;
5165 if (!IS_FWI2_CAPABLE(ha))
5382 struct qla_hw_data *ha = vha->hw;
5391 if (IS_CNA_CAPABLE(ha)) {
5408 if (IS_CNA_CAPABLE(ha))
5412 if (IS_CNA_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
5413 IS_QLA2031(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5415 if (IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha) ||
5416 IS_QLA28XX(ha))
5507 struct qla_hw_data *ha = vha->hw;
5508 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
5515 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
5536 &ha->mbx_cmd_flags);
5547 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
5570 struct qla_hw_data *ha = vha->hw;
5574 "Entered %s speed:0x%x mode:0x%x.\n", __func__, ha->set_data_rate,
5577 if (!IS_FWI2_CAPABLE(ha))
5581 switch (ha->set_data_rate) {
5587 val = ha->set_data_rate;
5592 ha->set_data_rate);
5593 val = ha->set_data_rate = PORT_SPEED_AUTO;
5603 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
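The set-data-rate hits validate ha->set_data_rate and force auto-negotiation for anything unsupported. The shape is sketched below; only PORT_SPEED_AUTO appears in the hits, so the accepted case labels are assumptions:

    switch (ha->set_data_rate) {
    case PORT_SPEED_AUTO:           /* accepted values pass through...   */
    case PORT_SPEED_16GB:           /* ...these case labels are assumed  */
    case PORT_SPEED_32GB:
    case PORT_SPEED_64GB:
        val = ha->set_data_rate;
        break;
    default:
        /* warn, then fall back to auto-negotiation */
        val = ha->set_data_rate = PORT_SPEED_AUTO;
        break;
    }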
5629 struct qla_hw_data *ha = vha->hw;
5634 if (!IS_FWI2_CAPABLE(ha))
5641 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
5651 ha->link_data_rate = mcp->mb[1];
5653 if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
5662 ha->link_data_rate = mcp->mb[1];
5674 struct qla_hw_data *ha = vha->hw;
5679 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha) &&
5680 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
5740 struct qla_hw_data *ha = vha->hw;
5745 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
5750 if (ha->flags.fcp_prio_enabled)
5782 struct qla_hw_data *ha = vha->hw;
5785 if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
5791 if (IS_QLA25XX(ha)) {
5792 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5793 ha->pdev->subsystem_device == 0x0175) {
5799 if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
5800 ha->pdev->subsystem_device == 0x338e) {
5811 if (IS_QLA82XX(ha)) {
5815 } else if (IS_QLA8044(ha)) {
5829 struct qla_hw_data *ha = vha->hw;
5836 if (!IS_FWI2_CAPABLE(ha))
5864 struct qla_hw_data *ha = vha->hw;
5871 if (!IS_P3P_TYPE(ha))
5898 struct qla_hw_data *ha = vha->hw;
5929 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
5930 if (!ha->md_template_size) {
5942 struct qla_hw_data *ha = vha->hw;
5950 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
5951 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
5952 if (!ha->md_tmplt_hdr) {
5963 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma));
5964 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma));
5965 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma));
5966 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma));
5967 mcp->mb[8] = LSW(ha->md_template_size);
5968 mcp->mb[9] = MSW(ha->md_template_size);
5991 struct qla_hw_data *ha = vha->hw;
6000 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
6001 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
6002 if (!ha->md_tmplt_hdr) {
6009 while (offset < ha->md_template_size) {
6014 mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
6015 mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
6016 mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
6017 mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
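The template fetch above walks ha->md_template_size in chunks, reloading the DMA address at each offset; the per-pass increment is not among the hits, so CHUNK below is an assumption:

    for (offset = 0; offset < ha->md_template_size; offset += CHUNK) {
        dma_addr_t cur = ha->md_tmplt_hdr_dma + offset;

        mcp->mb[4] = LSW(LSD(cur));
        mcp->mb[5] = MSW(LSD(cur));
        mcp->mb[6] = LSW(MSD(cur));
        mcp->mb[7] = MSW(MSD(cur));
        /* ...issue the read-template mailbox command for this chunk... */
    }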
6047 struct qla_hw_data *ha = vha->hw;
6051 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6061 if (IS_QLA8031(ha)) {
6069 if (IS_QLA8031(ha))
6091 struct qla_hw_data *ha = vha->hw;
6095 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
6106 if (IS_QLA8031(ha))
6118 if (IS_QLA8031(ha)) {
6135 struct qla_hw_data *ha = vha->hw;
6139 if (!IS_P3P_TYPE(ha))
6173 struct qla_hw_data *ha = vha->hw;
6177 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6210 struct qla_hw_data *ha = vha->hw;
6214 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
6249 struct qla_hw_data *ha = vha->hw;
6252 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
6300 struct qla_hw_data *ha = vha->hw;
6302 if (!IS_QLA83XX(ha))
6334 struct qla_hw_data *ha = vha->hw;
6336 if (!IS_QLA8031(ha))
6609 struct qla_hw_data *ha = vha->hw;
6615 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
6646 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
6818 struct qla_hw_data *ha = vha->hw;
6824 memset(ha->sfp_data, 0, SFP_DEV_SIZE);
6826 phys_addr = ha->sfp_data_dma;
6827 sfp_data = ha->sfp_data;
6997 struct qla_hw_data *ha = vha->hw;
7002 if (!IS_QLA2031(ha) && !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
7038 ha->beacon_blink_led = 0;