Lines Matching refs:ha

90 struct qla_hw_data *ha = vha->hw;
93 tmo = ha->r_a_tov / 10 * 2;
94 if (IS_QLAFX00(ha)) {
96 } else if (!IS_FWI2_CAPABLE(ha)) {
101 tmo = ha->login_timeout;
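
For scale: with the default ha->r_a_tov = 100 set later in this listing (lines 5418, 8042 and 9255), line 93 works out to 100 / 10 * 2 = 20, i.e. twice R_A_TOV in seconds (r_a_tov is kept in tenths of a second); chips that are not FWI2-capable instead fall back to the NVRAM-derived login timeout at line 101.
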
657 struct qla_hw_data *ha = vha->hw;
659 if (IS_FWI2_CAPABLE(ha))
662 return (loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
680 struct qla_hw_data *ha = vha->hw;
685 spin_lock_irqsave(&ha->vport_slock, flags);
687 dev->loop_id = find_first_zero_bit(ha->loop_id_map, LOOPID_MAP_SIZE);
693 set_bit(dev->loop_id, ha->loop_id_map);
695 spin_unlock_irqrestore(&ha->vport_slock, flags);
711 struct qla_hw_data *ha = fcport->vha->hw;
717 clear_bit(fcport->loop_id, ha->loop_id_map);
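
Lines 685-695 and 711-717 are the loop-ID lifecycle: take vport_slock, find the first clear bit in ha->loop_id_map, set it, and clear it again when the fcport is released (line 662 separately range-checks IDs against max_loop_id). A minimal userspace sketch of the same allocate-lowest-free-bit pattern, with a byte map and a pthread mutex standing in for the kernel bitmap helpers and spinlock; all names here are illustrative, not the driver's:

#include <pthread.h>
#include <stdio.h>

#define MAP_SIZE 128

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char id_map[MAP_SIZE];	/* 0 = free, 1 = in use */

static int alloc_id(void)
{
	int i, id = -1;

	pthread_mutex_lock(&map_lock);
	for (i = 0; i < MAP_SIZE; i++) {	/* find_first_zero_bit() analogue */
		if (!id_map[i]) {
			id_map[i] = 1;		/* set_bit() analogue */
			id = i;
			break;
		}
	}
	pthread_mutex_unlock(&map_lock);
	return id;				/* -1: map exhausted */
}

static void free_id(int id)
{
	pthread_mutex_lock(&map_lock);
	id_map[id] = 0;				/* clear_bit() analogue */
	pthread_mutex_unlock(&map_lock);
}

int main(void)
{
	int a = alloc_id(), b = alloc_id();

	printf("allocated %d and %d\n", a, b);
	free_id(a);
	printf("lowest free ID is reused: %d\n", alloc_id());
	return 0;
}

Freeing a low ID makes it the next one handed out, which is exactly the find_first_zero_bit() behavior the driver relies on.
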
941 } /* switch (ha->current_topology) */
1224 struct qla_hw_data *ha = vha->hw;
1245 dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in,
1381 struct qla_hw_data *ha = vha->hw;
1412 pd = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
1444 dma_pool_free(ha->s_dma_pool, pd, pd_dma);
2243 struct qla_hw_data *ha = vha->hw;
2246 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2247 ha->active_tmf--;
2249 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2256 struct qla_hw_data *ha = vha->hw;
2262 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2263 list_for_each_entry(t, &ha->tmf_active, tmf_elem) {
2269 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2274 list_add_tail(&arg->tmf_elem, &ha->tmf_pending);
2275 while (ha->active_tmf >= MAX_ACTIVE_TMF) {
2276 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
2280 spin_lock_irqsave(&ha->tgt.sess_lock, flags);
2287 if (ha->active_tmf < MAX_ACTIVE_TMF &&
2288 list_is_first(&arg->tmf_elem, &ha->tmf_pending))
2295 ha->active_tmf++;
2296 list_add_tail(&arg->tmf_elem, &ha->tmf_active);
2299 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
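
Lines 2243-2299 throttle task-management functions: a new request first scans ha->tmf_active for a duplicate, then queues on ha->tmf_pending and waits, dropping tgt.sess_lock while it does, until active_tmf falls below MAX_ACTIVE_TMF and it is first in the pending list; it then moves itself to tmf_active and bumps the counter, and completion (lines 2246-2249) decrements it. A sketch of the same bounded-concurrency idea, with a condition variable standing in for the pending-list handshake and an assumed placeholder value for MAX_ACTIVE_TMF:

#include <pthread.h>
#include <stdio.h>

#define MAX_ACTIVE 8	/* assumed placeholder for MAX_ACTIVE_TMF */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t slot_free = PTHREAD_COND_INITIALIZER;
static int active;	/* models ha->active_tmf */

static void tmf_begin(void)
{
	pthread_mutex_lock(&lock);
	while (active >= MAX_ACTIVE)		/* models parking on tmf_pending */
		pthread_cond_wait(&slot_free, &lock);
	active++;				/* models moving to tmf_active */
	pthread_mutex_unlock(&lock);
}

static void tmf_done(void)
{
	pthread_mutex_lock(&lock);
	active--;
	pthread_cond_signal(&slot_free);	/* admit one pending waiter */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	tmf_begin();
	printf("active TMFs: %d\n", active);
	tmf_done();
	return 0;
}
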
2603 struct qla_hw_data *ha = vha->hw;
2612 ha->fcoe_dev_init_timeout = QLA83XX_IDC_INITIALIZATION_TIMEOUT;
2613 ha->fcoe_reset_timeout = QLA83XX_IDC_RESET_ACK_TIMEOUT;
2633 if (ha->flags.nic_core_reset_owner) {
2654 idc_minor_ver |= (QLA83XX_SUPP_IDC_MINOR_VERSION << (ha->portnum * 2));
2657 if (ha->flags.nic_core_reset_owner) {
2675 struct qla_hw_data *ha = vha->hw;
2677 if (ha->fce) {
2678 ha->flags.fce_enabled = 1;
2679 memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
2681 ha->fce_dma, ha->fce_bufs, ha->fce_mb, &ha->fce_bufs);
2686 ha->flags.fce_enabled = 0;
2694 struct qla_hw_data *ha = vha->hw;
2696 if (ha->eft) {
2697 memset(ha->eft, 0, EFT_SIZE);
2698 rval = qla2x00_enable_eft_trace(vha, ha->eft_dma, EFT_NUM_BUFFERS);
2711 * ha = adapter block pointer.
2720 struct qla_hw_data *ha = vha->hw;
2721 struct req_que *req = ha->req_q_map[0];
2722 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2729 ha->flags.chip_reset_done = 0;
2731 ha->flags.pci_channel_io_perm_failure = 0;
2732 ha->flags.eeh_busy = 0;
2740 ha->isp_abort_cnt = 0;
2741 ha->beacon_blink_led = 0;
2743 set_bit(0, ha->req_qid_map);
2744 set_bit(0, ha->rsp_qid_map);
2748 rval = ha->isp_ops->pci_config(vha);
2755 ha->isp_ops->reset_chip(vha);
2758 if (IS_QLA28XX(ha)) {
2760 ha->flags.secure_adapter = 1;
2762 (ha->flags.secure_adapter) ? "Yes" : "No");
2773 if (IS_QLA8044(ha)) {
2784 ha->isp_ops->get_flash_version(vha, req->ring);
2789 ha->fc4_type_priority = FC4_PRIORITY_FCP;
2791 ha->isp_ops->nvram_config(vha);
2793 if (ha->fc4_type_priority != FC4_PRIORITY_FCP &&
2794 ha->fc4_type_priority != FC4_PRIORITY_NVME)
2795 ha->fc4_type_priority = FC4_PRIORITY_FCP;
2798 ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe");
2800 if (ha->flags.disable_serdes) {
2817 rval = ha->isp_ops->chip_diag(vha);
2825 if (IS_QLA84XX(ha)) {
2826 ha->cs84xx = qla84xx_get_chip(vha);
2827 if (!ha->cs84xx) {
2841 ha->flags.chip_reset_done = 1;
2843 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
2854 if (IS_QLA8031(ha)) {
2861 if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
2864 if (IS_P3P_TYPE(ha))
2883 struct qla_hw_data *ha = vha->hw;
2884 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2886 pci_set_master(ha->pdev);
2887 pci_try_set_mwi(ha->pdev);
2889 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2891 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2893 pci_disable_rom(ha->pdev);
2896 spin_lock_irqsave(&ha->hardware_lock, flags);
2897 ha->pci_attr = rd_reg_word(&reg->ctrl_status);
2898 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2915 struct qla_hw_data *ha = vha->hw;
2916 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2918 pci_set_master(ha->pdev);
2919 pci_try_set_mwi(ha->pdev);
2921 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
2924 if (IS_QLA2322(ha) || IS_QLA6322(ha))
2926 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
2935 if (IS_QLA2300(ha)) {
2936 spin_lock_irqsave(&ha->hardware_lock, flags);
2952 ha->fb_rev = RD_FB_CMD_REG(ha, reg);
2954 if (ha->fb_rev == FPM_2300)
2955 pci_clear_mwi(ha->pdev);
2970 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2973 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
2975 pci_disable_rom(ha->pdev);
2978 spin_lock_irqsave(&ha->hardware_lock, flags);
2979 ha->pci_attr = rd_reg_word(&reg->ctrl_status);
2980 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2996 struct qla_hw_data *ha = vha->hw;
2997 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2999 pci_set_master(ha->pdev);
3000 pci_try_set_mwi(ha->pdev);
3002 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
3005 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
3007 pci_write_config_byte(ha->pdev, PCI_LATENCY_TIMER, 0x80);
3010 if (pci_find_capability(ha->pdev, PCI_CAP_ID_PCIX))
3011 pcix_set_mmrbc(ha->pdev, 2048);
3014 if (pci_is_pcie(ha->pdev))
3015 pcie_set_readrq(ha->pdev, 4096);
3017 pci_disable_rom(ha->pdev);
3019 ha->chip_revision = ha->pdev->revision;
3022 spin_lock_irqsave(&ha->hardware_lock, flags);
3023 ha->pci_attr = rd_reg_dword(&reg->ctrl_status);
3024 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3039 struct qla_hw_data *ha = vha->hw;
3041 pci_set_master(ha->pdev);
3042 pci_try_set_mwi(ha->pdev);
3044 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
3047 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
3050 if (pci_is_pcie(ha->pdev))
3051 pcie_set_readrq(ha->pdev, 4096);
3053 pci_disable_rom(ha->pdev);
3055 ha->chip_revision = ha->pdev->revision;
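
Lines 2886-3055 are four generations of the same pci_config routine: enable bus mastering and MWI, read-modify-write PCI_COMMAND, program the latency timer, tune the PCI-X/PCIe read request size where present, and disable the expansion ROM. The core is the PCI_COMMAND read-modify-write; a sketch with stub config-space accessors in place of pci_read_config_word()/pci_write_config_word() (the OR'd bits fall outside the matched lines, so the usual parity/SERR pair is shown here as an assumption):

#include <stdint.h>
#include <stdio.h>

#define PCI_COMMAND_PARITY 0x0040	/* PERR# response enable */
#define PCI_COMMAND_SERR   0x0100	/* SERR# enable */

static uint16_t fake_cmd_reg = 0x0006;	/* pretend config-space contents */

static void cfg_read16(uint16_t *v)  { *v = fake_cmd_reg; }
static void cfg_write16(uint16_t v)  { fake_cmd_reg = v; }

int main(void)
{
	uint16_t w;

	cfg_read16(&w);				/* pci_read_config_word() analogue */
	w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
	cfg_write16(w);				/* pci_write_config_word() analogue */
	printf("PCI_COMMAND now 0x%04x\n", fake_cmd_reg);
	return 0;
}
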
3072 struct qla_hw_data *ha = vha->hw;
3077 if (ha->flags.disable_risc_code_load) {
3081 rval = qla2x00_verify_checksum(vha, ha->fw_srisc_address);
3106 struct qla_hw_data *ha = vha->hw;
3107 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3112 if (unlikely(pci_channel_offline(ha->pdev)))
3115 ha->isp_ops->disable_intrs(ha);
3117 spin_lock_irqsave(&ha->hardware_lock, flags);
3121 pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
3123 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
3125 if (!IS_QLA2100(ha)) {
3128 if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
3149 if (!IS_QLA2200(ha)) {
3159 if (IS_QLA2200(ha)) {
3160 WRT_FB_CMD_REG(ha, reg, 0xa000);
3161 RD_FB_CMD_REG(ha, reg); /* PCI Posting. */
3163 WRT_FB_CMD_REG(ha, reg, 0x00fc);
3167 if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
3193 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
3218 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
3220 if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
3230 pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);
3233 if (!IS_QLA2100(ha)) {
3238 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3263 struct qla_hw_data *ha = vha->hw;
3264 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3270 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3313 struct qla_hw_data *ha = vha->hw;
3314 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
3321 spin_lock_irqsave(&ha->hardware_lock, flags);
3333 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
3343 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
3359 set_bit(ISP_MBX_RDY, &ha->fw_dump_cap_flags);
3377 set_bit(ISP_SOFT_RESET_CMPL, &ha->fw_dump_cap_flags);
3429 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
3436 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3440 IS_NOPOLLING_TYPE(ha) ? "Interrupt" : "Polling");
3442 if (IS_NOPOLLING_TYPE(ha))
3443 ha->isp_ops->enable_intrs(ha);
3536 struct qla_hw_data *ha = vha->hw;
3539 if (pci_channel_offline(ha->pdev) &&
3540 ha->flags.pci_channel_io_perm_failure) {
3544 ha->isp_ops->disable_intrs(ha);
3564 struct qla_hw_data *ha = vha->hw;
3565 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
3570 struct req_que *req = ha->req_q_map[0];
3578 spin_lock_irqsave(&ha->hardware_lock, flags);
3606 if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
3607 data = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 0));
3610 data = RD_MAILBOX_REG(ha, reg, 0);
3622 mb[1] = RD_MAILBOX_REG(ha, reg, 1);
3623 mb[2] = RD_MAILBOX_REG(ha, reg, 2);
3624 mb[3] = RD_MAILBOX_REG(ha, reg, 3);
3625 mb[4] = qla2x00_debounce_register(MAILBOX_REG(ha, reg, 4));
3634 ha->product_id[0] = mb[1];
3635 ha->product_id[1] = mb[2];
3636 ha->product_id[2] = mb[3];
3637 ha->product_id[3] = mb[4];
3641 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * 1024;
3643 ha->fw_transfer_size = REQUEST_ENTRY_SIZE *
3646 if (IS_QLA2200(ha) &&
3647 RD_MAILBOX_REG(ha, reg, 7) == QLA2200A_RISC_ROM_VER) {
3651 ha->device_type |= DT_ISP2200A;
3652 ha->fw_transfer_size = 128;
3656 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3666 spin_lock_irqsave(&ha->hardware_lock, flags);
3673 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3688 struct qla_hw_data *ha = vha->hw;
3689 struct req_que *req = ha->req_q_map[0];
3691 if (IS_P3P_TYPE(ha))
3694 ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
3713 struct qla_hw_data *ha = vha->hw;
3715 if (!IS_FWI2_CAPABLE(ha))
3718 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
3719 !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
3722 if (ha->fce) {
3730 tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma,
3742 ha->fce_dma = tc_dma;
3743 ha->fce = tc;
3744 ha->fce_bufs = FCE_NUM_BUFFERS;
3752 struct qla_hw_data *ha = vha->hw;
3754 if (!IS_FWI2_CAPABLE(ha))
3757 if (ha->eft) {
3765 tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma,
3777 ha->eft_dma = tc_dma;
3778 ha->eft = tc;
3786 struct qla_hw_data *ha = vha->hw;
3787 struct req_que *req = ha->req_q_map[0];
3788 struct rsp_que *rsp = ha->rsp_q_map[0];
3791 if (ha->fw_dump) {
3797 ha->fw_dumped = 0;
3798 ha->fw_dump_cap_flags = 0;
3802 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
3804 } else if (IS_QLA23XX(ha)) {
3806 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
3808 } else if (IS_FWI2_CAPABLE(ha)) {
3809 if (IS_QLA83XX(ha))
3811 else if (IS_QLA81XX(ha))
3813 else if (IS_QLA25XX(ha))
3818 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
3820 if (ha->mqenable) {
3821 if (!IS_QLA83XX(ha))
3827 mq_size += (ha->max_req_queues - 1) *
3829 mq_size += (ha->max_rsp_queues - 1) *
3832 if (ha->tgt.atio_ring)
3833 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
3836 if (ha->fce)
3839 if (ha->eft)
3843 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3844 struct fwdt *fwdt = ha->fwdt;
3863 dump_size += ha->fwdt[1].dump_size;
3870 ha->chain_offset = dump_size;
3872 if (ha->exchoffld_buf)
3874 ha->exchoffld_size;
3875 if (ha->exlogin_buf)
3877 ha->exlogin_size;
3880 if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) {
3884 __func__, dump_size, ha->fw_dump_len,
3885 ha->fw_dump_alloc_len);
3893 mutex_lock(&ha->optrom_mutex);
3894 if (ha->fw_dumped) {
3895 memcpy(fw_dump, ha->fw_dump, ha->fw_dump_len);
3896 vfree(ha->fw_dump);
3897 ha->fw_dump = fw_dump;
3898 ha->fw_dump_alloc_len = dump_size;
3903 vfree(ha->fw_dump);
3904 ha->fw_dump = fw_dump;
3906 ha->fw_dump_len = ha->fw_dump_alloc_len =
3912 if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
3913 ha->mpi_fw_dump = (char *)fw_dump +
3914 ha->fwdt[1].dump_size;
3915 mutex_unlock(&ha->optrom_mutex);
3919 ha->fw_dump->signature[0] = 'Q';
3920 ha->fw_dump->signature[1] = 'L';
3921 ha->fw_dump->signature[2] = 'G';
3922 ha->fw_dump->signature[3] = 'C';
3923 ha->fw_dump->version = htonl(1);
3925 ha->fw_dump->fixed_size = htonl(fixed_size);
3926 ha->fw_dump->mem_size = htonl(mem_size);
3927 ha->fw_dump->req_q_size = htonl(req_q_size);
3928 ha->fw_dump->rsp_q_size = htonl(rsp_q_size);
3930 ha->fw_dump->eft_size = htonl(eft_size);
3931 ha->fw_dump->eft_addr_l =
3932 htonl(LSD(ha->eft_dma));
3933 ha->fw_dump->eft_addr_h =
3934 htonl(MSD(ha->eft_dma));
3936 ha->fw_dump->header_size =
3940 mutex_unlock(&ha->optrom_mutex);
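
Lines 3919-3936 stamp the firmware-dump header: a four-byte 'QLGC' signature followed by htonl()-converted version, sizes and EFT addresses, so the dump decodes identically on any host. A sketch of that layout with a trimmed, illustrative field set:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dump_header {			/* trimmed, illustrative field set */
	uint8_t  signature[4];
	uint32_t version;
	uint32_t fixed_size;
	uint32_t mem_size;
};

int main(void)
{
	struct dump_header hdr;

	memcpy(hdr.signature, "QLGC", 4);	/* 'Q' 'L' 'G' 'C', as above */
	hdr.version = htonl(1);
	hdr.fixed_size = htonl(0x2000);		/* illustrative sizes */
	hdr.mem_size = htonl(0x100000);

	/* on a little-endian host the first stored byte is 0x00, showing
	 * the field went out big-endian */
	printf("version bytes: %02x %02x %02x %02x\n",
	       ((uint8_t *)&hdr.version)[0], ((uint8_t *)&hdr.version)[1],
	       ((uint8_t *)&hdr.version)[2], ((uint8_t *)&hdr.version)[3]);
	return 0;
}

Storing everything big-endian means an offline parser never needs to know the endianness of the host that captured the dump.
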
3993 qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
3999 if (!IS_FWI2_CAPABLE(ha))
4002 if (ha->cur_fw_xcb_count <= ha->cur_fw_iocb_count)
4003 req->num_outstanding_cmds = ha->cur_fw_xcb_count;
4005 req->num_outstanding_cmds = ha->cur_fw_iocb_count;
4137 * ha->lr_distance containing distance settings from NVRAM or SFP
4145 struct qla_hw_data *ha = vha->hw;
4146 struct nvram_81xx *nv = ha->nvram;
4154 ha->flags.lr_detected = 0;
4155 if (IS_BPM_RANGE_CAPABLE(ha) &&
4158 ha->flags.lr_detected = 1;
4159 ha->lr_distance =
4175 ha->flags.lr_detected = 0;
4179 ha->flags.lr_detected = 1;
4182 ha->lr_distance = LR_DISTANCE_10K;
4184 ha->lr_distance = LR_DISTANCE_5K;
4190 types[ha->flags.lr_detected],
4191 ha->flags.lr_detected ? lengths[ha->lr_distance] :
4193 used_nvram, ll, ha->flags.lr_detected, ha->lr_distance);
4194 return ha->flags.lr_detected;
4201 struct qla_hw_data *ha = qpair->vha->hw;
4203 num_qps = ha->num_qpairs + 1;
4204 limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
4206 qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
4210 qpair->fwres.exch_total = ha->orig_fw_xcb_count;
4211 qpair->fwres.exch_limit = (ha->orig_fw_xcb_count *
4218 struct qla_hw_data *ha = vha->hw;
4220 __qla_adjust_iocb_limit(ha->base_qpair);
4221 ha->base_qpair->fwres.iocbs_used = 0;
4222 ha->base_qpair->fwres.exch_used = 0;
4224 for (i = 0; i < ha->max_qpairs; i++) {
4225 if (ha->queue_pair_map[i]) {
4226 __qla_adjust_iocb_limit(ha->queue_pair_map[i]);
4227 ha->queue_pair_map[i]->fwres.iocbs_used = 0;
4228 ha->queue_pair_map[i]->fwres.exch_used = 0;
4232 ha->fwres.iocb_total = ha->orig_fw_iocb_count;
4233 ha->fwres.iocb_limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
4234 ha->fwres.exch_total = ha->orig_fw_xcb_count;
4235 ha->fwres.exch_limit = (ha->orig_fw_xcb_count * QLA_IOCB_PCT_LIMIT) / 100;
4237 atomic_set(&ha->fwres.iocb_used, 0);
4238 atomic_set(&ha->fwres.exch_used, 0);
4244 struct qla_hw_data *ha = vha->hw;
4246 __qla_adjust_iocb_limit(ha->base_qpair);
4248 for (i = 0; i < ha->max_qpairs; i++) {
4249 if (ha->queue_pair_map[i])
4250 __qla_adjust_iocb_limit(ha->queue_pair_map[i]);
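
Lines 4201-4250 recompute firmware resource caps whenever queue pairs come or go: each limit is a fixed percentage, QLA_IOCB_PCT_LIMIT, of the firmware-reported IOCB and exchange totals, applied per queue pair and to the adapter-wide ha->fwres totals (lines 4232-4238). A sketch of the arithmetic with an assumed placeholder for the percentage constant:

#include <stdio.h>

#define QLA_IOCB_PCT_LIMIT 85	/* assumed placeholder; the real value is in the driver headers */

struct fw_resources {
	unsigned int iocb_total, iocb_limit;
	unsigned int exch_total, exch_limit;
};

static void adjust_limits(struct fw_resources *res,
			  unsigned int fw_iocb_count,
			  unsigned int fw_xcb_count)
{
	res->iocb_total = fw_iocb_count;
	res->iocb_limit = fw_iocb_count * QLA_IOCB_PCT_LIMIT / 100;
	res->exch_total = fw_xcb_count;
	res->exch_limit = fw_xcb_count * QLA_IOCB_PCT_LIMIT / 100;
}

int main(void)
{
	struct fw_resources res;

	adjust_limits(&res, 2048, 4096);	/* illustrative firmware counts */
	printf("IOCBs %u/%u, exchanges %u/%u\n",
	       res.iocb_limit, res.iocb_total,
	       res.exch_limit, res.exch_total);
	return 0;
}
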
4265 struct qla_hw_data *ha = vha->hw;
4266 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4270 if (IS_P3P_TYPE(ha)) {
4271 rval = ha->isp_ops->load_risc(vha, &srisc_address);
4279 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
4281 spin_lock_irqsave(&ha->hardware_lock, flags);
4284 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4291 rval = ha->isp_ops->load_risc(vha, &srisc_address);
4303 ha->flags.exlogins_enabled = 1;
4306 ha->flags.exchoffld_enabled = 1;
4316 ha->isp_ops->reset_chip(vha);
4317 ha->isp_ops->chip_diag(vha);
4321 if (IS_ZIO_THRESHOLD_CAPABLE(ha))
4323 ha->last_zio_threshold);
4334 if (IS_P3P_TYPE(ha))
4340 ha->flags.npiv_supported = 0;
4341 if (IS_QLA2XXX_MIDTYPE(ha) &&
4342 (ha->fw_attributes & BIT_2)) {
4343 ha->flags.npiv_supported = 1;
4344 if ((!ha->max_npiv_vports) ||
4345 ((ha->max_npiv_vports + 1) %
4347 ha->max_npiv_vports =
4357 rval = qla2x00_alloc_outstanding_cmds(ha,
4362 if (ql2xallocfwdump && !(IS_P3P_TYPE(ha)))
4377 if (ql2xrdpenable || ha->flags.scm_supported_f ||
4378 ha->flags.edif_enabled)
4383 if (!IS_FWI2_CAPABLE(ha) && !IS_QLA2100(ha) && !IS_QLA2200(ha)) {
4385 spin_lock_irqsave(&ha->hardware_lock, flags);
4386 if (IS_QLA2300(ha))
4393 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4396 if (IS_QLA27XX(ha) || IS_QLA28XX(ha))
4397 ha->flags.fac_supported = 1;
4398 else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
4403 ha->flags.fac_supported = 1;
4404 ha->fdt_block_size = size << 2;
4408 ha->fw_major_version, ha->fw_minor_version,
4409 ha->fw_subminor_version);
4411 if (IS_QLA83XX(ha)) {
4412 ha->flags.fac_supported = 0;
4461 struct qla_hw_data *ha = vha->hw;
4463 memset(ha->fw_options, 0, sizeof(ha->fw_options));
4464 qla2x00_get_fw_options(vha, ha->fw_options);
4466 if (IS_QLA2100(ha) || IS_QLA2200(ha))
4473 ha->fw_seriallink_options, sizeof(ha->fw_seriallink_options));
4475 ha->fw_options[1] &= ~FO1_SET_EMPHASIS_SWING;
4476 if (ha->fw_seriallink_options[3] & BIT_2) {
4477 ha->fw_options[1] |= FO1_SET_EMPHASIS_SWING;
4480 swing = ha->fw_seriallink_options[2] & (BIT_2 | BIT_1 | BIT_0);
4481 emphasis = (ha->fw_seriallink_options[2] &
4483 tx_sens = ha->fw_seriallink_options[0] &
4485 rx_sens = (ha->fw_seriallink_options[0] &
4487 ha->fw_options[10] = (emphasis << 14) | (swing << 8);
4488 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
4491 ha->fw_options[10] |= (tx_sens << 4) | rx_sens;
4492 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
4493 ha->fw_options[10] |= BIT_5 |
4498 swing = (ha->fw_seriallink_options[2] &
4500 emphasis = ha->fw_seriallink_options[3] & (BIT_1 | BIT_0);
4501 tx_sens = ha->fw_seriallink_options[1] &
4503 rx_sens = (ha->fw_seriallink_options[1] &
4505 ha->fw_options[11] = (emphasis << 14) | (swing << 8);
4506 if (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA6312(ha)) {
4509 ha->fw_options[11] |= (tx_sens << 4) | rx_sens;
4510 } else if (IS_QLA2322(ha) || IS_QLA6322(ha))
4511 ha->fw_options[11] |= BIT_5 |
4518 ha->fw_options[3] |= BIT_13;
4521 if (ha->flags.enable_led_scheme)
4522 ha->fw_options[2] |= BIT_12;
4525 if (IS_QLA6312(ha))
4526 ha->fw_options[2] |= BIT_13;
4529 if (ha->operating_mode == P2P) {
4530 ha->fw_options[2] |= BIT_3;
4533 __func__, ha->fw_options[2]);
4537 qla2x00_set_fw_options(vha, ha->fw_options);
4544 struct qla_hw_data *ha = vha->hw;
4546 if (IS_P3P_TYPE(ha))
4551 ha->fw_options[3] |= BIT_12;
4554 if (ha->operating_mode == P2P) {
4555 ha->fw_options[2] |= BIT_3;
4558 __func__, ha->fw_options[2]);
4562 if (ql2xmvasynctoatio && !ha->flags.edif_enabled &&
4563 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
4566 ha->fw_options[2] |= BIT_11;
4568 ha->fw_options[2] &= ~BIT_11;
4571 if (IS_QLA25XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4572 IS_QLA28XX(ha)) {
4579 ha->fw_options[2] |= BIT_4;
4581 ha->fw_options[2] &= ~(BIT_4);
4585 ha->fw_options[2] |= BIT_8;
4587 ha->fw_options[2] &= ~BIT_8;
4593 if (ha->flags.edif_enabled &&
4595 ha->fw_options[3] |= BIT_15;
4596 ha->flags.n2n_fw_acc_sec = 1;
4598 ha->fw_options[3] &= ~BIT_15;
4599 ha->flags.n2n_fw_acc_sec = 0;
4603 if (ql2xrdpenable || ha->flags.scm_supported_f ||
4604 ha->flags.edif_enabled)
4605 ha->fw_options[1] |= ADD_FO1_ENABLE_PUREX_IOCB;
4608 if (IS_BPM_RANGE_CAPABLE(ha))
4609 ha->fw_options[3] |= BIT_10;
4613 __func__, ha->fw_options[1], ha->fw_options[2],
4614 ha->fw_options[3], vha->host->active_mode);
4616 if (ha->fw_options[1] || ha->fw_options[2] || ha->fw_options[3])
4617 qla2x00_set_fw_options(vha, ha->fw_options);
4620 if ((le16_to_cpu(ha->fw_seriallink_options24[0]) & BIT_0) == 0)
4624 le16_to_cpu(ha->fw_seriallink_options24[1]),
4625 le16_to_cpu(ha->fw_seriallink_options24[2]),
4626 le16_to_cpu(ha->fw_seriallink_options24[3]));
4636 struct qla_hw_data *ha = vha->hw;
4637 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
4638 struct req_que *req = ha->req_q_map[0];
4639 struct rsp_que *rsp = ha->rsp_q_map[0];
4642 ha->init_cb->request_q_outpointer = cpu_to_le16(0);
4643 ha->init_cb->response_q_inpointer = cpu_to_le16(0);
4644 ha->init_cb->request_q_length = cpu_to_le16(req->length);
4645 ha->init_cb->response_q_length = cpu_to_le16(rsp->length);
4646 put_unaligned_le64(req->dma, &ha->init_cb->request_q_address);
4647 put_unaligned_le64(rsp->dma, &ha->init_cb->response_q_address);
4649 wrt_reg_word(ISP_REQ_Q_IN(ha, reg), 0);
4650 wrt_reg_word(ISP_REQ_Q_OUT(ha, reg), 0);
4651 wrt_reg_word(ISP_RSP_Q_IN(ha, reg), 0);
4652 wrt_reg_word(ISP_RSP_Q_OUT(ha, reg), 0);
4653 rd_reg_word(ISP_RSP_Q_OUT(ha, reg)); /* PCI Posting. */
4659 struct qla_hw_data *ha = vha->hw;
4660 device_reg_t *reg = ISP_QUE_REG(ha, 0);
4661 struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
4665 struct req_que *req = ha->req_q_map[0];
4666 struct rsp_que *rsp = ha->rsp_q_map[0];
4669 icb = (struct init_cb_24xx *)ha->init_cb;
4679 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
4680 put_unaligned_le64(ha->tgt.atio_dma, &icb->atio_q_address);
4682 if (IS_SHADOW_REG_CAPABLE(ha))
4685 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
4686 IS_QLA28XX(ha)) {
4689 if (ha->flags.msix_enabled) {
4690 msix = &ha->msix_entries[1];
4704 if ((ha->fw_attributes & BIT_6) && (IS_MSIX_NACK_CAPABLE(ha)) &&
4705 (ha->flags.msix_enabled)) {
4707 ha->flags.disable_msix_handshake = 1;
4729 if (ha->set_data_rate) {
4732 qla2x00_get_link_speed_str(ha, ha->set_data_rate));
4733 icb->firmware_options_3 = cpu_to_le32(ha->set_data_rate << 13);
4755 struct qla_hw_data *ha = vha->hw;
4759 (struct mid_init_cb_24xx *) ha->init_cb;
4761 spin_lock_irqsave(&ha->hardware_lock, flags);
4764 for (que = 0; que < ha->max_req_queues; que++) {
4765 req = ha->req_q_map[que];
4766 if (!req || !test_bit(que, ha->req_qid_map))
4781 for (que = 0; que < ha->max_rsp_queues; que++) {
4782 rsp = ha->rsp_q_map[que];
4783 if (!rsp || !test_bit(que, ha->rsp_qid_map))
4788 if (IS_QLAFX00(ha))
4794 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
4795 ha->tgt.atio_ring_index = 0;
4799 ha->isp_ops->config_rings(vha);
4801 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4803 if (IS_QLAFX00(ha)) {
4804 rval = qlafx00_init_firmware(vha, ha->init_cb_size);
4809 ha->isp_ops->update_fw_options(vha);
4817 if (ha->flags.npiv_supported) {
4818 if (ha->operating_mode == LOOP && !IS_CNA_CAPABLE(ha))
4819 ha->max_npiv_vports = MIN_MULTI_ID_FABRIC - 1;
4820 mid_init_cb->count = cpu_to_le16(ha->max_npiv_vports);
4823 if (IS_FWI2_CAPABLE(ha)) {
4826 cpu_to_le16(ha->cur_fw_xcb_count);
4827 ha->flags.dport_enabled =
4831 (ha->flags.dport_enabled) ? "enabled" : "disabled");
4833 ha->flags.fawwpn_enabled =
4837 (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
4839 memcpy(ha->port_name, ha->init_cb->port_name, WWN_SIZE);
4843 if (ha->flags.edif_enabled)
4846 QLA_FW_STARTED(ha);
4847 rval = qla2x00_init_firmware(vha, ha->init_cb_size);
4850 QLA_FW_STOPPED(ha);
4876 struct qla_hw_data *ha = vha->hw;
4882 if (IS_P3P_TYPE(ha))
4891 if ((wait_time = (ha->retry_count*ha->login_timeout) + 5) < min_wait) {
4913 if (IS_QLA84XX(ha) && state[0] != FSTATE_READY) {
4944 qla2x00_get_retry_cnt(vha, &ha->retry_count,
4945 &ha->login_timeout, &ha->r_a_tov);
4969 ha->flags.isp82xx_fw_hung)
4997 * ha = adapter state pointer.
5016 struct qla_hw_data *ha = vha->hw;
5017 scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
5025 if (LOOP_TRANSITION(vha) || atomic_read(&ha->loop_down_timer) ||
5026 IS_CNA_CAPABLE(ha) ||
5033 if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
5054 ha->min_external_loopid = SNS_FIRST_LOOP_ID;
5055 ha->operating_mode = LOOP;
5060 ha->switch_cap = 0;
5061 ha->current_topology = ISP_CFG_NL;
5067 ha->switch_cap = sw_cap;
5068 ha->current_topology = ISP_CFG_FL;
5074 ha->switch_cap = 0;
5075 ha->operating_mode = P2P;
5076 ha->current_topology = ISP_CFG_N;
5082 ha->switch_cap = sw_cap;
5083 ha->operating_mode = P2P;
5084 ha->current_topology = ISP_CFG_F;
5091 ha->switch_cap = 0;
5092 ha->current_topology = ISP_CFG_NL;
5103 spin_lock_irqsave(&ha->hardware_lock, flags);
5107 } else if (!(topo == 2 && ha->flags.n2n_bigger))
5109 spin_unlock_irqrestore(&ha->hardware_lock, flags);
5126 struct qla_hw_data *ha = vha->hw;
5127 int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
5128 !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha);
5133 memcpy(ha->model_number, model, len);
5134 st = en = ha->model_number;
5142 index = (ha->pdev->subsystem_device & 0xff);
5144 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5146 strscpy(ha->model_desc,
5148 sizeof(ha->model_desc));
5150 index = (ha->pdev->subsystem_device & 0xff);
5152 ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
5154 strscpy(ha->model_number,
5156 sizeof(ha->model_number));
5157 strscpy(ha->model_desc,
5159 sizeof(ha->model_desc));
5161 strscpy(ha->model_number, def,
5162 sizeof(ha->model_number));
5165 if (IS_FWI2_CAPABLE(ha))
5166 qla2xxx_get_vpd_field(vha, "\x82", ha->model_desc,
5167 sizeof(ha->model_desc));
5176 struct qla_hw_data *ha = vha->hw;
5177 struct pci_dev *pdev = ha->pdev;
5196 * ha = adapter block pointer.
5212 struct qla_hw_data *ha = vha->hw;
5213 init_cb_t *icb = ha->init_cb;
5214 nvram_t *nv = ha->nvram;
5215 uint8_t *ptr = ha->nvram;
5216 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
5221 ha->nvram_size = sizeof(*nv);
5222 ha->nvram_base = 0;
5223 if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA2300(ha))
5225 ha->nvram_base = 0x80;
5228 ha->isp_ops->read_nvram(vha, ptr, ha->nvram_base, ha->nvram_size);
5229 for (cnt = 0, chksum = 0; cnt < ha->nvram_size; cnt++)
5235 nv, ha->nvram_size);
5251 memset(nv, 0, ha->nvram_size);
5254 if (IS_QLA23XX(ha)) {
5261 } else if (IS_QLA2200(ha)) {
5267 } else if (IS_QLA2100(ha)) {
5299 memset(icb, 0, ha->init_cb_size);
5309 if (IS_QLA23XX(ha)) {
5315 if (IS_QLA2300(ha)) {
5316 if (ha->fb_rev == FPM_2310) {
5317 strcpy(ha->model_number, "QLA2310");
5319 strcpy(ha->model_number, "QLA2300");
5325 } else if (IS_QLA2200(ha)) {
5337 strcpy(ha->model_number, "QLA22xx");
5338 } else /*if (IS_QLA2100(ha))*/ {
5339 strcpy(ha->model_number, "QLA2100");
5356 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
5383 ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
5385 if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
5386 ha->flags.disable_risc_code_load = 0;
5387 ha->flags.enable_lip_reset = ((nv->host_p[1] & BIT_1) ? 1 : 0);
5388 ha->flags.enable_lip_full_login = ((nv->host_p[1] & BIT_2) ? 1 : 0);
5389 ha->flags.enable_target_reset = ((nv->host_p[1] & BIT_3) ? 1 : 0);
5390 ha->flags.enable_led_scheme = (nv->special_options[1] & BIT_4) ? 1 : 0;
5391 ha->flags.disable_serdes = 0;
5393 ha->operating_mode =
5396 memcpy(ha->fw_seriallink_options, nv->seriallink_options,
5397 sizeof(ha->fw_seriallink_options));
5400 ha->serial0 = icb->port_name[5];
5401 ha->serial1 = icb->port_name[6];
5402 ha->serial2 = icb->port_name[7];
5408 ha->retry_count = nv->retry_count;
5415 ha->login_timeout = nv->login_timeout;
5418 ha->r_a_tov = 100;
5420 ha->loop_reset_delay = nv->reset_delay;
5433 ha->loop_down_abort_time =
5436 ha->link_down_timeout = nv->link_down_timeout;
5437 ha->loop_down_abort_time =
5438 (LOOP_DOWN_TIME - ha->link_down_timeout);
5444 ha->port_down_retry_count = nv->port_down_retry_count;
5446 ha->port_down_retry_count = qlport_down_retry;
5448 ha->login_retry_count = nv->retry_count;
5449 if (ha->port_down_retry_count == nv->port_down_retry_count &&
5450 ha->port_down_retry_count > 3)
5451 ha->login_retry_count = ha->port_down_retry_count;
5452 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
5453 ha->login_retry_count = ha->port_down_retry_count;
5455 ha->login_retry_count = ql2xloginretrycount;
5462 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
5475 ha->zio_mode = icb->add_firmware_options[0] &
5477 ha->zio_timer = icb->interrupt_delay_timer ?
5483 if (ha->zio_mode != QLA_ZIO_DISABLED) {
5484 ha->zio_mode = QLA_ZIO_MODE_6;
5488 ha->zio_mode, ha->zio_timer * 100);
5490 icb->add_firmware_options[0] |= (uint8_t)ha->zio_mode;
5491 icb->interrupt_delay_timer = (uint8_t)ha->zio_timer;
5613 struct qla_hw_data *ha = vha->hw;
5618 memset(ha->init_cb, 0, ha->init_cb_size);
5619 sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
5620 rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
5621 ha->init_cb, sz);
5627 q = (__be32 *)&ha->plogi_els_payld.fl_csp;
5629 bp = (uint32_t *)ha->init_cb;
5631 ha->flags.plogi_template_valid = 1;
5639 * ha = adapter block pointer.
5651 struct qla_hw_data *ha = vha->hw;
5680 if ((ha->current_topology == ISP_CFG_FL ||
5681 ha->current_topology == ISP_CFG_F) &&
5687 } else if (ha->current_topology == ISP_CFG_NL ||
5688 ha->current_topology == ISP_CFG_N) {
5724 ha->flags.fw_init_done = 1;
5730 if (ha->flags.edif_enabled && DBELL_INACTIVE(vha))
5732 ha->link_data_rate);
5740 spin_lock_irqsave(&ha->tgt.atio_lock, flags);
5742 spin_unlock_irqrestore(&ha->tgt.atio_lock,
5818 * ha = adapter block pointer.
5834 struct qla_hw_data *ha = vha->hw;
5838 if (N2N_TOPO(ha))
5845 memset(ha->gid_list, 0, qla2x00_gid_list_size(ha));
5846 rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma,
5854 ha->gid_list, entries * sizeof(*ha->gid_list));
5897 gid = ha->gid_list;
5902 if (IS_QLA2100(ha) || IS_QLA2200(ha))
5906 gid = (void *)gid + ha->gid_list_info_size;
5915 (ha->current_topology == ISP_CFG_NL))
5939 if (ha->current_topology != ISP_CFG_N) {
5998 fcport->fp_speed = ha->link_data_rate;
6045 struct qla_hw_data *ha = vha->hw;
6047 if (!IS_IIDMA_CAPABLE(ha))
6054 fcport->fp_speed > ha->link_data_rate ||
6055 !ha->flags.gpsc_supported)
6067 qla2x00_get_link_speed_str(ha, fcport->fp_speed),
6147 * ha = adapter block pointer.
6283 * ha = adapter block pointer.
6296 struct qla_hw_data *ha = vha->hw;
6300 if (IS_FWI2_CAPABLE(ha))
6331 loop_id = NPH_SNS_LID(ha);
6332 rval = ha->isp_ops->fabric_login(vha, loop_id, 0xff, 0xff,
6395 if (USE_ASYNC_SCAN(ha)) {
6424 * ha = adapter block pointer.
6445 struct qla_hw_data *ha = vha->hw;
6446 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
6452 if (!ha->swl)
6453 ha->swl = kcalloc(ha->max_fibre_devices, sizeof(sw_info_t),
6455 swl = ha->swl;
6461 memset(swl, 0, ha->max_fibre_devices * sizeof(sw_info_t));
6502 loop_id = ha->min_external_loopid;
6503 for (; loop_id <= ha->max_loop_id; loop_id++) {
6507 if (ha->current_topology == ISP_CFG_FL &&
6578 (vha->d_id.b24 & 0xffff00)) && ha->current_topology ==
6737 struct qla_hw_data *ha = vha->hw;
6740 set_bit(NPH_MGMT_SERVER, ha->loop_id_map);
6745 spin_lock_irqsave(&ha->vport_slock, flags);
6753 spin_unlock_irqrestore(&ha->vport_slock, flags);
6763 * ha = adapter block pointer.
6780 struct qla_hw_data *ha = vha->hw;
6793 rval = ha->isp_ops->fabric_login(vha, fcport->loop_id,
6846 if (IS_FWI2_CAPABLE(ha)) {
6871 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
6889 ha->isp_ops->fabric_logout(vha, fcport->loop_id,
6908 * ha = adapter block pointer.
6941 * ha = adapter block pointer.
7002 int qla2x00_perform_loop_resync(scsi_qla_host_t *ha)
7006 if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags)) {
7008 atomic_set(&ha->loop_down_timer, 0);
7009 if (!(ha->device_flags & DFLG_NO_CABLE)) {
7010 atomic_set(&ha->loop_state, LOOP_UP);
7011 set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
7012 set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
7013 set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
7015 rval = qla2x00_loop_resync(ha);
7017 atomic_set(&ha->loop_state, LOOP_DEAD);
7019 clear_bit(LOOP_RESYNC_ACTIVE, &ha->dpc_flags);
7029 struct qla_hw_data *ha = vha->hw;
7035 if (IS_QLA8044(ha)) {
7050 (i != ha->portnum)) {
7060 ((i + 8) != ha->portnum)) {
7070 drv_presence_mask = ~((1 << (ha->portnum)) |
7078 (ha->portnum < fcoe_other_function)) {
7081 ha->flags.nic_core_reset_owner = 1;
7089 struct qla_hw_data *ha = vha->hw;
7094 drv_ack |= (1 << ha->portnum);
7105 struct qla_hw_data *ha = vha->hw;
7110 drv_ack &= ~(1 << ha->portnum);
7121 struct qla_hw_data *ha = vha->hw;
7126 ha->idc_audit_ts = (jiffies_to_msecs(jiffies) / 1000);
7127 idc_audit_reg = (ha->portnum) |
7128 (IDC_AUDIT_TIMESTAMP << 7) | (ha->idc_audit_ts << 8);
7134 jiffies_to_msecs(ha->idc_audit_ts)) / 1000);
7135 idc_audit_reg = (ha->portnum) |
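
Lines 7126-7135 pack the IDC audit register: the port number in the low bits, an event code at bit 7, and a seconds timestamp shifted up from bit 8. A sketch of the packing and its decode; IDC_AUDIT_TIMESTAMP's value is assumed here for illustration:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define IDC_AUDIT_TIMESTAMP 1	/* assumed value, for illustration only */

int main(void)
{
	uint32_t portnum = 2;
	/* keep 24 bits so the packed value round-trips below */
	uint32_t ts = (uint32_t)time(NULL) & 0xffffff;
	uint32_t reg = portnum | (IDC_AUDIT_TIMESTAMP << 7) | (ts << 8);

	printf("audit reg 0x%08x -> port %u, event %u, ts %u\n",
	       reg, reg & 0x7f, (reg >> 7) & 1, reg >> 8);
	return 0;
}
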
7151 struct qla_hw_data *ha = vha->hw;
7164 if (ha->flags.nic_core_reset_owner && dev_state == QLA8XXX_DEV_READY) {
7205 struct qla_hw_data *ha = vha->hw;
7208 if (drv_presence & (1 << ha->portnum))
7218 struct qla_hw_data *ha = vha->hw;
7234 ha->portnum);
7251 ha->flags.nic_core_hung = 0;
7266 struct qla_hw_data *ha = vha->hw;
7269 if (!IS_MCTP_CAPABLE(ha)) {
7276 if (!ha->mctp_dump) {
7277 ha->mctp_dump = dma_alloc_coherent(&ha->pdev->dev,
7278 MCTP_DUMP_SIZE, &ha->mctp_dump_dma, GFP_KERNEL);
7280 if (!ha->mctp_dump) {
7288 rval = qla2x00_dump_mctp_data(vha, ha->mctp_dump_dma,
7296 vha->host_no, ha->mctp_dump);
7297 ha->mctp_dumped = 1;
7300 if (!ha->flags.nic_core_reset_hdlr_active && !ha->portnum) {
7301 ha->flags.nic_core_reset_hdlr_active = 1;
7310 ha->flags.nic_core_reset_hdlr_active = 0;
7328 struct qla_hw_data *ha = vha->hw;
7333 "Quiescing I/O - ha=%p.\n", ha);
7335 atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
7340 spin_lock_irqsave(&ha->vport_slock, flags);
7341 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7343 spin_unlock_irqrestore(&ha->vport_slock, flags);
7347 spin_lock_irqsave(&ha->vport_slock, flags);
7350 spin_unlock_irqrestore(&ha->vport_slock, flags);
7364 struct qla_hw_data *ha = vha->hw;
7373 if (!(IS_P3P_TYPE(ha)))
7375 ha->flags.chip_reset_done = 0;
7380 "Performing ISP error recovery - ha=%p.\n", ha);
7382 ha->flags.purge_mbox = 1;
7387 if (!(IS_P3P_TYPE(ha)))
7388 ha->isp_ops->reset_chip(vha);
7390 ha->link_data_rate = PORT_SPEED_UNKNOWN;
7391 SAVE_TOPO(ha);
7392 ha->flags.rida_fmt2 = 0;
7393 ha->flags.n2n_ae = 0;
7394 ha->flags.lip_ae = 0;
7395 ha->current_topology = 0;
7396 QLA_FW_STOPPED(ha);
7397 ha->flags.fw_init_done = 0;
7398 ha->chip_reset++;
7399 ha->base_qpair->chip_reset = ha->chip_reset;
7400 ha->base_qpair->cmd_cnt = ha->base_qpair->cmd_completion_cnt = 0;
7401 ha->base_qpair->prev_completion_cnt = 0;
7402 for (i = 0; i < ha->max_qpairs; i++) {
7403 if (ha->queue_pair_map[i]) {
7404 ha->queue_pair_map[i]->chip_reset =
7405 ha->base_qpair->chip_reset;
7406 ha->queue_pair_map[i]->cmd_cnt =
7407 ha->queue_pair_map[i]->cmd_completion_cnt = 0;
7408 ha->base_qpair->prev_completion_cnt = 0;
7413 spin_lock_irqsave(&ha->hardware_lock, flags);
7414 if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags)) {
7415 clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
7416 complete(&ha->mbx_intr_comp);
7418 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7421 while (atomic_read(&ha->num_pend_mbx_stage2) ||
7422 atomic_read(&ha->num_pend_mbx_stage1)) {
7428 ha->flags.purge_mbox = 0;
7435 spin_lock_irqsave(&ha->vport_slock, flags);
7436 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7438 spin_unlock_irqrestore(&ha->vport_slock, flags);
7442 spin_lock_irqsave(&ha->vport_slock, flags);
7445 spin_unlock_irqrestore(&ha->vport_slock, flags);
7457 spin_lock_irqsave(&ha->vport_slock, flags);
7458 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7460 spin_unlock_irqrestore(&ha->vport_slock, flags);
7465 spin_lock_irqsave(&ha->vport_slock, flags);
7468 spin_unlock_irqrestore(&ha->vport_slock, flags);
7471 if (IS_P3P_TYPE(ha)) {
7491 * ha = adapter block pointer.
7500 struct qla_hw_data *ha = vha->hw;
7502 struct req_que *req = ha->req_q_map[0];
7515 if (qla2x00_isp_reg_stat(ha)) {
7522 ha->flags.chip_reset_done = 1;
7529 if (IS_QLA8031(ha)) {
7537 if (unlikely(pci_channel_offline(ha->pdev) &&
7538 ha->flags.pci_channel_io_perm_failure)) {
7559 ha->isp_ops->get_flash_version(vha, req->ring);
7561 if (qla2x00_isp_reg_stat(ha)) {
7566 ha->isp_ops->nvram_config(vha);
7568 if (qla2x00_isp_reg_stat(ha)) {
7576 if (NVME_PRIORITY(ha, fcport))
7595 ha->isp_ops->enable_intrs(ha);
7597 ha->isp_abort_cnt = 0;
7600 if (IS_QLA81XX(ha) || IS_QLA8031(ha))
7606 if (ha->isp_abort_cnt == 0) {
7620 ha->isp_abort_cnt--;
7623 ha->isp_abort_cnt);
7627 ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
7630 "more times.\n", ha->isp_abort_cnt);
7646 spin_lock_irqsave(&ha->vport_slock, flags);
7647 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
7650 spin_unlock_irqrestore(&ha->vport_slock, flags);
7654 if (NVME_PRIORITY(ha, fcport))
7662 spin_lock_irqsave(&ha->vport_slock, flags);
7666 spin_unlock_irqrestore(&ha->vport_slock, flags);
7668 if (IS_QLA8031(ha)) {
7688 * ha = adapter block pointer.
7697 struct qla_hw_data *ha = vha->hw;
7702 status = ha->isp_ops->chip_diag(vha);
7715 ha->flags.chip_reset_done = 1;
7718 qla25xx_init_queues(ha);
7727 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
7734 qla25xx_init_queues(struct qla_hw_data *ha)
7738 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
7742 for (i = 1; i < ha->max_rsp_queues; i++) {
7743 rsp = ha->rsp_q_map[i];
7744 if (rsp && test_bit(i, ha->rsp_qid_map)) {
7757 for (i = 1; i < ha->max_req_queues; i++) {
7758 req = ha->req_q_map[i];
7759 if (req && test_bit(i, ha->req_qid_map)) {
7781 * ha = adapter block pointer.
7787 struct qla_hw_data *ha = vha->hw;
7788 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
7791 ha->isp_ops->disable_intrs(ha);
7793 spin_lock_irqsave(&ha->hardware_lock, flags);
7798 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7807 struct qla_hw_data *ha = vha->hw;
7808 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
7810 if (IS_P3P_TYPE(ha))
7814 ha->isp_ops->disable_intrs(ha);
7816 spin_lock_irqsave(&ha->hardware_lock, flags);
7821 spin_unlock_irqrestore(&ha->hardware_lock, flags);
7823 if (IS_NOPOLLING_TYPE(ha))
7824 ha->isp_ops->enable_intrs(ha);
7836 struct qla_hw_data *ha = vha->hw;
7837 struct pci_dev *pdev = ha->pdev;
7862 struct qla_hw_data *ha = vha->hw;
7865 icb = (struct init_cb_24xx *)ha->init_cb;
7866 nv = ha->nvram;
7869 if (ha->port_no == 0) {
7870 ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
7871 ha->vpd_base = FA_NVRAM_VPD0_ADDR;
7873 ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
7874 ha->vpd_base = FA_NVRAM_VPD1_ADDR;
7877 ha->nvram_size = sizeof(*nv);
7878 ha->vpd_size = FA_NVRAM_VPD_SIZE;
7881 ha->vpd = ha->nvram + VPD_OFFSET;
7882 ha->isp_ops->read_nvram(vha, ha->vpd,
7883 ha->nvram_base - FA_NVRAM_FUNC0_ADDR, FA_NVRAM_VPD_SIZE * 4);
7887 ha->isp_ops->read_nvram(vha, dptr, ha->nvram_base, ha->nvram_size);
7888 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
7894 nv, ha->nvram_size);
7911 memset(nv, 0, ha->nvram_size);
7919 nv->port_name[1] = 0x00 + ha->port_no + 1;
7963 memset(icb, 0, ha->init_cb_size);
7982 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
8008 ha->flags.disable_risc_code_load = 0;
8009 ha->flags.enable_lip_reset = 0;
8010 ha->flags.enable_lip_full_login =
8012 ha->flags.enable_target_reset =
8014 ha->flags.enable_led_scheme = 0;
8015 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
8017 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
8020 memcpy(ha->fw_seriallink_options24, nv->seriallink_options,
8021 sizeof(ha->fw_seriallink_options24));
8024 ha->serial0 = icb->port_name[5];
8025 ha->serial1 = icb->port_name[6];
8026 ha->serial2 = icb->port_name[7];
8032 ha->retry_count = le16_to_cpu(nv->login_retry_count);
8039 ha->login_timeout = le16_to_cpu(nv->login_timeout);
8042 ha->r_a_tov = 100;
8044 ha->loop_reset_delay = nv->reset_delay;
8057 ha->loop_down_abort_time =
8060 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
8061 ha->loop_down_abort_time =
8062 (LOOP_DOWN_TIME - ha->link_down_timeout);
8066 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
8068 ha->port_down_retry_count = qlport_down_retry;
8071 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
8072 if (ha->port_down_retry_count ==
8074 ha->port_down_retry_count > 3)
8075 ha->login_retry_count = ha->port_down_retry_count;
8076 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
8077 ha->login_retry_count = ha->port_down_retry_count;
8079 ha->login_retry_count = ql2xloginretrycount;
8086 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
8088 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
8093 if (ha->zio_mode != QLA_ZIO_DISABLED) {
8094 ha->zio_mode = QLA_ZIO_MODE_6;
8098 ha->zio_mode, ha->zio_timer * 100);
8101 (uint32_t)ha->zio_mode);
8102 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
8206 struct qla_hw_data *ha = vha->hw;
8211 if (!ha->flt_region_aux_img_status_pri) {
8217 ha->flt_region_aux_img_status_pri,
8243 if (!ha->flt_region_aux_img_status_sec) {
8250 ha->flt_region_aux_img_status_sec,
8305 struct qla_hw_data *ha = vha->hw;
8310 if (!ha->flt_region_img_status_pri) {
8316 ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) !=
8345 if (!ha->flt_region_img_status_sec) {
8351 ha->flt_region_img_status_sec, sizeof(sec_image_status) >> 2);
8415 struct qla_hw_data *ha = vha->hw;
8416 struct req_que *req = ha->req_q_map[0];
8417 struct fwdt *fwdt = ha->fwdt;
8449 dlen = ha->fw_transfer_size >> 2;
8475 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
8559 struct qla_hw_data *ha = vha->hw;
8560 struct req_que *req = ha->req_q_map[0];
8616 wlen = (uint16_t)(ha->fw_transfer_size >> 1);
8662 struct qla_hw_data *ha = vha->hw;
8663 struct req_que *req = ha->req_q_map[0];
8664 struct fwdt *fwdt = ha->fwdt;
8703 dlen = ha->fw_transfer_size >> 2;
8731 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
8829 struct qla_hw_data *ha = vha->hw;
8841 if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
8851 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw_sec);
8858 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
8864 if (!rval || !ha->flt_region_gold_fw)
8869 rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
8874 ha->flags.running_gold_fw = 1;
8882 struct qla_hw_data *ha = vha->hw;
8884 if (ha->flags.pci_channel_io_perm_failure)
8886 if (!IS_FWI2_CAPABLE(ha))
8888 if (!ha->fw_major_version)
8890 if (!ha->flags.fw_started)
8896 ha->isp_ops->reset_chip(vha);
8897 if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
8906 QLA_FW_STOPPED(ha);
8907 ha->flags.fw_init_done = 0;
8916 struct qla_hw_data *ha = vha->hw;
8917 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
8926 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
8932 rval2 = ha->isp_ops->fabric_login(vha, NPH_SNS, 0xff, 0xff, 0xfc, mb,
8965 struct qla_hw_data *ha = vha->hw;
8971 if (cs84xx->bus == ha->pdev->bus) {
8984 cs84xx->bus = ha->pdev->bus;
9007 struct qla_hw_data *ha = vha->hw;
9009 if (ha->cs84xx)
9010 kref_put(&ha->cs84xx->kref, __qla84xx_chip_release);
9018 struct qla_hw_data *ha = vha->hw;
9020 mutex_lock(&ha->cs84xx->fw_update_mutex);
9024 mutex_unlock(&ha->cs84xx->fw_update_mutex);
9042 struct qla_hw_data *ha = vha->hw;
9047 icb = (struct init_cb_81xx *)ha->init_cb;
9048 nv = ha->nvram;
9051 ha->nvram_size = sizeof(*nv);
9052 ha->vpd_size = FA_NVRAM_VPD_SIZE;
9053 if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
9054 ha->vpd_size = FA_VPD_SIZE_82XX;
9056 if (IS_QLA28XX(ha) || IS_QLA27XX(ha))
9060 ha->vpd = ha->nvram + VPD_OFFSET;
9062 faddr = ha->flt_region_vpd;
9063 if (IS_QLA28XX(ha)) {
9065 faddr = ha->flt_region_vpd_sec;
9071 ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size);
9074 faddr = ha->flt_region_nvram;
9075 if (IS_QLA28XX(ha)) {
9077 faddr = ha->flt_region_nvram_sec;
9083 ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size);
9086 for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++)
9092 nv, ha->nvram_size);
9109 memset(nv, 0, ha->nvram_size);
9116 nv->port_name[1] = 0x00 + ha->port_no + 1;
9150 nv->enode_mac[5] = 0x06 + ha->port_no + 1;
9155 if (IS_T10_PI_CAPABLE(ha))
9161 memset(icb, 0, ha->init_cb_size);
9188 icb->enode_mac[5] = 0x06 + ha->port_no + 1;
9192 memcpy(ha->ex_init_cb, &nv->ex_version, sizeof(*ha->ex_init_cb));
9193 ha->frame_payload_size = le16_to_cpu(icb->frame_payload_size);
9218 if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
9220 ha->flags.scm_supported_a = 1;
9224 ha->flags.disable_risc_code_load = 0;
9225 ha->flags.enable_lip_reset = 0;
9226 ha->flags.enable_lip_full_login =
9228 ha->flags.enable_target_reset =
9230 ha->flags.enable_led_scheme = 0;
9231 ha->flags.disable_serdes = le32_to_cpu(nv->host_p) & BIT_5 ? 1 : 0;
9233 ha->operating_mode = (le32_to_cpu(icb->firmware_options_2) &
9237 ha->serial0 = icb->port_name[5];
9238 ha->serial1 = icb->port_name[6];
9239 ha->serial2 = icb->port_name[7];
9245 ha->retry_count = le16_to_cpu(nv->login_retry_count);
9252 ha->login_timeout = le16_to_cpu(nv->login_timeout);
9255 ha->r_a_tov = 100;
9257 ha->loop_reset_delay = nv->reset_delay;
9270 ha->loop_down_abort_time =
9273 ha->link_down_timeout = le16_to_cpu(nv->link_down_timeout);
9274 ha->loop_down_abort_time =
9275 (LOOP_DOWN_TIME - ha->link_down_timeout);
9279 ha->port_down_retry_count = le16_to_cpu(nv->port_down_retry_count);
9281 ha->port_down_retry_count = qlport_down_retry;
9284 ha->login_retry_count = le16_to_cpu(nv->login_retry_count);
9285 if (ha->port_down_retry_count ==
9287 ha->port_down_retry_count > 3)
9288 ha->login_retry_count = ha->port_down_retry_count;
9289 else if (ha->port_down_retry_count > (int)ha->login_retry_count)
9290 ha->login_retry_count = ha->port_down_retry_count;
9292 ha->login_retry_count = ql2xloginretrycount;
9296 (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)))
9301 ha->zio_mode = le32_to_cpu(icb->firmware_options_2) &
9303 ha->zio_timer = le16_to_cpu(icb->interrupt_delay_timer) ?
9309 if (ha->zio_mode != QLA_ZIO_DISABLED) {
9310 ha->zio_mode = QLA_ZIO_MODE_6;
9314 ha->zio_mode,
9315 ha->zio_timer * 100);
9318 (uint32_t)ha->zio_mode);
9319 icb->interrupt_delay_timer = cpu_to_le16(ha->zio_timer);
9330 ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha);
9343 struct qla_hw_data *ha = vha->hw;
9350 ha->flags.chip_reset_done = 1;
9355 qla2x00_marker(vha, ha->base_qpair, 0, 0, MK_SYNC_ALL);
9376 ha->isp_ops->enable_intrs(ha);
9378 ha->isp_abort_cnt = 0;
9384 if (ha->fce) {
9385 ha->flags.fce_enabled = 1;
9386 memset(ha->fce, 0,
9387 fce_calc_size(ha->fce_bufs));
9389 ha->fce_dma, ha->fce_bufs, ha->fce_mb,
9390 &ha->fce_bufs);
9395 ha->flags.fce_enabled = 0;
9399 if (ha->eft) {
9400 memset(ha->eft, 0, EFT_SIZE);
9402 ha->eft_dma, EFT_NUM_BUFFERS);
9415 spin_lock_irqsave(&ha->vport_slock, flags);
9416 list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
9419 spin_unlock_irqrestore(&ha->vport_slock, flags);
9423 spin_lock_irqsave(&ha->vport_slock, flags);
9427 spin_unlock_irqrestore(&ha->vport_slock, flags);
9464 struct qla_hw_data *ha = vha->hw;
9466 if (!ha->fcp_prio_cfg || !ha->flags.fcp_prio_enabled)
9470 entries = ha->fcp_prio_cfg->num_entries;
9471 pri_entry = &ha->fcp_prio_cfg->entry[0];
9591 * ha = adapter block pointer.
9619 struct qla_hw_data *ha = vha->hw;
9624 if (!(ha->fw_attributes & BIT_6) || !ha->flags.msix_enabled) {
9642 qpair->use_shadow_reg = IS_SHADOW_REG_CAPABLE(ha) ? 1 : 0;
9645 mutex_lock(&ha->mq_lock);
9646 qpair_id = find_first_zero_bit(ha->qpair_qid_map, ha->max_qpairs);
9647 if (ha->num_qpairs >= ha->max_qpairs) {
9648 mutex_unlock(&ha->mq_lock);
9653 ha->num_qpairs++;
9654 set_bit(qpair_id, ha->qpair_qid_map);
9655 ha->queue_pair_map[qpair_id] = qpair;
9658 qpair->fw_started = ha->flags.fw_started;
9661 qpair->chip_reset = ha->base_qpair->chip_reset;
9662 qpair->enable_class_2 = ha->base_qpair->enable_class_2;
9664 ha->base_qpair->enable_explicit_conf;
9666 for (i = 0; i < ha->msix_count; i++) {
9667 msix = &ha->msix_entries[i];
9683 qpair->pdev = ha->pdev;
9684 if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
9687 mutex_unlock(&ha->mq_lock);
9690 rsp_id = qla25xx_create_rsp_que(ha, 0, 0, 0, qpair, startqp);
9697 qpair->rsp = ha->rsp_q_map[rsp_id];
9700 req_id = qla25xx_create_req_que(ha, 0, vp_idx, 0, rsp_id, qos,
9708 qpair->req = ha->req_q_map[req_id];
9715 if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
9716 if (ha->fw_attributes & BIT_4)
9757 mutex_lock(&ha->mq_lock);
9763 ha->queue_pair_map[qpair_id] = NULL;
9764 clear_bit(qpair_id, ha->qpair_qid_map);
9765 ha->num_qpairs--;
9766 mutex_unlock(&ha->mq_lock);
9775 struct qla_hw_data *ha = qpair->hw;
9795 dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
9802 mutex_lock(&ha->mq_lock);
9803 ha->queue_pair_map[qpair->id] = NULL;
9804 clear_bit(qpair->id, ha->qpair_qid_map);
9805 ha->num_qpairs--;
9814 mutex_unlock(&ha->mq_lock);