Lines Matching refs:vha

15 static int qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha,
22 struct scsi_qla_host *vha;
36 int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
45 if (!vha->flags.nvme_enabled) {
46 ql_log(ql_log_info, vha, 0x2100,
52 if (!vha->nvme_local_port && qla_nvme_register_hba(vha))
79 ql_log(ql_log_info, vha, 0x2102,
84 ret = nvme_fc_register_remoteport(vha->nvme_local_port, &req,
87 ql_log(ql_log_warn, vha, 0x212e,
97 ql_log(ql_log_info, vha, 0x212a,
101 ql_log(ql_log_info, vha, 0x212b,
115 struct scsi_qla_host *vha;
123 vha = (struct scsi_qla_host *)lport->private;
124 ha = vha->hw;
126 ql_log(ql_log_info, vha, 0x2104,
131 ql_log(ql_log_warn, vha, 0x212f,
143 ql_log(ql_log_info, vha, 0x2121,
149 qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
151 ql_log(ql_log_warn, vha, 0x2122,
155 qla_adjust_iocb_limit(vha);
305 struct qla_hw_data *ha = fcport->vha->hw;
310 ql_dbg(ql_dbg_io, fcport->vha, 0xffff,
318 ql_log(ql_log_info, sp->fcport->vha, 0xffff,
335 ql_dbg(ql_dbg_io, fcport->vha, 0x212b,
368 struct scsi_qla_host *vha = uctx->vha;
369 struct qla_hw_data *ha = vha->hw;
383 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
411 ql_dbg(ql_dbg_unsol, vha, 0x2122,
428 ql_dbg(ql_log_warn, vha, 0x2123,
438 a.vp_idx = vha->vp_idx;
441 qla_nvme_ls_reject_iocb(vha, ha->base_qpair, &a, true);
475 struct scsi_qla_host *vha;
483 vha = fcport->vha;
484 ha = vha->hw;
490 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
517 ql_log(ql_log_warn, vha, 0x700e,
563 struct scsi_qla_host *vha = sp->fcport->vha;
564 struct qla_hw_data *ha = vha->hw;
585 req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
600 if (qla2x00_check_reg16_for_disconnect(vha, cnt)) {
649 if ((vha->flags.nvme_first_burst) &&
668 if (vha->flags.nvme2_enabled &&
678 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
747 if (vha->flags.process_response_queue &&
749 qla24xx_process_response_queue(vha, rsp);
766 struct scsi_qla_host *vha;
787 vha = fcport->vha;
788 ha = vha->hw;
790 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
806 sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC);
819 sp->vha = vha;
826 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x212d,
839 struct scsi_qla_host *vha = lport->private;
841 blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
846 struct scsi_qla_host *vha = lport->private;
848 ql_log(ql_log_info, vha, 0x210f,
849 "localport delete of %p completed.\n", vha->nvme_local_port);
850 vha->nvme_local_port = NULL;
851 complete(&vha->nvme_del_done);
863 ql_log(ql_log_info, fcport->vha, 0x2110,
897 ql_log(ql_log_warn, fcport->vha, 0x2112,
901 if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags))
907 ql_log(ql_log_info, fcport->vha, 0x2114,
913 void qla_nvme_delete(struct scsi_qla_host *vha)
920 if (vha->nvme_local_port) {
921 init_completion(&vha->nvme_del_done);
922 ql_log(ql_log_info, vha, 0x2116,
924 vha->nvme_local_port);
925 nv_ret = nvme_fc_unregister_localport(vha->nvme_local_port);
927 ql_log(ql_log_info, vha, 0x2115,
930 wait_for_completion(&vha->nvme_del_done);
934 int qla_nvme_register_hba(struct scsi_qla_host *vha)
944 ha = vha->hw;
948 ql_log(ql_log_warn, vha, 0xfffd,
953 ql_log(ql_log_warn, vha, 0xfffd,
964 ql_log(ql_log_info, vha, 0xfffb,
968 pinfo.node_name = wwn_to_u64(vha->node_name);
969 pinfo.port_name = wwn_to_u64(vha->port_name);
971 pinfo.port_id = vha->d_id.b24;
978 if (!vha->nvme_local_port) {
979 ql_log(ql_log_info, vha, 0xffff,
982 qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
986 &vha->nvme_local_port);
993 ql_log(ql_log_warn, vha, 0xffff,
996 vha->nvme_local_port->private = vha;
1009 ha = orig_sp->fcport->vha->hw;
1024 struct scsi_qla_host *vha;
1029 vha = orig_sp->fcport->vha;
1043 ql_dbg(ql_dbg_async, vha, 0xf09d,
1050 ql_dbg(ql_dbg_async, vha, 0xf09e,
1053 ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
1059 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0xf09f,
1065 ql_dbg(ql_dbg_async, vha, 0xf0a0,
1070 ql_dbg(ql_dbg_async, vha, 0xf0a1,
1102 static void qla_nvme_lsrjt_pt_iocb(struct scsi_qla_host *vha,
1129 qla_nvme_ls_reject_iocb(struct scsi_qla_host *vha, struct qla_qpair *qp,
1136 ql_log(ql_log_warn, vha, 0x210e,
1142 qla_nvme_fc_format_rjt((void *)vha->hw->lsrjt.c, a->opcode,
1146 a->tx_addr = vha->hw->lsrjt.cdma;
1149 ql_dbg(ql_dbg_unsol, vha, 0x211f,
1152 ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x210f,
1153 vha->hw->lsrjt.c, sizeof(*vha->hw->lsrjt.c));
1157 ql_dbg(ql_dbg_unsol, vha, 0x2110,
1161 qla_nvme_lsrjt_pt_iocb(vha, lsrjt_iocb, a);
1164 qla2x00_start_iocbs(vha, qp->req);
1176 * @vha: SCSI qla host
1180 qla2xxx_process_purls_pkt(struct scsi_qla_host *vha, struct purex_item *item)
1191 ql_dbg(ql_dbg_unsol, vha, 0x2125, "NVMe transport ls_req failed\n");
1193 a.vp_idx = vha->vp_idx;
1196 qla_nvme_ls_reject_iocb(vha, vha->hw->base_qpair, &a, true);
1205 scsi_qla_host_t *base_vha, *vha, *tvp;
1214 list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
1215 if (vha->vp_idx == vp_index) {
1217 return vha;
1234 scsi_qla_host_t *vha;
1244 vha = qla2xxx_get_vha_from_vp_idx(ha, p->vp_index);
1245 if (!vha) {
1266 fcport = qla2x00_find_fcport_by_nportid(vha, &id, 0);
1268 ql_dbg(ql_dbg_unsol, vha, 0x211e,
1279 item = qla27xx_copy_multiple_pkt(vha, pkt, rsp, true, false);
1289 ql_log(ql_log_info, vha, 0x2126, "Failed allocate memory\n");
1297 uctx->vha = vha;
1307 ql_dbg(ql_dbg_unsol, vha, 0x2121,
1317 ql_dump_buffer(ql_dbg_unsol + ql_dbg_verbose, vha, 0x2120,
1320 qla24xx_queue_purex_item(vha, item, qla2xxx_process_purls_pkt);
1323 qla_nvme_ls_reject_iocb(vha, (*rsp)->qpair, &a, false);
1324 __qla_consume_iocb(vha, pkt, rsp);
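
Taken together, the matches show one recurring pattern: every NVMe-FC path in this file needs the owning scsi_qla_host, and it is recovered either from the local port's private pointer that registration stored (lines 123-124, 839, 996 above) or from the fcport back-pointer on the I/O path (lines 305, 483, 787 above). A minimal sketch of that pattern follows; the structures are cut down to only the fields the matches name, and everything else (helper names, the reduced nvme_fc_local_port) is an assumption for illustration, not the driver's actual definitions.

    /* Cut-down stand-ins: only the fields named in the matches above are
     * kept; the real structures live in the qla2xxx and nvme-fc headers. */
    struct qla_hw_data;                               /* adapter-wide state, opaque here */

    struct nvme_fc_local_port {
            void *private;                            /* reduced stand-in for the real type */
    };

    struct scsi_qla_host {
            struct qla_hw_data *hw;                   /* vha->hw, dereferenced throughout */
            struct nvme_fc_local_port *nvme_local_port;
    };

    struct fc_port {
            struct scsi_qla_host *vha;                /* back-pointer used on the I/O path */
    };

    /* Registration publishes vha through the local port (cf. line 996 above). */
    static inline void sketch_publish_vha(struct scsi_qla_host *vha)
    {
            vha->nvme_local_port->private = vha;
    }

    /* Transport callbacks recover vha the way lines 123-124 and 839 do. */
    static inline struct scsi_qla_host *
    sketch_vha_from_lport(struct nvme_fc_local_port *lport)
    {
            return (struct scsi_qla_host *)lport->private;
    }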