Lines matching references to vha
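A cluster of matches below (file lines 201-219) comes from the helpers that count pending commands per host: they take vha->hw->tgt.q_full_lock, adjust num_pend_cmds, and record a high-water mark in vha->qla_stats.stat_max_pend_cmds. The following is a minimal user-space sketch of that lock/increment/track-max pattern; the struct and the pthread mutex are simplified stand-ins for the driver's structures and its spin_lock_irqsave() usage, not the real definitions.

#include <pthread.h>
#include <stdio.h>

/*
 * Simplified stand-in for the counters the driver keeps under
 * vha->hw->tgt.q_full_lock (the real code uses spin_lock_irqsave()).
 */
struct tgt_counters {
	pthread_mutex_t q_full_lock;
	int num_pend_cmds;
	int stat_max_pend_cmds;		/* high-water mark, never decremented */
};

/* Same shape as qlt_incr_num_pend_cmds(): bump the count, track the max. */
static void incr_num_pend_cmds(struct tgt_counters *t)
{
	pthread_mutex_lock(&t->q_full_lock);
	t->num_pend_cmds++;
	if (t->num_pend_cmds > t->stat_max_pend_cmds)
		t->stat_max_pend_cmds = t->num_pend_cmds;
	pthread_mutex_unlock(&t->q_full_lock);
}

/* Same shape as qlt_decr_num_pend_cmds(): only the live count goes down. */
static void decr_num_pend_cmds(struct tgt_counters *t)
{
	pthread_mutex_lock(&t->q_full_lock);
	t->num_pend_cmds--;
	pthread_mutex_unlock(&t->q_full_lock);
}

int main(void)
{
	static struct tgt_counters t = {
		.q_full_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	incr_num_pend_cmds(&t);
	incr_num_pend_cmds(&t);
	decr_num_pend_cmds(&t);
	printf("pending=%d, max=%d\n", t.num_pend_cmds, t.stat_max_pend_cmds);
	return 0;
}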

109 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
111 static void qlt_disable_vha(struct scsi_qla_host *vha);
117 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
119 static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
155 void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
157 scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
164 static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
167 if (unlikely(vha->marker_needed != 0)) {
168 int rc = qla2x00_issue_marker(vha, vha_locked);
171 ql_dbg(ql_dbg_tgt, vha, 0xe03d,
173 vha->vp_idx);
180 struct scsi_qla_host *qla_find_host_by_d_id(struct scsi_qla_host *vha,
186 if (vha->d_id.b.area == d_id.area &&
187 vha->d_id.b.domain == d_id.domain &&
188 vha->d_id.b.al_pa == d_id.al_pa)
189 return vha;
193 host = btree_lookup32(&vha->hw->host_map, key);
195 ql_dbg(ql_dbg_tgt_mgt + ql_dbg_verbose, vha, 0xf005,
201 static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
205 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
207 vha->hw->tgt.num_pend_cmds++;
208 if (vha->hw->tgt.num_pend_cmds > vha->qla_stats.stat_max_pend_cmds)
209 vha->qla_stats.stat_max_pend_cmds =
210 vha->hw->tgt.num_pend_cmds;
211 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
213 static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
217 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
218 vha->hw->tgt.num_pend_cmds--;
219 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
223 static void qlt_queue_unknown_atio(scsi_qla_host_t *vha,
227 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
231 ql_dbg(ql_dbg_async, vha, 0x502c,
233 vha->vp_idx);
241 u->vha = vha;
245 spin_lock_irqsave(&vha->cmd_list_lock, flags);
246 list_add_tail(&u->cmd_list, &vha->unknown_atio_list);
247 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
249 schedule_delayed_work(&vha->unknown_atio_work, 1);
255 qlt_send_term_exchange(vha->hw->base_qpair, NULL, atio, ha_locked, 0);
259 static void qlt_try_to_dequeue_unknown_atios(struct scsi_qla_host *vha,
264 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
268 list_for_each_entry_safe(u, t, &vha->unknown_atio_list, cmd_list) {
270 ql_dbg(ql_dbg_async, vha, 0x502e,
273 qlt_send_term_exchange(vha->hw->base_qpair, NULL,
278 host = qla_find_host_by_d_id(vha, u->atio.u.isp24.fcp_hdr.d_id);
280 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x502f,
284 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503a,
287 qlt_send_term_exchange(vha->hw->base_qpair, NULL,
290 ql_dbg(ql_dbg_async + ql_dbg_verbose, vha, 0x503d,
291 "Reschedule u %p, vha %p, host %p\n", u, vha, host);
294 schedule_delayed_work(&vha->unknown_atio_work,
301 spin_lock_irqsave(&vha->cmd_list_lock, flags);
303 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
310 struct scsi_qla_host *vha = container_of(to_delayed_work(work),
313 qlt_try_to_dequeue_unknown_atios(vha, 0);
316 static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
319 ql_dbg(ql_dbg_tgt, vha, 0xe072,
321 __func__, vha->vp_idx, atio->u.raw.entry_type,
327 struct scsi_qla_host *host = qla_find_host_by_d_id(vha,
330 ql_dbg(ql_dbg_tgt, vha, 0xe03e,
332 "with unknown d_id %x:%x:%x\n", vha->vp_idx,
338 qlt_queue_unknown_atio(vha, atio, ha_locked);
341 if (unlikely(!list_empty(&vha->unknown_atio_list)))
342 qlt_try_to_dequeue_unknown_atios(vha, ha_locked);
350 struct scsi_qla_host *host = vha;
354 qlt_issue_marker(vha, ha_locked);
358 host = qla_find_host_by_vp_idx(vha,
361 ql_dbg(ql_dbg_tgt, vha, 0xe03f,
365 vha->vp_idx, entry->u.isp24.vp_index);
374 qla24xx_report_id_acquisition(vha,
382 struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
387 ql_dbg(ql_dbg_tgt, vha, 0xe00a,
390 vha->vp_idx, entry->vp_index);
404 ql_dbg(ql_dbg_tgt, vha, 0xe040,
406 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
413 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha,
418 ql_dbg(ql_dbg_tgt, vha, 0xe073,
420 vha->vp_idx, __func__);
425 struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
428 ql_dbg(ql_dbg_tgt, vha, 0xe041,
431 vha->vp_idx, entry->vp_index);
444 host = qla_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
446 ql_dbg(ql_dbg_tgt, vha, 0xe042,
449 vha->vp_idx, entry->u.isp24.vp_index);
458 struct scsi_qla_host *host = vha;
462 host = qla_find_host_by_vp_idx(vha,
465 ql_dbg(ql_dbg_tgt, vha, 0xe043,
469 "vp_index %d\n", vha->vp_idx,
482 struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
485 ql_dbg(ql_dbg_tgt, vha, 0xe044,
488 "vp_index %d\n", vha->vp_idx, entry->vp_index);
499 struct scsi_qla_host *host = qla_find_host_by_vp_idx(vha,
502 ql_dbg(ql_dbg_tgt, vha, 0xe045,
505 "vp_index %d\n", vha->vp_idx, entry->vp_index);
512 qlt_response_pkt(vha, rsp, pkt);
521 static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport,
526 e = qla2x00_alloc_work(vha, QLA_EVT_NACK);
533 return qla2x00_post_work(vha, e);
538 struct scsi_qla_host *vha = sp->vha;
541 ql_dbg(ql_dbg_disc, vha, 0x20f2,
545 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
547 sp->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
558 ql_dbg(ql_dbg_edif, vha, 0x20ef,
563 qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
565 qla_edb_eventcreate(vha, VND_CMD_AUTH_STATE_NEEDED, sp->fcport->d_id.b24,
579 vha->fcport_count++;
580 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
582 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
598 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
603 int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
615 if (vha->hw->flags.edif_enabled &&
630 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
636 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
641 ql_dbg(ql_dbg_disc, vha, 0x20f4,
658 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
667 mutex_lock(&vha->vha_tgt.tgt_mutex);
668 t = qlt_create_sess(vha, e->u.nack.fcport, 0);
669 mutex_unlock(&vha->vha_tgt.tgt_mutex);
671 ql_log(ql_log_info, vha, 0xd034,
674 vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
678 qla24xx_async_notify_ack(vha, e->u.nack.fcport,
687 if (!fcport || !fcport->vha || !fcport->vha->hw)
690 ha = fcport->vha->hw;
703 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
705 struct qla_hw_data *ha = vha->hw;
706 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
710 if (!vha->hw->tgt.tgt_ops)
727 mutex_lock(&vha->vha_tgt.tgt_mutex);
728 sess = qlt_create_sess(vha, fcport, false);
729 mutex_unlock(&vha->vha_tgt.tgt_mutex);
739 ql_dbg(ql_dbg_disc, vha, 0x2107,
746 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
748 "(loop ID %d) reappeared\n", vha->vp_idx,
751 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
760 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
762 "port %8phC (loop ID %d) became global\n", vha->vp_idx,
777 qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
782 lockdep_assert_held(&vha->hw->hardware_lock);
784 list_for_each_entry(pla, &vha->plogi_ack_list, list) {
786 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d,
790 qlt_send_term_imm_notif(vha, &pla->iocb, 1);
798 ql_dbg(ql_dbg_async, vha, 0x5088,
800 vha->vp_idx);
806 list_add_tail(&pla->list, &vha->plogi_ack_list);
811 void qlt_plogi_ack_unref(struct scsi_qla_host *vha,
825 ql_dbg(ql_dbg_disc, vha, 0x5089,
843 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PLOGI);
845 qla24xx_post_nack_work(vha, fcport, iocb, SRB_NACK_PRLI);
847 list_for_each_entry(fcport, &vha->vp_fcports, list) {
859 qlt_plogi_ack_link(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla,
866 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
886 qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
909 qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
914 if (test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags)) {
919 mutex_lock(&vha->vha_tgt.tgt_mutex);
921 list_for_each_entry(tmp, &vha->logo_list, list) {
924 mutex_unlock(&vha->vha_tgt.tgt_mutex);
929 list_add_tail(&logo->list, &vha->logo_list);
931 mutex_unlock(&vha->vha_tgt.tgt_mutex);
933 res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);
935 mutex_lock(&vha->vha_tgt.tgt_mutex);
937 mutex_unlock(&vha->vha_tgt.tgt_mutex);
940 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
951 struct scsi_qla_host *vha = sess->vha;
952 struct qla_hw_data *ha = vha->hw;
959 ql_dbg(ql_dbg_disc, vha, 0xf084,
968 qla2x00_mark_device_lost(vha, sess, 0);
977 qlt_send_first_logo(vha, &logo);
987 rc = qla2x00_post_async_logout_work(vha, sess,
990 ql_log(ql_log_warn, vha, 0xf085,
997 rc = qla2x00_post_async_prlo_work(vha, sess,
1000 ql_log(ql_log_warn, vha, 0xf085,
1018 ql_dbg(ql_dbg_edif, vha, 0x911e,
1021 qla2x00_release_all_sadb(vha, sess);
1023 ql_dbg(ql_dbg_edif, vha, 0x911e,
1028 qla_edif_clear_appdata(vha, sess);
1029 qla_edif_sess_down(vha, sess);
1045 ql_dbg(ql_dbg_disc, vha, 0xf086,
1061 ql_dbg(ql_dbg_disc, vha, 0xf087,
1067 qla2x00_eh_wait_for_pending_commands(sess->vha, sess->d_id.b24, 0, WAIT_TARGET)) {
1068 ql_log(ql_log_warn, vha, 0x3027,
1070 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1071 qla2xxx_wake_dpc(vha);
1072 qla2x00_wait_for_chip_reset(vha);
1077 qla24xx_async_notify_ack(vha, sess,
1092 vha->fcport_count--;
1101 if (!test_bit(UNLOADING, &vha->dpc_flags))
1102 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1114 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
1121 qlt_plogi_ack_unref(vha, con);
1124 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
1134 qlt_plogi_ack_unref(vha, own);
1142 qla2x00_dfs_remove_rport(vha, sess);
1144 spin_lock_irqsave(&vha->work_lock, flags);
1148 spin_unlock_irqrestore(&vha->work_lock, flags);
1150 ql_dbg(ql_dbg_disc, vha, 0xf001,
1152 sess, sess->port_name, vha->fcport_count);
1158 !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
1159 (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
1160 switch (vha->host->active_mode) {
1163 set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
1164 qla2xxx_wake_dpc(vha);
1173 if (vha->fcport_count == 0)
1174 wake_up_all(&vha->fcport_waitQ);
1180 struct scsi_qla_host *vha = sess->vha;
1183 ql_dbg(ql_dbg_disc, sess->vha, 0x210a,
1187 spin_lock_irqsave(&sess->vha->work_lock, flags);
1189 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1199 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1202 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
1208 queue_work(sess->vha->hw->wq, &sess->free_work);
1212 static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
1214 struct qla_hw_data *ha = vha->hw;
1224 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
1226 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
1230 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
1234 ql_dbg(ql_dbg_tgt, vha, 0xe000,
1241 ql_dbg(ql_dbg_tgt, vha, 0xe047,
1243 "loop_id %d)\n", vha->host_no, sess, sess->port_name,
1251 if (sess->chip_reset != sess->vha->hw->base_qpair->chip_reset) {
1273 if (sess->vha->fcport_count == 0)
1274 wake_up_all(&sess->vha->fcport_waitQ);
1288 ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
1297 spin_lock_irqsave(&sess->vha->work_lock, flags);
1299 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1303 spin_unlock_irqrestore(&sess->vha->work_lock, flags);
1310 ql_dbg(ql_log_warn, sess->vha, 0xe001,
1314 WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work));
1320 scsi_qla_host_t *vha = tgt->vha;
1322 list_for_each_entry(sess, &vha->vp_fcports, list) {
1330 static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id,
1333 struct qla_hw_data *ha = vha->hw;
1342 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
1344 vha->vp_idx, qla2x00_gid_list_size(ha));
1349 rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
1351 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
1353 vha->vp_idx, rc);
1382 struct scsi_qla_host *vha,
1386 struct qla_hw_data *ha = vha->hw;
1390 if (vha->vha_tgt.qla_tgt->tgt_stop)
1395 ql_dbg(ql_dbg_disc, vha, 0x20f6,
1402 sess->tgt = vha->vha_tgt.qla_tgt;
1415 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
1417 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf015,
1419 vha->vp_idx, fcport->port_name);
1428 ql_dbg(ql_dbg_disc, vha, 0x20f7,
1436 vha->vha_tgt.qla_tgt->sess_count++;
1438 qlt_do_generation_tick(vha, &sess->generation);
1442 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
1444 sess, sess->se_sess, vha->vha_tgt.qla_tgt,
1445 vha->vha_tgt.qla_tgt->sess_count);
1447 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
1450 vha->vp_idx, local ? "local " : "", fcport->port_name,
1462 qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
1464 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
1468 if (!vha->hw->tgt.tgt_ops)
1474 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1476 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1480 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1485 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1486 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
1494 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
1497 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
1511 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
1523 struct scsi_qla_host *vha = tgt->vha;
1531 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
1538 ql_dbg(ql_dbg_tgt_mgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
1539 vha->host_no, vha);
1544 mutex_lock(&vha->vha_tgt.tgt_mutex);
1547 mutex_unlock(&vha->vha_tgt.tgt_mutex);
1550 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
1560 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
1567 (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)))
1568 qlt_disable_vha(vha);
1581 scsi_qla_host_t *vha = tgt->vha;
1584 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
1590 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
1597 mutex_lock(&vha->vha_tgt.tgt_mutex);
1600 mutex_unlock(&vha->vha_tgt.tgt_mutex);
1603 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished\n",
1606 switch (vha->qlini_mode) {
1608 vha->flags.online = 1;
1609 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1620 scsi_qla_host_t *vha = tgt->vha;
1625 struct qla_hw_data *ha = vha->hw;
1633 for (i = 0; i < vha->hw->max_qpairs + 1; i++) {
1646 list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
1654 if (vha->vp_idx)
1657 vha->vha_tgt.target_lport_ptr)
1658 ha->tgt.tgt_ops->remove_target(vha);
1660 vha->vha_tgt.qla_tgt = NULL;
1662 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
1677 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1683 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1708 struct scsi_qla_host *vha = qpair->vha;
1709 struct qla_hw_data *ha = vha->hw;
1716 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
1720 ql_dbg(ql_dbg_tgt, vha, 0xe049,
1722 "request packet\n", vha->vp_idx, __func__);
1726 if (vha->vha_tgt.qla_tgt != NULL)
1727 vha->vha_tgt.qla_tgt->notify_ack_expected++;
1759 ql_dbg(ql_dbg_tgt, vha, 0xe005,
1761 vha->vp_idx, nack->u.isp24.status);
1765 qla2x00_start_iocbs(vha, qpair->req);
1770 struct scsi_qla_host *vha = mcmd->vha;
1771 struct qla_hw_data *ha = vha->hw;
1780 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1786 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1788 vha->vp_idx, __func__);
1811 resp->vp_index = vha->vp_idx;
1841 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1848 qla2x00_start_iocbs(vha, qpair->req);
1860 struct scsi_qla_host *vha = qpair->vha;
1861 struct qla_hw_data *ha = vha->hw;
1866 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1873 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1875 "request packet", vha->vp_idx, __func__);
1883 resp->vp_index = vha->vp_idx;
1916 vha->vha_tgt.qla_tgt->abts_resp_expected++;
1923 qla2x00_start_iocbs(vha, qpair->req);
1929 static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1938 ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1940 "request packet\n", vha->vp_idx, __func__);
1961 ctio->vp_index = vha->vp_idx;
1981 ql_dbg(ql_dbg_tgt, vha, 0xe007,
1992 qla2x00_start_iocbs(vha, qpair->req);
2007 static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id)
2015 spin_lock_irqsave(&vha->cmd_list_lock, flags);
2016 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
2027 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
2037 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
2040 static struct qla_qpair_hint *qlt_find_qphint(struct scsi_qla_host *vha,
2043 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
2046 if (vha->flags.qpairs_available) {
2061 struct qla_hw_data *ha = mcmd->vha->hw;
2103 ql_dbg(ql_dbg_tgt_mgt, mcmd->vha, 0xf052,
2105 mcmd->vha->vp_idx, rc);
2111 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2114 struct qla_hw_data *ha = vha->hw;
2116 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
2119 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
2121 vha->vp_idx, abts->exchange_addr_to_abort);
2125 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
2127 vha->vp_idx, __func__);
2137 mcmd->vha = vha;
2169 static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
2172 struct qla_hw_data *ha = vha->hw;
2180 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
2182 "supported\n", vha->vp_idx);
2189 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
2191 "Address received\n", vha->vp_idx);
2197 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
2199 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain,
2206 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
2208 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
2210 vha->vp_idx);
2226 rc = __qlt_24xx_handle_abts(vha, abts, sess);
2228 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
2230 vha->vp_idx, rc);
2243 struct scsi_qla_host *ha = mcmd->vha;
2303 struct scsi_qla_host *vha = cmd->vha;
2305 ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
2306 "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
2308 vha, atio, scsi_status, sense_key, asc, ascq);
2310 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
2312 ql_dbg(ql_dbg_async, vha, 0x3067,
2314 vha->host_no, __func__);
2323 ctio->vp_index = vha->vp_idx;
2355 qla2x00_start_iocbs(vha, qpair->req);
2364 struct scsi_qla_host *vha = mcmd->sess->vha;
2365 struct qla_hw_data *ha = vha->hw;
2370 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
2376 if (!vha->flags.online || mcmd->reset_count != qpair->chip_reset) {
2381 ql_dbg(ql_dbg_async, vha, 0xe100,
2383 vha->flags.online, qla2x00_reset_active(vha),
2395 ql_dbg(ql_dbg_disc, vha, 0x2106,
2402 qlt_send_notify_ack(vha->hw->base_qpair,
2490 static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
2510 ha = vha->hw;
2584 ql_dbg(ql_dbg_io, qpair->vha, 0x305b,
2586 qpair->vha->vp_idx);
2733 struct scsi_qla_host *vha;
2738 vha = cmd->vha;
2742 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00b,
2749 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00c,
2756 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00f,
2763 ql_dbg(ql_dbg_tgt_dif, vha, 0xe010,
2770 ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xe011, cmd->cdb, 16);
2830 (IS_FWI2_CAPABLE(cmd->vha->hw) &&
2963 scsi_qla_host_t *vha = cmd->tgt->vha;
2964 struct qla_hw_data *ha = vha->hw;
3062 scsi_qla_host_t *vha = cmd->vha;
3064 ha = vha->hw;
3214 tc.vha = vha;
3258 struct scsi_qla_host *vha = cmd->vha;
3299 vha->flags.online, qla2x00_reset_active(vha),
3398 qla2x00_start_iocbs(vha, qpair->req);
3404 qlt_unmap_sg(vha, cmd);
3414 struct scsi_qla_host *vha = cmd->vha;
3436 vha->hw->tgt.tgt_ops->handle_data(cmd);
3439 vha->flags.online, qla2x00_reset_active(vha),
3479 qla2x00_start_iocbs(vha, qpair->req);
3485 qlt_unmap_sg(vha, cmd);
3505 struct scsi_qla_host *vha = cmd->vha;
3517 ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
3524 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00d,
3540 ql_dbg(ql_dbg_tgt_dif, vha, 0xe00e,
3557 ql_dbg(ql_dbg_tgt_dif, vha, 0xe012,
3575 vha->hw->tgt.tgt_ops->handle_data(cmd);
3581 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3591 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3598 static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3602 struct qla_hw_data *ha = vha->hw;
3606 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
3609 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3611 ql_dbg(ql_dbg_tgt, vha, 0xe080,
3613 "request packet\n", vha->vp_idx, __func__);
3643 qla2x00_start_iocbs(vha, vha->req);
3647 static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3653 rc = __qlt_send_term_imm_notif(vha, imm);
3665 struct scsi_qla_host *vha = qpair->vha;
3667 struct qla_hw_data *ha = vha->hw;
3672 ql_dbg(ql_dbg_tgt, vha, 0xe009, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
3675 vha = cmd->vha;
3679 ql_dbg(ql_dbg_tgt, vha, 0xe050,
3681 "request packet\n", vha->vp_idx, __func__);
3687 ql_dbg(ql_dbg_tgt, vha, 0xe051,
3689 "incorrect state %d\n", vha->vp_idx, cmd,
3703 ctio24->vp_index = vha->vp_idx;
3717 qla2x00_start_iocbs(vha, qpair->req);
3725 struct scsi_qla_host *vha;
3729 /* why use different vha? NPIV */
3731 vha = cmd->vha;
3733 vha = qpair->vha;
3738 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3744 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3749 qlt_unmap_sg(vha, cmd);
3750 vha->hw->tgt.tgt_ops->free_cmd(cmd);
3759 static void qlt_init_term_exchange(struct scsi_qla_host *vha)
3764 vha->hw->tgt.leak_exchg_thresh_hold =
3765 (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
3768 if (!list_empty(&vha->hw->tgt.q_full_list)) {
3770 list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
3778 vha->hw->tgt.num_qfull_cmds_alloc--;
3781 vha->hw->tgt.num_qfull_cmds_dropped = 0;
3784 static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3788 total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
3790 if (vha->hw->tgt.leak_exchg_thresh_hold &&
3791 (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
3793 ql_dbg(ql_dbg_tgt, vha, 0xe079,
3795 total_leaked, vha->hw->cur_fw_xcb_count);
3797 if (IS_P3P_TYPE(vha->hw))
3798 set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
3800 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3801 qla2xxx_wake_dpc(vha);
3809 struct scsi_qla_host *vha = tgt->vha;
3813 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3815 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3821 qlt_unmap_sg(vha, cmd);
3829 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf016,
3848 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
3856 qlt_decr_num_pend_cmds(cmd->vha);
3866 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
3877 struct scsi_qla_host *vha = qpair->vha;
3880 ql_dbg(ql_dbg_tgt_dif, vha, 0xe013,
3905 static void *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
3918 } else if (vha->hw->req_q_map[qid]) {
3919 ql_dbg(ql_dbg_tgt_mgt, vha, 0x1000a,
3921 vha->vp_idx, rsp->id, handle);
3922 req = vha->hw->req_q_map[qid];
3931 ql_dbg(ql_dbg_tgt, vha, 0xe052,
3933 vha->vp_idx, handle);
3939 ql_dbg(ql_dbg_async, vha, 0xe053,
3941 vha->vp_idx, handle, req->id, rsp->id);
3947 ql_dbg(ql_dbg_tgt, vha, 0xe054,
3949 "support NULL handles\n", vha->vp_idx);
3959 static void qlt_do_ctio_completion(struct scsi_qla_host *vha,
3962 struct qla_hw_data *ha = vha->hw;
3970 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
3977 cmd = qlt_ctio_to_cmd(vha, rsp, handle, ctio);
3983 qlt_chk_edif_rx_sa_delete_pending(vha, cmd->sess,
3990 qlt_unmap_sg(vha, cmd);
3996 dev_info(&vha->hw->pdev->dev,
3998 vha->vp_idx, cmd->atio.u.isp24.attr,
4009 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
4013 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
4023 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
4025 "received (state %x, se_cmd %p)\n", vha->vp_idx,
4035 ql_dbg(ql_dbg_disc, vha, 0x20f8,
4046 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
4050 vha->vp_idx, status, cmd->state, se_cmd,
4062 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
4064 vha->vp_idx, status, cmd->state, se_cmd);
4068 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
4070 vha->vp_idx, status, cmd->state, se_cmd);
4101 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
4105 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
4107 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
4112 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
4119 static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
4141 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
4156 scsi_qla_host_t *vha = cmd->vha;
4157 struct qla_hw_data *ha = vha->hw;
4170 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
4191 fcp_task_attr = qlt_get_fcp_task_attr(vha,
4195 ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
4206 ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p", cmd);
4215 qlt_decr_num_pend_cmds(vha);
4216 cmd->vha->hw->tgt.tgt_ops->rel_cmd(cmd);
4225 scsi_qla_host_t *vha = cmd->vha;
4228 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4230 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4235 void qlt_clr_qp_table(struct scsi_qla_host *vha)
4238 struct qla_hw_data *ha = vha->hw;
4239 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4243 ql_log(ql_log_info, vha, 0x706c,
4260 static void qlt_assign_qpair(struct scsi_qla_host *vha,
4264 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4267 if (vha->flags.qpairs_available) {
4273 pci_get_drvdata(vha->hw->pdev);
4275 qpair = vha->hw->base_qpair;
4284 ql_log(ql_log_info, vha, 0xd037,
4304 ql_log(ql_log_info, vha, 0xd038,
4326 ql_log(ql_log_info, vha, 0xd039,
4339 static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
4345 cmd = vha->hw->tgt.tgt_ops->get_cmd(sess);
4353 cmd->tgt = vha->vha_tgt.qla_tgt;
4354 qlt_incr_num_pend_cmds(vha);
4355 cmd->vha = vha;
4365 qlt_assign_qpair(vha, cmd);
4366 cmd->reset_count = vha->hw->base_qpair->chip_reset;
4367 cmd->vp_idx = vha->vp_idx;
4374 static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
4377 struct qla_hw_data *ha = vha->hw;
4378 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4385 ql_dbg(ql_dbg_io, vha, 0x3061,
4394 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, atio->u.isp24.fcp_hdr.s_id);
4401 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
4411 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
4418 cmd = qlt_get_tag(vha, sess, atio);
4420 ql_dbg(ql_dbg_io, vha, 0x3062,
4421 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
4429 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4430 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
4431 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4434 if (vha->flags.qpairs_available) {
4453 struct scsi_qla_host *vha = sess->vha;
4454 struct qla_hw_data *ha = vha->hw;
4457 struct qla_qpair_hint *h = &vha->vha_tgt.qla_tgt->qphints[0];
4461 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
4464 "leak\n", vha->vp_idx);
4478 mcmd->vha = vha;
4486 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
4489 h = qlt_find_qphint(vha, mcmd->unpacked_lun);
4511 static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
4514 struct qla_hw_data *ha = vha->hw;
4523 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4537 static int __qlt_abort_task(struct scsi_qla_host *vha,
4541 struct qla_hw_data *ha = vha->hw;
4548 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
4550 vha->vp_idx, __func__);
4568 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
4570 vha->vp_idx, rc);
4579 static int qlt_abort_task(struct scsi_qla_host *vha,
4582 struct qla_hw_data *ha = vha->hw;
4590 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
4594 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
4596 "session\n", vha->vp_idx);
4597 return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
4601 return __qlt_abort_task(vha, iocb, sess);
4607 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4630 qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn,
4638 list_for_each_entry(other_sess, &vha->vp_fcports, list) {
4651 ql_dbg(ql_dbg_disc, vha, 0x1000c,
4667 ql_dbg(ql_dbg_disc, vha, 0xf01b,
4682 ql_dbg(ql_dbg_disc, vha, 0x1000d,
4696 static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4708 spin_lock_irqsave(&vha->cmd_list_lock, flags);
4709 list_for_each_entry(op, &vha->unknown_atio_list, cmd_list) {
4718 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4726 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
4731 static int qlt_handle_login(struct scsi_qla_host *vha,
4742 lockdep_assert_held(&vha->hw->hardware_lock);
4754 abort_cmds_for_s_id(vha, &port_id);
4757 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
4758 sess = qlt_find_sess_invalidate_other(vha, wwn,
4760 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
4762 ql_dbg(ql_dbg_disc, vha, 0xffff,
4765 qlt_send_term_imm_notif(vha, iocb, 1);
4774 if (vha->hw->flags.edif_enabled &&
4775 !(vha->e_dbell.db_flags & EDB_ACTIVE) &&
4778 ql_dbg(ql_dbg_disc, vha, 0xffff,
4781 qlt_send_term_imm_notif(vha, iocb, 1);
4785 if (vha->hw->flags.edif_enabled) {
4786 if (DBELL_INACTIVE(vha)) {
4787 ql_dbg(ql_dbg_disc, vha, 0xffff,
4790 qlt_send_term_imm_notif(vha, iocb, 1);
4794 ql_dbg(ql_dbg_disc, vha, 0xffff,
4797 qlt_send_term_imm_notif(vha, iocb, 1);
4802 pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
4804 ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff,
4808 qlt_send_term_imm_notif(vha, iocb, 1);
4814 qlt_plogi_ack_link(vha, pla, conflict_sess,
4820 ql_dbg(ql_dbg_disc, vha, 0xffff,
4824 qla24xx_post_newsess_work(vha, &port_id,
4829 qla24xx_post_newsess_work(vha, &port_id,
4850 ql_dbg(ql_dbg_disc, vha, 0xffff,
4860 qlt_send_term_imm_notif(vha, iocb, 1);
4864 qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
4871 if (N2N_TOPO(vha->hw) && fcport_is_bigger(sess))
4872 vha->d_id = sess->d_id;
4874 ql_dbg(ql_dbg_disc, vha, 0xffff,
4878 qla2x00_post_aen_work(vha, FCH_EVT_PORT_ONLINE,
4902 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4909 qlt_plogi_ack_unref(vha, pla);
4934 ql_dbg(ql_dbg_disc, vha, 0x20f9,
4949 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
4952 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4953 struct qla_hw_data *ha = vha->hw;
4973 ql_dbg(ql_dbg_disc, vha, 0xf026,
4975 vha->vp_idx, iocb->u.isp24.port_id[2],
4985 res = qlt_handle_login(vha, iocb);
4990 sess = qla2x00_find_fcport_by_wwpn(vha,
4993 if (vha->hw->flags.edif_enabled && sess &&
4996 ql_dbg(ql_dbg_disc, vha, 0xffff,
4999 qlt_send_term_imm_notif(vha, iocb, 1);
5004 ql_dbg(ql_dbg_disc, vha, 0xffff,
5008 qlt_send_term_imm_notif(vha, iocb, 1);
5012 res = qlt_handle_login(vha, iocb);
5025 sess = qlt_find_sess_invalidate_other(vha, wwn, port_id,
5036 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
5041 qlt_send_term_imm_notif(vha, iocb, 1);
5051 if (vha->hw->flags.edif_enabled && sess &&
5054 ql_dbg(ql_dbg_disc, vha, 0xffff,
5057 qlt_send_term_imm_notif(vha, iocb, 1);
5082 ql_dbg(ql_dbg_disc, sess->vha, 0xffff,
5086 qlt_send_term_imm_notif(vha, iocb, 1);
5107 ql_log(ql_log_warn, sess->vha, 0xf095,
5110 qlt_send_term_imm_notif(vha, iocb, 1);
5119 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
5143 ql_dbg(ql_dbg_disc, vha, 0x20fa,
5146 qla24xx_post_nack_work(vha, sess, iocb,
5150 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
5151 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
5152 qla2xxx_wake_dpc(vha);
5156 ql_dbg(ql_dbg_disc, vha, 0x20fb,
5159 qla24xx_post_nack_work(vha, sess, iocb,
5170 qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS);
5178 sess = qla2x00_find_fcport_by_loopid(vha, loop_id);
5188 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5190 ql_dbg(ql_dbg_disc, vha, 0x20fc,
5212 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5220 sess = qla2x00_find_fcport_by_wwpn(vha,
5223 ql_dbg(ql_dbg_disc, vha, 0x20fd,
5235 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
5237 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
5238 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
5242 ql_dbg(ql_dbg_disc, vha, 0xf026,
5244 vha->vp_idx, iocb->u.isp24.status_subcode, res);
5253 static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
5256 struct qla_hw_data *ha = vha->hw;
5267 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
5269 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
5272 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5279 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5281 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
5283 "subcode %x)\n", vha->vp_idx,
5301 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
5303 "%#x, subcode %x)\n", vha->vp_idx,
5307 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
5313 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
5314 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
5315 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5321 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
5322 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
5324 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
5330 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
5332 vha->vp_idx);
5334 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
5339 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
5342 "resource count)\n", vha->vp_idx);
5346 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
5348 "L %#x)\n", vha->vp_idx,
5352 if (qlt_abort_task(vha, iocb) == 0)
5357 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
5359 vha->vp_idx, vha->host_no);
5363 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
5365 vha->vp_idx, iocb->u.isp2x.task_flags);
5369 if (qlt_24xx_handle_els(vha, iocb) == 0)
5373 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
5375 "notify status %x\n", vha->vp_idx, status);
5391 struct scsi_qla_host *vha = qpair->vha;
5393 struct qla_hw_data *ha = vha->hw;
5403 sess = qla2x00_find_fcport_by_nportid(vha, &id, 1);
5413 ql_dbg(ql_dbg_io, vha, 0x3063,
5415 "request packet", vha->vp_idx, __func__);
5427 ctio24->vp_index = vha->vp_idx;
5452 qla2x00_start_iocbs(vha, qpair->req);
5462 qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
5465 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5466 struct qla_hw_data *ha = vha->hw;
5472 ql_dbg(ql_dbg_io, vha, 0x300a,
5477 if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
5478 vha->hw->tgt.num_qfull_cmds_dropped++;
5479 if (vha->hw->tgt.num_qfull_cmds_dropped >
5480 vha->qla_stats.stat_max_qfull_cmds_dropped)
5481 vha->qla_stats.stat_max_qfull_cmds_dropped =
5482 vha->hw->tgt.num_qfull_cmds_dropped;
5484 ql_dbg(ql_dbg_io, vha, 0x3068,
5486 vha->vp_idx, __func__,
5487 vha->hw->tgt.num_qfull_cmds_dropped);
5489 qlt_chk_exch_leak_thresh_hold(vha);
5494 (vha, atio->u.isp24.fcp_hdr.s_id);
5500 ql_dbg(ql_dbg_io, vha, 0x3009,
5502 vha->vp_idx, __func__);
5504 vha->hw->tgt.num_qfull_cmds_dropped++;
5505 if (vha->hw->tgt.num_qfull_cmds_dropped >
5506 vha->qla_stats.stat_max_qfull_cmds_dropped)
5507 vha->qla_stats.stat_max_qfull_cmds_dropped =
5508 vha->hw->tgt.num_qfull_cmds_dropped;
5510 qlt_chk_exch_leak_thresh_hold(vha);
5514 qlt_incr_num_pend_cmds(vha);
5518 cmd->tgt = vha->vha_tgt.qla_tgt;
5519 cmd->vha = vha;
5531 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5532 list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
5534 vha->hw->tgt.num_qfull_cmds_alloc++;
5535 if (vha->hw->tgt.num_qfull_cmds_alloc >
5536 vha->qla_stats.stat_max_qfull_cmds_alloc)
5537 vha->qla_stats.stat_max_qfull_cmds_alloc =
5538 vha->hw->tgt.num_qfull_cmds_alloc;
5539 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5545 struct scsi_qla_host *vha = qpair->vha;
5546 struct qla_hw_data *ha = vha->hw;
5558 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5560 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5564 list_splice_init(&vha->hw->tgt.q_full_list, &q_full_list);
5565 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5579 ql_dbg(ql_dbg_io, vha, 0x3006,
5583 ql_dbg(ql_dbg_io, vha, 0x3007,
5587 ql_dbg(ql_dbg_io, vha, 0x3008,
5594 vha->hw->tgt.num_qfull_cmds_alloc--;
5609 spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
5610 list_splice(&q_full_list, &vha->hw->tgt.q_full_list);
5611 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
5622 struct scsi_qla_host *vha = qpair->vha;
5626 qlt_alloc_qfull_cmd(vha, atio, status, 1);
5630 qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, struct qla_qpair *qpair,
5633 struct qla_hw_data *ha = vha->hw;
5650 static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5653 struct qla_hw_data *ha = vha->hw;
5654 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5659 ql_dbg(ql_dbg_tgt, vha, 0x3064,
5674 ql_dbg(ql_dbg_io, vha, 0x3065,
5677 "sending QUEUE_FULL\n", vha->vp_idx);
5688 rc = qlt_chk_qfull_thresh_hold(vha, ha->base_qpair,
5694 rc = qlt_handle_cmd_for_atio(vha, atio);
5696 rc = qlt_handle_task_mgmt(vha, atio);
5703 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5707 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5713 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5715 vha->vp_idx);
5720 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5722 vha->vp_idx);
5736 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
5738 "with error status %x\n", vha->vp_idx,
5743 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
5747 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
5754 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
5756 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
5768 static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
5771 struct qla_hw_data *ha = vha->hw;
5787 ql_log(ql_log_warn, vha, 0xffff,
5790 vha, 0xffff, (uint8_t *)entry, sizeof(*entry));
5793 ha->isp_ops->fw_dump(vha);
5795 qla2xxx_dump_fw(vha);
5797 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
5798 qla2xxx_wake_dpc(vha);
5810 static void qlt_handle_abts_completion(struct scsi_qla_host *vha,
5817 struct qla_hw_data *ha = vha->hw;
5819 mcmd = qlt_ctio_to_cmd(vha, rsp, pkt->handle, pkt);
5821 ql_dbg(ql_dbg_async, vha, 0xe064,
5823 vha->vp_idx);
5828 vha = mcmd->vha;
5829 vha->vha_tgt.qla_tgt->abts_resp_expected--;
5831 ql_dbg(ql_dbg_tgt, vha, 0xe038,
5838 if (qlt_chk_unresolv_exchg(vha, rsp->qpair, entry)) {
5842 qlt_24xx_retry_term_exchange(vha, rsp->qpair,
5845 ql_dbg(ql_dbg_tgt, vha, 0xe063,
5847 vha->vp_idx, entry->compl_status,
5859 static void qlt_response_pkt(struct scsi_qla_host *vha,
5862 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
5865 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
5867 vha->vp_idx, pkt->entry_type, vha->hw);
5882 qlt_do_ctio_completion(vha, rsp, entry->handle,
5895 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
5897 "status %x received\n", vha->vp_idx,
5902 rc = qlt_chk_qfull_thresh_hold(vha, rsp->qpair, atio, 1);
5906 rc = qlt_handle_cmd_for_atio(vha, atio);
5910 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5914 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
5920 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5922 vha->vp_idx);
5927 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5929 vha->vp_idx);
5942 qlt_do_ctio_completion(vha, rsp, entry->handle,
5952 qlt_do_ctio_completion(vha, rsp, entry->handle,
5959 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
5960 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
5967 ql_dbg(ql_dbg_tgt, vha, 0xe036,
5974 ql_dbg(ql_dbg_tgt, vha, 0xe061,
5976 "failed %x\n", vha->vp_idx,
5980 ql_dbg(ql_dbg_tgt, vha, 0xe062,
5982 vha->vp_idx);
5987 ql_dbg(ql_dbg_tgt, vha, 0xe037,
5988 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
5989 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
5994 qlt_handle_abts_completion(vha, rsp, pkt);
5996 ql_dbg(ql_dbg_tgt, vha, 0xe064,
5998 "received\n", vha->vp_idx);
6003 ql_dbg(ql_dbg_tgt, vha, 0xe065,
6005 "type %x\n", vha->vp_idx, pkt->entry_type);
6014 void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
6017 struct qla_hw_data *ha = vha->hw;
6018 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6038 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
6040 "occurred", vha->vp_idx, code);
6043 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6048 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
6050 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
6065 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
6067 "(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
6072 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf017,
6074 vha->vp_idx,
6079 vha->hw->exch_starvation++;
6080 if (vha->hw->exch_starvation > 5) {
6081 ql_log(ql_log_warn, vha, 0xd03a,
6084 vha->hw->exch_starvation = 0;
6085 if (IS_P3P_TYPE(vha->hw))
6087 &vha->dpc_flags);
6090 &vha->dpc_flags);
6091 qla2xxx_wake_dpc(vha);
6097 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
6100 "m[2]=%x, m[3]=%x)", vha->vp_idx, code,
6105 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
6107 vha->hw->exch_starvation = 0;
6109 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
6118 static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
6126 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
6128 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
6130 vha->vp_idx);
6136 rc = qla24xx_gpdb_wait(vha, fcport, 0);
6138 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
6141 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
6147 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6148 tfcp = qla2x00_find_fcport_by_wwpn(vha, fcport->port_name, 1);
6160 if (vha->hw->current_topology == ISP_CFG_F)
6163 list_add_tail(&fcport->list, &vha->vp_fcports);
6165 vha->fcport_count++;
6173 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6175 switch (vha->host->active_mode) {
6179 if (!IS_IIDMA_CAPABLE(vha->hw) || !vha->hw->flags.gpsc_supported) {
6182 ql_dbg(ql_dbg_disc, vha, 0x20ff,
6184 __func__, __LINE__, fcport->port_name, vha->fcport_count);
6185 qla24xx_post_gpsc_work(vha, fcport);
6201 static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha,
6214 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
6220 mutex_lock(&vha->vha_tgt.tgt_mutex);
6224 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
6226 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
6228 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6230 ql_log(ql_log_info, vha, 0xf071,
6233 vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa);
6240 qlt_send_first_logo(vha, &logo);
6246 fcport = qlt_get_port_database(vha, loop_id);
6248 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6253 atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
6254 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
6256 "(counter was %d, new %d), retrying", vha->vp_idx,
6258 atomic_read(&vha->vha_tgt.
6263 sess = qlt_create_sess(vha, fcport, true);
6265 mutex_unlock(&vha->vha_tgt.tgt_mutex);
6273 struct scsi_qla_host *vha = tgt->vha;
6274 struct qla_hw_data *ha = vha->hw;
6287 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
6291 sess = qlt_make_local_sess(vha, s_id);
6304 ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01c,
6312 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
6334 struct scsi_qla_host *vha = tgt->vha;
6337 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
6440 tgt->vha = base_vha;
6466 int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
6468 if (!vha->vha_tgt.qla_tgt)
6471 if (vha->fc_vport) {
6472 qlt_release(vha->vha_tgt.qla_tgt);
6477 qlt_init_term_exchange(vha);
6479 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
6480 vha->host_no, ha);
6481 qlt_release(vha->vha_tgt.qla_tgt);
6497 static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
6500 pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name);
6501 pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name);
6520 struct scsi_qla_host *vha;
6529 vha = tgt->vha;
6530 ha = vha->hw;
6532 host = vha->host;
6539 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6558 ql_dbg(ql_dbg_tgt, vha, 0xe068,
6563 qlt_lport_dump(vha, phys_wwpn, b);
6565 if (memcmp(vha->port_name, b, WWN_SIZE)) {
6569 rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
6585 * @vha: Registered scsi_qla_host pointer
6587 void qlt_lport_deregister(struct scsi_qla_host *vha)
6589 struct qla_hw_data *ha = vha->hw;
6590 struct Scsi_Host *sh = vha->host;
6594 vha->vha_tgt.target_lport_ptr = NULL;
6604 void qlt_set_mode(struct scsi_qla_host *vha)
6606 switch (vha->qlini_mode) {
6609 vha->host->active_mode = MODE_TARGET;
6612 vha->host->active_mode = MODE_INITIATOR;
6615 vha->host->active_mode = MODE_DUAL;
6623 static void qlt_clear_mode(struct scsi_qla_host *vha)
6625 switch (vha->qlini_mode) {
6627 vha->host->active_mode = MODE_UNKNOWN;
6630 vha->host->active_mode = MODE_INITIATOR;
6634 vha->host->active_mode = MODE_INITIATOR;
6647 qlt_enable_vha(struct scsi_qla_host *vha)
6649 struct qla_hw_data *ha = vha->hw;
6650 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6655 ql_dbg(ql_dbg_tgt, vha, 0xe069,
6661 if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED)
6668 qlt_set_mode(vha);
6672 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
6674 if (vha->vp_idx) {
6675 qla24xx_disable_vp(vha);
6676 qla24xx_enable_vp(vha);
6692 static void qlt_disable_vha(struct scsi_qla_host *vha)
6694 struct qla_hw_data *ha = vha->hw;
6695 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
6699 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
6707 qlt_clear_mode(vha);
6710 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
6711 qla2xxx_wake_dpc(vha);
6717 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS)
6718 ql_dbg(ql_dbg_tgt, vha, 0xe081,
6728 qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
6730 vha->vha_tgt.qla_tgt = NULL;
6732 mutex_init(&vha->vha_tgt.tgt_mutex);
6733 mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
6735 INIT_LIST_HEAD(&vha->unknown_atio_list);
6736 INIT_DELAYED_WORK(&vha->unknown_atio_work, qlt_unknown_atio_work_fn);
6738 qlt_clear_mode(vha);
6748 qlt_add_target(ha, vha);
6752 qlt_rff_id(struct scsi_qla_host *vha)
6758 if (qla_tgt_mode_enabled(vha)) {
6760 } else if (qla_ini_mode_enabled(vha)) {
6762 } else if (qla_dual_mode_enabled(vha))
6778 qlt_init_atio_q_entries(struct scsi_qla_host *vha)
6780 struct qla_hw_data *ha = vha->hw;
6784 if (qla_ini_mode_enabled(vha))
6799 qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
6801 struct qla_hw_data *ha = vha->hw;
6819 ql_log(ql_log_warn, vha, 0xd03c,
6829 qlt_24xx_atio_pkt_all_vps(vha,
6848 wrt_reg_dword(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6852 qlt_24xx_config_rings(struct scsi_qla_host *vha)
6854 struct qla_hw_data *ha = vha->hw;
6861 wrt_reg_dword(ISP_ATIO_Q_IN(vha), 0);
6862 wrt_reg_dword(ISP_ATIO_Q_OUT(vha), 0);
6863 rd_reg_dword(ISP_ATIO_Q_OUT(vha));
6869 ql_dbg(ql_dbg_init, vha, 0xf072,
6878 ql_dbg(ql_dbg_init, vha, 0xf072,
6885 qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
6887 struct qla_hw_data *ha = vha->hw;
6893 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
6906 if (qla_tgt_mode_enabled(vha))
6909 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
6915 if (qla_tgt_mode_enabled(vha))
6962 if (vha->flags.init_done)
6963 fc_host_supported_classes(vha->host) =
6968 if (vha->flags.init_done)
6969 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
6976 qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
6979 struct qla_hw_data *ha = vha->hw;
6991 qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
6993 struct qla_hw_data *ha = vha->hw;
6999 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha)) {
7012 if (qla_tgt_mode_enabled(vha))
7015 nv->exchange_count = cpu_to_le16(vha->ql2xexchoffld);
7021 if (qla_tgt_mode_enabled(vha))
7065 if (vha->flags.init_done)
7066 fc_host_supported_classes(vha->host) =
7071 if (vha->flags.init_done)
7072 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
7079 qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
7082 struct qla_hw_data *ha = vha->hw;
7104 qlt_modify_vp_config(struct scsi_qla_host *vha,
7108 if (qla_tgt_mode_enabled(vha) || qla_dual_mode_enabled(vha))
7112 if (qla_tgt_mode_enabled(vha))
7146 scsi_qla_host_t *vha;
7152 vha = pci_get_drvdata(ha->pdev);
7156 qlt_24xx_process_atio_queue(vha, 0);
7168 scsi_qla_host_t *vha = op->vha;
7169 struct qla_hw_data *ha = vha->hw;
7172 if (qla2x00_reset_active(vha) ||
7177 qlt_24xx_process_atio_queue(vha, 0);
7181 qlt_response_pkt_all_vps(vha, op->rsp, (response_t *)&op->atio);
7188 qlt_handle_abts_recv(struct scsi_qla_host *vha, struct rsp_que *rsp,
7199 qlt_response_pkt_all_vps(vha, rsp, pkt);
7204 op->vha = vha;
7205 op->chip_reset = vha->hw->base_qpair->chip_reset;
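Most of the entries above follow the driver's per-host debug logging convention, ql_dbg(category, vha, message_id, fmt, ...), where output is gated on a debug-category mask and tagged with the host's vport index. Below is a minimal user-space sketch of that call shape only; ql_dbg_stub, scsi_qla_host_stub and the mask value are illustrative stand-ins, not the driver's real definitions.

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in category bit and enabled mask (the driver gates ql_dbg() on a module-parameter mask). */
#define QL_DBG_TGT	0x00004000u
static uint32_t ql_dbg_enabled = QL_DBG_TGT;

struct scsi_qla_host_stub {
	int vp_idx;		/* vport index, printed with every message */
};

/*
 * Mirrors the call shape used throughout the listing:
 *   ql_dbg(category, vha, message_id, fmt, ...);
 * A message is emitted only when its category bit is enabled.
 */
static void ql_dbg_stub(uint32_t level, struct scsi_qla_host_stub *vha,
			uint32_t id, const char *fmt, ...)
{
	va_list ap;

	if (!(level & ql_dbg_enabled))
		return;

	printf("qla2xxx [vp %d]-%04x: ", vha ? vha->vp_idx : -1, (unsigned int)id);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	struct scsi_qla_host_stub vha = { .vp_idx = 2 };

	ql_dbg_stub(QL_DBG_TGT, &vha, 0xe004,
		    "Sending NOTIFY_ACK (vp_idx=%d)\n", vha.vp_idx);
	return 0;
}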