Lines matching refs: vha (each hit is prefixed by its line number in the source file; the hits appear to come from the qla2xxx FC driver's virtual-port code, qla_mid.c in the upstream Linux kernel)

20 qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
22 if (vha->vp_idx && vha->timer_active) {
23 del_timer_sync(&vha->timer);
24 vha->timer_active = 0;
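
Hits 20-24 cover nearly the whole helper. A minimal reconstruction; the elided lines are assumptions based on the upstream qla2xxx driver:

    void
    qla2x00_vp_stop_timer(scsi_qla_host_t *vha)
    {
        /* Only NPIV vports (vp_idx != 0) stop their per-host timer here;
         * the base port's timer is torn down elsewhere. */
        if (vha->vp_idx && vha->timer_active) {
            del_timer_sync(&vha->timer);
            vha->timer_active = 0;
        }
    }
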
29 qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
32 struct qla_hw_data *ha = vha->hw;
39 ql_dbg(ql_dbg_vport, vha, 0xa000,
48 vha->vp_idx = vp_id;
51 list_add_tail(&vha->list, &ha->vp_list);
55 qla_update_vp_map(vha, SET_VP_IDX);
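
Hits 29-55 outline a bitmap-based VP ID allocator that also links the new vport into ha->vp_list and the per-index map. A sketch; the bitmap bound, counters, and lock names are assumptions drawn from the upstream driver:

    static uint32_t
    qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
    {
        uint32_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;

        /* Find a free slot in the vp_idx bitmap (bound assumed). */
        mutex_lock(&ha->vport_lock);
        vp_id = find_first_zero_bit((unsigned long *)ha->vp_idx_map,
            MAX_MULTI_ID_FABRIC);
        if (vp_id > ha->max_npiv_vports) {
            ql_dbg(ql_dbg_vport, vha, 0xa000,
                "vp_id %d is bigger than max-supported %d.\n",
                vp_id, ha->max_npiv_vports);
            mutex_unlock(&ha->vport_lock);
            return vp_id;
        }

        set_bit(vp_id, ha->vp_idx_map);
        ha->num_vhosts++;          /* counter assumed */
        vha->vp_idx = vp_id;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_add_tail(&vha->list, &ha->vp_list);
        spin_unlock_irqrestore(&ha->vport_slock, flags);

        /* Publish the vport in the vp_idx map (locking assumed). */
        spin_lock_irqsave(&ha->hardware_lock, flags);
        qla_update_vp_map(vha, SET_VP_IDX);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);

        mutex_unlock(&ha->vport_lock);
        return vp_id;
    }
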
63 qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
66 struct qla_hw_data *ha = vha->hw;
81 if (atomic_read(&vha->vref_count) == 0) {
82 list_del(&vha->list);
83 qla_update_vp_map(vha, RESET_VP_IDX);
94 ql_log(ql_log_info, vha, 0xfffa,
95 "vha->vref_count=%u timeout\n", vha->vref_count.counter);
97 list_del(&vha->list);
98 qla_update_vp_map(vha, RESET_VP_IDX);
102 vp_id = vha->vp_idx;
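
Teardown (hits 63-102) waits for vref_count to drain before unlinking, then gives up and unlinks anyway after a timeout, as the 0xfffa log at hits 94-95 shows. A sketch of that wait-then-force pattern; the poll count and sleep interval are assumptions:

    static void
    qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
    {
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;
        u32 i, bailout = 0;

        mutex_lock(&ha->vport_lock);
        /* Wait for in-flight users (vref_count) before removing the vport
         * from vp_list; the lock keeps list walkers safe. */
        for (i = 0; i < 500; i++) {
            spin_lock_irqsave(&ha->vport_slock, flags);
            if (atomic_read(&vha->vref_count) == 0) {
                list_del(&vha->list);
                qla_update_vp_map(vha, RESET_VP_IDX);
                bailout = 1;
            }
            spin_unlock_irqrestore(&ha->vport_slock, flags);
            if (bailout)
                break;
            msleep(20);
        }
        if (!bailout) {
            /* Timed out: unlink anyway rather than leak the vport. */
            ql_log(ql_log_info, vha, 0xfffa,
                "vha->vref_count=%u timeout\n", vha->vref_count.counter);
            spin_lock_irqsave(&ha->vport_slock, flags);
            list_del(&vha->list);
            qla_update_vp_map(vha, RESET_VP_IDX);
            spin_unlock_irqrestore(&ha->vport_slock, flags);
        }

        clear_bit(vha->vp_idx, ha->vp_idx_map);
        mutex_unlock(&ha->vport_lock);
    }
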
112 scsi_qla_host_t *vha;
118 list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
119 if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
121 return vha;
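
Hits 112-121 are a straight WWPN lookup over vp_list; reconstructed, with the list locking assumed:

    static scsi_qla_host_t *
    qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name)
    {
        scsi_qla_host_t *vha, *tvha;
        unsigned long flags;

        spin_lock_irqsave(&ha->vport_slock, flags);
        /* Compare the 8-byte WWPN of each registered vport. */
        list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) {
            if (!memcmp(port_name, vha->port_name, WWN_SIZE)) {
                spin_unlock_irqrestore(&ha->vport_slock, flags);
                return vha;
            }
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
        return NULL;
    }
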
142 qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
152 list_for_each_entry(fcport, &vha->vp_fcports, list) {
153 ql_dbg(ql_dbg_vport, vha, 0xa001,
155 fcport->loop_id, fcport->vha->vp_idx);
157 qla2x00_mark_device_lost(vha, fcport, 0);
163 qla24xx_disable_vp(scsi_qla_host_t *vha)
169 if (vha->hw->flags.edif_enabled) {
170 if (DBELL_ACTIVE(vha))
171 qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE,
174 qla2x00_wait_for_sess_deletion(vha);
177 if (vha->hw->flags.fw_started)
178 ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
180 atomic_set(&vha->loop_state, LOOP_DOWN);
181 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
182 list_for_each_entry(fcport, &vha->vp_fcports, list)
185 if (!vha->hw->flags.edif_enabled)
186 qla2x00_wait_for_sess_deletion(vha);
189 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
190 qla_update_vp_map(vha, RESET_AL_PA);
191 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
193 qla2x00_mark_vp_devices_dead(vha);
194 atomic_set(&vha->vp_state, VP_FAILED);
195 vha->flags.management_server_logged_in = 0;
197 fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
199 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
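
Hits 163-199 give the disable ordering: drain EDIF sessions first (posting a vendor-unique AEN if the app doorbell is active), ask firmware to disable the VP and log out all ports, mark the loop down, clear the vport's AL_PA from the map, then mark devices dead and set the fc_vport state. A condensed sketch; the EDIF event payload and the logout_on_delete clearing are assumptions:

    int
    qla24xx_disable_vp(scsi_qla_host_t *vha)
    {
        unsigned long flags;
        int ret = QLA_SUCCESS;
        fc_port_t *fcport;

        if (vha->hw->flags.edif_enabled) {
            if (DBELL_ACTIVE(vha))
                qla2x00_post_aen_work(vha, FCH_EVT_VENDOR_UNIQUE,
                    FCH_EVT_VENDOR_UNIQUE_VPORT_DOWN); /* payload assumed */
            qla2x00_wait_for_sess_deletion(vha);
        }

        if (vha->hw->flags.fw_started)
            ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);

        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        list_for_each_entry(fcport, &vha->vp_fcports, list)
            fcport->logout_on_delete = 0;    /* assumed */

        if (!vha->hw->flags.edif_enabled)
            qla2x00_wait_for_sess_deletion(vha);

        /* Remove the port id from the vp target map. */
        spin_lock_irqsave(&vha->hw->hardware_lock, flags);
        qla_update_vp_map(vha, RESET_AL_PA);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);

        qla2x00_mark_vp_devices_dead(vha);
        atomic_set(&vha->vp_state, VP_FAILED);
        vha->flags.management_server_logged_in = 0;
        if (ret == QLA_SUCCESS) {
            fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED);
        } else {
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
            return -1;
        }
        return 0;
    }
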
206 qla24xx_enable_vp(scsi_qla_host_t *vha)
209 struct qla_hw_data *ha = vha->hw;
216 vha->vp_err_state = VP_ERR_PORTDWN;
217 fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN);
218 ql_dbg(ql_dbg_taskm, vha, 0x800b,
228 ret = qla24xx_modify_vp_config(vha);
232 fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
236 ql_dbg(ql_dbg_taskm, vha, 0x801a,
237 "Virtual port with id: %d - Enabled.\n", vha->vp_idx);
241 ql_dbg(ql_dbg_taskm, vha, 0x801b,
242 "Virtual port with id: %d - Disabled.\n", vha->vp_idx);
247 qla24xx_configure_vp(scsi_qla_host_t *vha)
252 fc_vport = vha->fc_vport;
254 ql_dbg(ql_dbg_vport, vha, 0xa002,
256 ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
258 ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
263 clear_bit(VP_SCR_NEEDED, &vha->vp_flags);
266 vha->flags.online = 1;
267 if (qla24xx_configure_vhba(vha))
270 atomic_set(&vha->vp_state, VP_ACTIVE);
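
Hits 247-270 show vport configuration: enable RSCN reception via change request #3, mark the host online, then run the vhba configuration and flip vp_state to active. Sketch; the truncated 0xa003 message is completed from the upstream source:

    static void
    qla24xx_configure_vp(scsi_qla_host_t *vha)
    {
        struct fc_vport *fc_vport = vha->fc_vport;
        int ret;

        ql_dbg(ql_dbg_vport, vha, 0xa002,
            "%s: change request #3.\n", __func__);
        ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
        if (ret != QLA_SUCCESS) {
            ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
                "receiving of RSCN requests: 0x%x.\n", ret);
            return;
        }
        /* Corresponds to SCR enabled. */
        clear_bit(VP_SCR_NEEDED, &vha->vp_flags);

        vha->flags.online = 1;
        if (qla24xx_configure_vhba(vha))
            return;

        atomic_set(&vha->vp_state, VP_ACTIVE);
        fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
    }
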
277 scsi_qla_host_t *vha, *tvp;
283 list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
284 if (vha->vp_idx) {
285 if (test_bit(VPORT_DELETE, &vha->dpc_flags))
288 atomic_inc(&vha->vref_count);
298 ql_dbg(ql_dbg_async, vha, 0x5024,
299 "Async_event for VP[%d], mb=0x%x vha=%p.\n",
300 i, *mb, vha);
301 qla2x00_async_event(vha, rsp, mb);
305 if ((mb[3] & 0xff) == vha->vp_idx) {
306 ql_dbg(ql_dbg_async, vha, 0x5024,
307 "Async_event for VP[%d], mb=0x%x vha=%p\n",
308 i, *mb, vha);
309 qla2x00_async_event(vha, rsp, mb);
315 atomic_dec(&vha->vref_count);
316 wake_up(&vha->vref_waitq);
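
Hits 277-316 are the per-vport async-event fanout. The interesting part is the reference pattern: bump vref_count under vport_slock, drop the lock to dispatch, then retake it, decrement, and wake vref_waitq so qla24xx_deallocate_vp_id can make progress. A condensed sketch; the mailbox-event case list is abbreviated:

    void
    qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
    {
        scsi_qla_host_t *vha, *tvp;
        struct qla_hw_data *ha = rsp->hw;
        unsigned long flags;
        int i = 0;

        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry_safe(vha, tvp, &ha->vp_list, list) {
            if (vha->vp_idx) {
                if (test_bit(VPORT_DELETE, &vha->dpc_flags))
                    continue;

                /* Pin the vport, then drop the list lock to dispatch. */
                atomic_inc(&vha->vref_count);
                spin_unlock_irqrestore(&ha->vport_slock, flags);

                switch (mb[0]) {
                case MBA_LOOP_UP:
                case MBA_LOOP_DOWN:
                    /* ...loop/LIP events fan out to every vport
                     * (more cases upstream)... */
                    ql_dbg(ql_dbg_async, vha, 0x5024,
                        "Async_event for VP[%d], mb=0x%x vha=%p.\n",
                        i, *mb, vha);
                    qla2x00_async_event(vha, rsp, mb);
                    break;
                case MBA_PORT_UPDATE:
                case MBA_RSCN_UPDATE:
                    /* Targeted events only if mb[3] names this vp_idx. */
                    if ((mb[3] & 0xff) == vha->vp_idx)
                        qla2x00_async_event(vha, rsp, mb);
                    break;
                }

                spin_lock_irqsave(&ha->vport_slock, flags);
                atomic_dec(&vha->vref_count);
                wake_up(&vha->vref_waitq);
            }
            i++;
        }
        spin_unlock_irqrestore(&ha->vport_slock, flags);
    }
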
324 qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
334 if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
335 qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
336 list_for_each_entry(fcport, &vha->vp_fcports, list)
344 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
345 atomic_set(&vha->loop_state, LOOP_DOWN);
346 qla2x00_mark_all_devices_lost(vha);
348 if (!atomic_read(&vha->loop_down_timer))
349 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
352 ql_dbg(ql_dbg_taskm, vha, 0x801d,
353 "Scheduling enable of Vport %d.\n", vha->vp_idx);
355 return qla24xx_enable_vp(vha);
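
Hits 324-355 show that a vport ISP abort is mostly delegated to the physical port: if the base port is not already mid-abort, tell firmware to disable the VP, then treat the event as a loop-down and schedule a re-enable. Reconstructed; the fcport loop body is an assumption:

    int
    qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
    {
        fc_port_t *fcport;

        if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
            qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
            list_for_each_entry(fcport, &vha->vp_fcports, list)
                fcport->logout_on_delete = 0;    /* assumed */
        }

        /* The physical port does the real recovery work; a vport can
         * treat the abort as a plain loop-down. */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            qla2x00_mark_all_devices_lost(vha);
        } else {
            if (!atomic_read(&vha->loop_down_timer))
                atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        }

        ql_dbg(ql_dbg_taskm, vha, 0x801d,
            "Scheduling enable of Vport %d.\n", vha->vp_idx);

        return qla24xx_enable_vp(vha);
    }
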
359 qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
361 struct qla_hw_data *ha = vha->hw;
364 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
365 "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
369 if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
371 ql_dbg(ql_dbg_dpc, vha, 0x4014,
373 qla24xx_configure_vp(vha);
374 ql_dbg(ql_dbg_dpc, vha, 0x4015,
380 if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
381 if (atomic_read(&vha->loop_state) == LOOP_READY) {
382 qla24xx_process_purex_list(&vha->purex_list);
383 clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
387 if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
388 !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
389 atomic_read(&vha->loop_state) != LOOP_DOWN) {
391 if (!vha->relogin_jif ||
392 time_after_eq(jiffies, vha->relogin_jif)) {
393 vha->relogin_jif = jiffies + HZ;
394 clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
396 ql_dbg(ql_dbg_dpc, vha, 0x4018,
398 qla24xx_post_relogin_work(vha);
402 if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
403 (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) {
404 clear_bit(RESET_ACTIVE, &vha->dpc_flags);
407 if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
408 if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
409 ql_dbg(ql_dbg_dpc, vha, 0x401a,
411 qla2x00_loop_resync(vha);
412 clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
413 ql_dbg(ql_dbg_dpc, vha, 0x401b,
418 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
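
Hits 359-418 sketch the per-vport DPC handler as a chain of dpc_flags tests. Condensed to the branches visible in the hits; several debug messages and branches between the hits are elided:

    static int
    qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
    {
        ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
            "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);

        /* First-time setup once the VP index has been acquired. */
        if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags))
            qla24xx_configure_vp(vha);

        /* Drain queued PUREX IOCBs only when the loop is ready. */
        if (test_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags)) {
            if (atomic_read(&vha->loop_state) == LOOP_READY) {
                qla24xx_process_purex_list(&vha->purex_list);
                clear_bit(PROCESS_PUREX_IOCB, &vha->dpc_flags);
            }
        }

        /* Rate-limit relogin work to roughly once per second. */
        if (test_bit(RELOGIN_NEEDED, &vha->dpc_flags) &&
            !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
            atomic_read(&vha->loop_state) != LOOP_DOWN) {
            if (!vha->relogin_jif ||
                time_after_eq(jiffies, vha->relogin_jif)) {
                vha->relogin_jif = jiffies + HZ;
                clear_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                qla24xx_post_relogin_work(vha);
            }
        }

        if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
            !test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)) {
            clear_bit(RESET_ACTIVE, &vha->dpc_flags);
        }

        if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
            if (!test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags)) {
                qla2x00_loop_resync(vha);
                clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
            }
        }

        return 0;
    }
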
424 qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha)
426 struct qla_hw_data *ha = vha->hw;
430 if (vha->vp_idx)
435 clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
460 scsi_qla_host_t *vha;
478 vha = qla24xx_find_vhost_by_name(ha, port_name);
479 if (vha)
484 ql_dbg(ql_dbg_vport, vha, 0xa004,
498 scsi_qla_host_t *vha;
502 vha = qla2x00_create_host(sht, ha);
503 if (!vha) {
504 ql_log(ql_log_warn, vha, 0xa005,
509 host = vha->host;
510 fc_vport->dd_data = vha;
512 u64_to_wwn(fc_vport->node_name, vha->node_name);
513 u64_to_wwn(fc_vport->port_name, vha->port_name);
515 vha->fc_vport = fc_vport;
516 vha->device_flags = 0;
517 vha->vp_idx = qla24xx_allocate_vp_id(vha);
518 if (vha->vp_idx > ha->max_npiv_vports) {
519 ql_dbg(ql_dbg_vport, vha, 0xa006,
523 vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);
525 vha->dpc_flags = 0L;
527 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
528 set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
534 set_bit(VP_SCR_NEEDED, &vha->vp_flags);
535 atomic_set(&vha->loop_state, LOOP_DOWN);
536 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
538 qla2x00_start_timer(vha, WATCH_INTERVAL);
540 vha->req = base_vha->req;
541 vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;
554 ql_dbg(ql_dbg_vport, vha, 0xa007,
556 vha->host_no, vha);
558 vha->flags.init_done = 1;
561 set_bit(vha->vp_idx, ha->vp_idx_map);
565 return vha;
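
Hits 498-565 trace vhost creation end to end. A heavily condensed sketch of the visible steps; Scsi_Host setup, queue wiring, and the error paths between the hits are elided or marked as assumptions:

    scsi_qla_host_t *
    qla24xx_create_vhost(struct fc_vport *fc_vport)
    {
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); /* assumed */
        struct qla_hw_data *ha = base_vha->hw;
        const struct scsi_host_template *sht = &qla2xxx_driver_template;
        scsi_qla_host_t *vha;

        vha = qla2x00_create_host(sht, ha);
        if (!vha) {
            ql_log(ql_log_warn, vha, 0xa005,
                "scsi_host_alloc() failed for vport.\n");
            return NULL;
        }

        fc_vport->dd_data = vha;
        /* Adopt the WWNN/WWPN requested through the FC transport. */
        u64_to_wwn(fc_vport->node_name, vha->node_name);
        u64_to_wwn(fc_vport->port_name, vha->port_name);

        vha->fc_vport = fc_vport;
        vha->device_flags = 0;
        vha->vp_idx = qla24xx_allocate_vp_id(vha);
        if (vha->vp_idx > ha->max_npiv_vports) {
            ql_dbg(ql_dbg_vport, vha, 0xa006,
                "Couldn't allocate vp_id.\n");
            goto create_vhost_failed;    /* label assumed */
        }
        vha->mgmt_svr_loop_id = qla2x00_reserve_mgmt_server_loop_id(vha);

        vha->dpc_flags = 0L;
        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
        set_bit(VP_SCR_NEEDED, &vha->vp_flags);
        atomic_set(&vha->loop_state, LOOP_DOWN);
        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);

        qla2x00_start_timer(vha, WATCH_INTERVAL);

        /* Vports share the base port's request queue and NVMe setting. */
        vha->req = base_vha->req;
        vha->flags.nvme_enabled = base_vha->flags.nvme_enabled;

        vha->flags.init_done = 1;

        mutex_lock(&ha->vport_lock);             /* locking assumed */
        set_bit(vha->vp_idx, ha->vp_idx_map);
        mutex_unlock(&ha->vport_lock);

        return vha;

    create_vhost_failed:
        return NULL;
    }
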
572 qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
574 struct qla_hw_data *ha = vha->hw;
592 qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
594 struct qla_hw_data *ha = vha->hw;
617 qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req)
621 if (req && vha->flags.qpairs_req_created) {
623 ret = qla25xx_init_req_que(vha, req);
627 qla25xx_free_req_que(vha, req);
634 qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
638 if (rsp && vha->flags.qpairs_rsp_created) {
640 ret = qla25xx_init_rsp_que(vha, rsp);
644 qla25xx_free_rsp_que(vha, rsp);
652 qla25xx_delete_queues(struct scsi_qla_host *vha)
657 struct qla_hw_data *ha = vha->hw;
661 list_for_each_entry_safe(qpair, tqpair, &vha->qp_list,
663 qla2xxx_delete_qpair(vha, qpair);
669 ret = qla25xx_delete_req_que(vha, req);
671 ql_log(ql_log_warn, vha, 0x00ea,
683 ret = qla25xx_delete_rsp_que(vha, rsp);
685 ql_log(ql_log_warn, vha, 0x00eb,
704 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
801 vha->flags.qpairs_req_created = 1;
816 struct scsi_qla_host *vha = qpair->vha;
819 qla24xx_process_response_queue(vha, qpair->rsp);
832 struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
917 vha->flags.qpairs_rsp_created = 1;
941 * @vha: adapter block pointer
946 int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
949 struct qla_hw_data *ha = vha->hw;
950 int vp_index = vha->vp_idx;
955 ql_dbg(ql_dbg_vport, vha, 0x10c1,
969 qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
976 ql_dbg(ql_dbg_async, vha, 0xffff,
982 ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
991 ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
995 ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
999 ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
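
Hits 941-999 show qla24xx_control_vp as a synchronous wrapper around an async SRB: allocate an sp on the base host, arm it with qla2x00_init_async_sp() plus a two-second timeout cushion, submit, wait on a completion, then switch on the result. Condensed sketch; the srb field names and the final kref release follow the upstream driver and should be treated as assumptions:

    int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
    {
        int rval = QLA_MEMORY_ALLOC_FAILED;
        struct qla_hw_data *ha = vha->hw;
        int vp_index = vha->vp_idx;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        DECLARE_COMPLETION_ONSTACK(comp);
        srb_t *sp;

        ql_dbg(ql_dbg_vport, vha, 0x10c1,
            "Entered %s cmd %x index %d.\n", __func__, cmd, vp_index);

        if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
            return QLA_PARAMETER_ERROR;

        sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL);
        if (!sp)
            return rval;

        sp->type = SRB_CTRL_VP;
        sp->name = "ctrl_vp";
        sp->comp = &comp;
        qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha) + 2,
                              qla_ctrlvp_sp_done);
        sp->u.iocb_cmd.u.ctrlvp.cmd = cmd;
        sp->u.iocb_cmd.u.ctrlvp.vp_index = vp_index;

        rval = qla2x00_start_sp(sp);
        if (rval != QLA_SUCCESS) {
            ql_dbg(ql_dbg_async, vha, 0xffff,
                "%s: %s Failed submission. %x.\n",
                __func__, sp->name, rval);
            goto done;
        }

        ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n",
            sp->name, sp->handle);

        wait_for_completion(&comp);
        sp->comp = NULL;

        rval = sp->rc;
        switch (rval) {
        case QLA_FUNCTION_TIMEOUT:
            ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Timeout. %x.\n",
                __func__, sp->name, rval);
            break;
        case QLA_SUCCESS:
            ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n",
                __func__, sp->name);
            break;
        default:
            ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n",
                __func__, sp->name, rval);
            break;
        }
    done:
        kref_put(&sp->cmd_kref, qla2x00_sp_release); /* drop INIT ref (assumed) */
        return rval;
    }
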
1009 struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha, uint16_t vp_idx)
1011 struct qla_hw_data *ha = vha->hw;
1013 if (vha->vp_idx == vp_idx)
1014 return vha;
1018 return ha->vp_map[vp_idx].vha;
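
Hits 1009-1018 show the lookup is O(1): the caller's own vha short-circuits, otherwise the per-index vp_map array resolves it. Reconstructed; the guard between the two returns is an assumption:

    struct scsi_qla_host *qla_find_host_by_vp_idx(struct scsi_qla_host *vha,
        uint16_t vp_idx)
    {
        struct qla_hw_data *ha = vha->hw;

        if (vha->vp_idx == vp_idx)
            return vha;

        /* Assumed guard: only consult the map for allocated indices. */
        if (likely(test_bit(vp_idx, ha->vp_idx_map)))
            return ha->vp_map[vp_idx].vha;

        return NULL;
    }
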
1025 qla_update_vp_map(struct scsi_qla_host *vha, int cmd)
1031 if (!vha->hw->vp_map)
1034 key = vha->d_id.b24;
1038 vha->hw->vp_map[vha->vp_idx].vha = vha;
1041 slot = btree_lookup32(&vha->hw->host_map, key);
1043 ql_dbg(ql_dbg_disc, vha, 0xf018,
1044 "Save vha in host_map %p %06x\n", vha, key);
1045 rc = btree_insert32(&vha->hw->host_map,
1046 key, vha, GFP_ATOMIC);
1048 ql_log(ql_log_info, vha, 0xd03e,
1053 ql_dbg(ql_dbg_disc, vha, 0xf019,
1054 "replace existing vha in host_map %p %06x\n", vha, key);
1055 btree_update32(&vha->hw->host_map, key, vha);
1058 vha->hw->vp_map[vha->vp_idx].vha = NULL;
1061 ql_dbg(ql_dbg_disc, vha, 0xf01a,
1062 "clear vha in host_map %p %06x\n", vha, key);
1063 slot = btree_lookup32(&vha->hw->host_map, key);
1065 btree_remove32(&vha->hw->host_map, key);
1066 vha->d_id.b24 = 0;
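
Hits 1025-1066 show the map maintenance: vp_idx entries live in a flat vp_map array, while 24-bit port IDs (AL_PA keys, d_id.b24) live in a btree. Reconstructed around the four commands visible in the hits; the 0xd03e message text is an assumption:

    void
    qla_update_vp_map(struct scsi_qla_host *vha, int cmd)
    {
        void *slot;
        u32 key;
        int rc;

        if (!vha->hw->vp_map)
            return;

        key = vha->d_id.b24;

        switch (cmd) {
        case SET_VP_IDX:
            vha->hw->vp_map[vha->vp_idx].vha = vha;
            break;
        case SET_AL_PA:
            slot = btree_lookup32(&vha->hw->host_map, key);
            if (!slot) {
                ql_dbg(ql_dbg_disc, vha, 0xf018,
                    "Save vha in host_map %p %06x\n", vha, key);
                rc = btree_insert32(&vha->hw->host_map,
                    key, vha, GFP_ATOMIC);
                if (rc)
                    ql_log(ql_log_info, vha, 0xd03e,
                        "Unable to insert s_id into host_map: %06x\n",
                        key);    /* message assumed */
                return;
            }
            ql_dbg(ql_dbg_disc, vha, 0xf019,
                "replace existing vha in host_map %p %06x\n", vha, key);
            btree_update32(&vha->hw->host_map, key, vha);
            break;
        case RESET_VP_IDX:
            vha->hw->vp_map[vha->vp_idx].vha = NULL;
            break;
        case RESET_AL_PA:
            ql_dbg(ql_dbg_disc, vha, 0xf01a,
                "clear vha in host_map %p %06x\n", vha, key);
            slot = btree_lookup32(&vha->hw->host_map, key);
            if (slot)
                btree_remove32(&vha->hw->host_map, key);
            vha->d_id.b24 = 0;
            break;
        }
    }
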
1071 void qla_update_host_map(struct scsi_qla_host *vha, port_id_t id)
1074 if (!vha->d_id.b24) {
1075 vha->d_id = id;
1076 qla_update_vp_map(vha, SET_AL_PA);
1077 } else if (vha->d_id.b24 != id.b24) {
1078 qla_update_vp_map(vha, RESET_AL_PA);
1079 vha->d_id = id;
1080 qla_update_vp_map(vha, SET_AL_PA);
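
Hits 1071-1080 cover this helper almost completely; assembled as-is:

    void qla_update_host_map(struct scsi_qla_host *vha, port_id_t id)
    {
        if (!vha->d_id.b24) {
            /* First address assignment: just record and map it. */
            vha->d_id = id;
            qla_update_vp_map(vha, SET_AL_PA);
        } else if (vha->d_id.b24 != id.b24) {
            /* Address changed: unmap the old key, then map the new one. */
            qla_update_vp_map(vha, RESET_AL_PA);
            vha->d_id = id;
            qla_update_vp_map(vha, SET_AL_PA);
        }
    }
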
1084 int qla_create_buf_pool(struct scsi_qla_host *vha, struct qla_qpair *qp)
1093 ql_log(ql_log_warn, vha, 0x0186,
1100 ql_log(ql_log_warn, vha, 0x0186,
1108 ql_log(ql_log_warn, vha, 0x0186,
1121 struct qla_hw_data *ha = qp->vha->hw;
1137 int qla_get_buf(struct scsi_qla_host *vha, struct qla_qpair *qp, struct qla_buf_dsc *dsc)
1142 struct qla_hw_data *ha = vha->hw;
1148 ql_dbg(ql_dbg_io, vha, 0x00e2,
1156 ql_dbg(ql_dbg_io, vha, 0x00e3,
1166 ql_log(ql_log_fatal, vha, 0x13b1,
1192 struct qla_hw_data *ha = qp->vha->hw;
1200 ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x300b,
1216 ql_dbg(ql_dbg_io + ql_dbg_verbose, qp->vha, 0x3010,
1258 void qla_adjust_buf(struct scsi_qla_host *vha)
1264 if (vha->vp_idx)
1267 if (!vha->buf_expired) {
1268 vha->buf_expired = jiffies + EXPIRE;
1271 if (time_before(jiffies, vha->buf_expired))
1274 vha->buf_expired = jiffies + EXPIRE;
1276 for (i = 0; i < vha->hw->num_qpairs; i++) {
1277 qp = vha->hw->queue_pair_map[i];
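
Hits 1258-1277 show the buffer-pool ager runs only on the base port and only once per EXPIRE interval (a qla_mid.c macro), then walks every qpair. Sketch; the per-qpair trim step is an assumption:

    void qla_adjust_buf(struct scsi_qla_host *vha)
    {
        int i;
        struct qla_qpair *qp;

        if (vha->vp_idx)    /* base port only */
            return;

        if (!vha->buf_expired) {
            vha->buf_expired = jiffies + EXPIRE;
            return;
        }
        if (time_before(jiffies, vha->buf_expired))
            return;

        vha->buf_expired = jiffies + EXPIRE;

        for (i = 0; i < vha->hw->num_qpairs; i++) {
            qp = vha->hw->queue_pair_map[i];
            if (!qp)
                continue;
            /* Assumed: shrink each pool toward its recent high-water mark. */
            qla_trim_buf(qp, qp->buf_pool.max_used - qp->buf_pool.num_active);
        }
    }
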