Lines Matching defs:phba

78 if (vport->phba->cfg_fof)
85 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
87 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb);
89 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc);
93 * @phba: Pointer to HBA object.
100 lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
114 * @phba: The HBA for which this call is being executed.
117 * This routine posts WORKER_RAMP_DOWN_QUEUE event for @phba. This routine
119 @phba to process the WORKER_RAMP_DOWN_QUEUE event.
124 lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
130 spin_lock_irqsave(&phba->hbalock, flags);
131 atomic_inc(&phba->num_rsrc_err);
132 phba->last_rsrc_error_time = jiffies;
134 expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
136 spin_unlock_irqrestore(&phba->hbalock, flags);
140 phba->last_ramp_down_time = jiffies;
142 spin_unlock_irqrestore(&phba->hbalock, flags);
144 spin_lock_irqsave(&phba->pport->work_port_lock, flags);
145 evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
147 phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
148 spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
151 lpfc_worker_wake_up(phba);
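Lines 124-151 above show the coalescing idiom lpfc uses for queue ramp-down: bump a resource-error counter, rate-limit on last_ramp_down_time, set the WORKER_RAMP_DOWN_QUEUE bit under the work-port lock, and wake the worker only when the bit was not already set. The same post-then-wake idiom reappears at lines 3640-3643 and 4152-4156. A minimal userspace sketch of the pattern, with pthreads standing in for the spinlock and lpfc_worker_wake_up() (every name below is illustrative, not an lpfc API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Userspace sketch of the event-posting idiom: the event bit is set under
 * a lock and the worker is woken only on the 0 -> 1 transition, so
 * repeated posts coalesce into a single wakeup. */
static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  work_cv   = PTHREAD_COND_INITIALIZER;
static unsigned int work_events;            /* pending-event bitmask */
#define RAMP_DOWN_QUEUE 0x1u

static void post_ramp_down(void)            /* ~ lpfc_rampdown_queue_depth */
{
    bool already_posted;

    pthread_mutex_lock(&work_lock);
    already_posted = work_events & RAMP_DOWN_QUEUE;
    work_events |= RAMP_DOWN_QUEUE;
    pthread_mutex_unlock(&work_lock);

    if (!already_posted)
        pthread_cond_signal(&work_cv);      /* ~ lpfc_worker_wake_up() */
}

static void *worker(void *arg)              /* ~ lpfc_ramp_down_queue_handler */
{
    (void)arg;
    pthread_mutex_lock(&work_lock);
    while (!(work_events & RAMP_DOWN_QUEUE))
        pthread_cond_wait(&work_cv, &work_lock);
    work_events &= ~RAMP_DOWN_QUEUE;        /* consume the event */
    pthread_mutex_unlock(&work_lock);
    puts("ramping down queue depth");
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, worker, NULL);
    post_ramp_down();
    post_ramp_down();                       /* coalesces with the first post */
    pthread_join(t, NULL);
    return 0;
}

The check on the prior bit value is what lets many concurrent callers collapse into one worker wakeup per event.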
157 * @phba: The HBA for which this call is being executed.
161 * associated with @phba.
164 lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
173 num_rsrc_err = atomic_read(&phba->num_rsrc_err);
183 vports = lpfc_create_vport_work_array(phba);
185 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
196 lpfc_destroy_vport_work_array(phba, vports);
197 atomic_set(&phba->num_rsrc_err, 0);
202 * @phba: Pointer to HBA context object.
209 lpfc_scsi_dev_block(struct lpfc_hba *phba)
217 vports = lpfc_create_vport_work_array(phba);
219 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
226 lpfc_destroy_vport_work_array(phba, vports);
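Lines 164-197 and 209-226 both walk the ports through the same lpfc_create_vport_work_array() / lpfc_destroy_vport_work_array() bracket, with the loop bounded by both max_vports and a NULL sentinel. A userspace sketch of that snapshot-array idiom (illustrative names only, not driver code):

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the vport work-array bracket: snapshot the ports into a
 * NULL-terminated array, walk it without holding any lock, then free it. */
struct vport { int id; };

static struct vport **create_vport_work_array(struct vport *ports, int n)
{
    /* n + 1 slots so the final slot stays NULL as a sentinel */
    struct vport **arr = calloc(n + 1, sizeof(*arr));

    if (arr)
        for (int i = 0; i < n; i++)
            arr[i] = &ports[i];
    return arr;
}

int main(void)
{
    struct vport ports[] = { {0}, {1}, {2} };
    int max_vports = 3;
    struct vport **vports = create_vport_work_array(ports, max_vports);

    if (vports) {
        /* same loop shape as the listing: bounded by max_vports and the
         * NULL sentinel, since the snapshot may hold fewer entries */
        for (int i = 0; i <= max_vports && vports[i] != NULL; i++)
            printf("blocking scsi devices on vport %d\n", vports[i]->id);
        free(vports);                   /* ~ lpfc_destroy_vport_work_array() */
    }
    return 0;
}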
248 struct lpfc_hba *phba = vport->phba;
258 bpl_size = phba->cfg_sg_dma_buf_size -
263 num_to_alloc, phba->cfg_sg_dma_buf_size,
278 psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
287 iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
289 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
332 if ((phba->sli_rev == 3) &&
333 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
367 lpfc_release_scsi_buf_s3(phba, psb);
384 struct lpfc_hba *phba = vport->phba;
393 spin_lock_irqsave(&phba->hbalock, iflag);
394 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
395 qp = &phba->sli4_hba.hdwq[idx];
409 spin_unlock_irqrestore(&phba->hbalock, iflag);
414 * @phba: pointer to lpfc hba data structure.
422 lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
434 struct lpfc_sli_ring *pring = phba->sli4_hba.els_wq->pring;
438 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
440 offline = pci_channel_offline(phba->pcidev);
445 qp = &phba->sli4_hba.hdwq[idx];
446 spin_lock_irqsave(&phba->hbalock, iflag);
459 spin_unlock_irqrestore(&phba->hbalock, iflag);
461 lpfc_sli4_nvme_xri_aborted(phba, axri,
465 lpfc_sli4_nvme_pci_offline_aborted(phba, psb);
466 spin_lock_irqsave(&phba->hbalock, iflag);
477 spin_unlock_irqrestore(&phba->hbalock, iflag);
479 spin_lock_irqsave(&phba->rrq_list_lock, iflag);
480 rrq_empty = list_empty(&phba->active_rrq_list);
481 spin_unlock_irqrestore(&phba->rrq_list_lock, iflag);
483 lpfc_set_rrq_active(phba, ndlp,
485 lpfc_sli4_abts_err_handler(phba, ndlp, axri);
488 if (phba->cfg_fcp_wait_abts_rsp || offline) {
512 lpfc_release_scsi_buf_s4(phba, psb);
514 lpfc_worker_wake_up(phba);
517 spin_lock_irqsave(&phba->hbalock, iflag);
524 for (i = 1; i <= phba->sli.last_iotag; i++) {
525 iocbq = phba->sli.iocbq_lookup[i];
534 spin_unlock_irqrestore(&phba->hbalock, iflag);
536 lpfc_worker_wake_up(phba);
540 spin_unlock_irqrestore(&phba->hbalock, iflag);
545 * @phba: The HBA for which this call is being executed.
549 * This routine removes a scsi buffer from the head of the @phba lpfc_scsi_buf_list list
557 lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
561 struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get;
564 spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
568 spin_lock(&phba->scsi_buf_list_put_lock);
569 list_splice(&phba->lpfc_scsi_buf_list_put,
570 &phba->lpfc_scsi_buf_list_get);
571 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
574 spin_unlock(&phba->scsi_buf_list_put_lock);
576 spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
578 if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
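The s3 getter at lines 557-576 pops from lpfc_scsi_buf_list_get and, only when that list is empty, takes the put-list lock and splices lpfc_scsi_buf_list_put over in one operation; the matching release at lines 708-719 touches only the put list. Splitting the pool this way keeps the allocation and free paths off each other's lock most of the time. A userspace sketch of the two-list pool, with illustrative names and simplified to LIFO order where lpfc preserves FIFO:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Split free pool: the free path pushes onto 'put', the alloc path pops
 * from 'get', and only when 'get' runs dry is 'put' spliced over. */
struct buf { struct buf *next; int id; };

static struct buf *get_list, *put_list;
static pthread_mutex_t get_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t put_lock = PTHREAD_MUTEX_INITIALIZER;

static struct buf *get_buf(void)            /* ~ lpfc_get_scsi_buf_s3 */
{
    struct buf *b;

    pthread_mutex_lock(&get_lock);
    if (!get_list) {
        /* 'get' is empty: steal the whole 'put' list in O(1) */
        pthread_mutex_lock(&put_lock);
        get_list = put_list;
        put_list = NULL;
        pthread_mutex_unlock(&put_lock);
    }
    b = get_list;
    if (b)
        get_list = b->next;
    pthread_mutex_unlock(&get_lock);
    return b;
}

static void release_buf(struct buf *b)      /* ~ lpfc_release_scsi_buf_s3 */
{
    /* free path takes only put_lock, so it rarely contends with get_buf() */
    pthread_mutex_lock(&put_lock);
    b->next = put_list;
    put_list = b;
    pthread_mutex_unlock(&put_lock);
}

int main(void)
{
    struct buf a = { NULL, 1 };

    release_buf(&a);
    struct buf *b = get_buf();
    printf("got buf %d\n", b ? b->id : -1);
    return 0;
}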
586 * @phba: The HBA for which this call is being executed.
598 lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
611 if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) {
615 idx = phba->sli4_hba.cpu_map[cpu].hdwq;
618 lpfc_cmd = lpfc_get_io_buf(phba, ndlp, idx,
619 !phba->cfg_xri_rebalancing);
621 qp = &phba->sli4_hba.hdwq[idx];
640 tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
642 lpfc_release_io_buf(phba, lpfc_cmd, lpfc_cmd->hdwq);
673 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
681 * @phba: The HBA for which this call is being executed.
685 * This routine removes a scsi buffer from the head of the @phba lpfc_scsi_buf_list list
693 lpfc_get_scsi_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
696 return phba->lpfc_get_scsi_buf(phba, ndlp, cmnd);
701 * @phba: The HBA for which this call is being executed.
704 * This routine releases the @psb scsi buffer by adding it to the tail of the @phba
708 lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
715 spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
718 list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
719 spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
724 * @phba: The HBA for which this call is being executed.
733 lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
744 if (!phba->cfg_fcp_wait_abts_rsp)
750 lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp);
756 * @phba: The HBA for which this call is being executed.
759 * This routine releases the @psb scsi buffer by adding it to the tail of the @phba
763 lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
769 phba->lpfc_release_scsi_buf(phba, psb);
793 * @phba: The HBA for which this call is being executed.
806 lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
834 nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
840 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
841 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
844 " %d\n", __func__, phba->cfg_sg_seg_cnt,
846 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
863 if (phba->sli_rev == 3 &&
864 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
891 if (phba->sli_rev == 3 &&
892 !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
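Lines 834-846 above show the post-map guard in the s3 DMA-prep path: after dma_map_sg(), the resulting segment count is checked against cfg_sg_seg_cnt, logged, and warned on once before the command is failed. A sketch of that guard, where map_sg() is a stand-in for dma_map_sg() and the limit value is invented:

#include <stdio.h>

/* Reject (and warn) when the mapped scatter list has more segments than
 * the prepared BPL/SGL can describe. */
#define SG_SEG_CNT 64                       /* stand-in for cfg_sg_seg_cnt */

static int map_sg(int nents) { return nents; } /* pretend mapping succeeds */

static int prep_dma_buf(int nents)
{
    int nseg = map_sg(nents);

    if (nseg > SG_SEG_CNT) {
        /* analogue of the lpfc_printf_log + WARN_ON_ONCE pair */
        fprintf(stderr, "seg cnt %d exceeds limit %d\n", nseg, SG_SEG_CNT);
        return -1;                          /* fail the I/O */
    }
    return 0;
}

int main(void)
{
    printf("%d\n", prep_dma_buf(128));
    return 0;
}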
947 * @phba: The HBA for which this call is being executed.
956 lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc,
978 if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
983 if (phba->lpfc_injerr_lba < (u64)lba ||
984 (phba->lpfc_injerr_lba >= (u64)(lba + numblks)))
987 blockoff = phba->lpfc_injerr_lba - (u64)lba;
1001 if (phba->lpfc_injerr_nportid &&
1002 (phba->lpfc_injerr_nportid != ndlp->nlp_DID))
1009 if (phba->lpfc_injerr_wwpn.u.wwn[0] &&
1010 (memcmp(&ndlp->nlp_portname, &phba->lpfc_injerr_wwpn,
1024 if (phba->lpfc_injerr_wref_cnt) {
1036 lpfc_printf_log(phba, KERN_ERR,
1056 phba->lpfc_injerr_wref_cnt--;
1057 if (phba->lpfc_injerr_wref_cnt == 0) {
1058 phba->lpfc_injerr_nportid = 0;
1059 phba->lpfc_injerr_lba =
1061 memset(&phba->lpfc_injerr_wwpn,
1077 phba->lpfc_injerr_wref_cnt--;
1078 if (phba->lpfc_injerr_wref_cnt == 0) {
1079 phba->lpfc_injerr_nportid = 0;
1080 phba->lpfc_injerr_lba =
1082 memset(&phba->lpfc_injerr_wwpn,
1087 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1098 phba->lpfc_injerr_wref_cnt--;
1099 if (phba->lpfc_injerr_wref_cnt == 0) {
1100 phba->lpfc_injerr_nportid = 0;
1101 phba->lpfc_injerr_lba =
1103 memset(&phba->lpfc_injerr_wwpn,
1108 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1114 if (phba->lpfc_injerr_rref_cnt) {
1125 phba->lpfc_injerr_rref_cnt--;
1126 if (phba->lpfc_injerr_rref_cnt == 0) {
1127 phba->lpfc_injerr_nportid = 0;
1128 phba->lpfc_injerr_lba =
1130 memset(&phba->lpfc_injerr_wwpn,
1135 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1145 if (phba->lpfc_injerr_wapp_cnt) {
1157 lpfc_printf_log(phba, KERN_ERR,
1177 phba->lpfc_injerr_wapp_cnt--;
1178 if (phba->lpfc_injerr_wapp_cnt == 0) {
1179 phba->lpfc_injerr_nportid = 0;
1180 phba->lpfc_injerr_lba =
1182 memset(&phba->lpfc_injerr_wwpn,
1197 phba->lpfc_injerr_wapp_cnt--;
1198 if (phba->lpfc_injerr_wapp_cnt == 0) {
1199 phba->lpfc_injerr_nportid = 0;
1200 phba->lpfc_injerr_lba =
1202 memset(&phba->lpfc_injerr_wwpn,
1207 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1218 phba->lpfc_injerr_wapp_cnt--;
1219 if (phba->lpfc_injerr_wapp_cnt == 0) {
1220 phba->lpfc_injerr_nportid = 0;
1221 phba->lpfc_injerr_lba =
1223 memset(&phba->lpfc_injerr_wwpn,
1228 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1234 if (phba->lpfc_injerr_rapp_cnt) {
1245 phba->lpfc_injerr_rapp_cnt--;
1246 if (phba->lpfc_injerr_rapp_cnt == 0) {
1247 phba->lpfc_injerr_nportid = 0;
1248 phba->lpfc_injerr_lba =
1250 memset(&phba->lpfc_injerr_wwpn,
1255 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1266 if (phba->lpfc_injerr_wgrd_cnt) {
1278 phba->lpfc_injerr_wgrd_cnt--;
1279 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1280 phba->lpfc_injerr_nportid = 0;
1281 phba->lpfc_injerr_lba =
1283 memset(&phba->lpfc_injerr_wwpn,
1290 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1300 phba->lpfc_injerr_wgrd_cnt--;
1301 if (phba->lpfc_injerr_wgrd_cnt == 0) {
1302 phba->lpfc_injerr_nportid = 0;
1303 phba->lpfc_injerr_lba =
1305 memset(&phba->lpfc_injerr_wwpn,
1312 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1318 if (phba->lpfc_injerr_rgrd_cnt) {
1328 phba->lpfc_injerr_rgrd_cnt--;
1329 if (phba->lpfc_injerr_rgrd_cnt == 0) {
1330 phba->lpfc_injerr_nportid = 0;
1331 phba->lpfc_injerr_lba =
1333 memset(&phba->lpfc_injerr_wwpn,
1340 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
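The whole lpfc_bg_err_inject() run above (lines 978-1340) repeats one shape per error type: match the I/O against the armed LBA window, NPort ID, and WWPN, corrupt a tag, decrement the per-type counter, and disarm all criteria when the counter reaches zero. A compact userspace sketch of that countdown injector for a single error type (struct injerr and every constant here are stand-ins, not driver fields):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Countdown-style error injector: inject while the counter is nonzero,
 * clear all match criteria once it hits zero. */
struct injerr {
    uint64_t lba;        /* only inject on I/O covering this LBA */
    uint32_t nportid;    /* 0 means "any port" */
    unsigned wref_cnt;   /* remaining write reftag errors to inject */
};

#define INJERR_LBA_OFF UINT64_MAX

static bool maybe_inject(struct injerr *e, uint64_t lba, uint64_t numblks,
                         uint32_t did, uint32_t *reftag)
{
    if (e->lba == INJERR_LBA_OFF)
        return false;                       /* injector disarmed */
    if (e->lba < lba || e->lba >= lba + numblks)
        return false;                       /* LBA window does not match */
    if (e->nportid && e->nportid != did)
        return false;                       /* target filter does not match */
    if (!e->wref_cnt)
        return false;

    *reftag = 0xDEADBEEF;                   /* corrupt the reference tag */
    if (--e->wref_cnt == 0) {
        /* last injection: disarm by resetting the criteria */
        e->nportid = 0;
        e->lba = INJERR_LBA_OFF;
    }
    return true;
}

int main(void)
{
    struct injerr e = { .lba = 100, .nportid = 0, .wref_cnt = 1 };
    uint32_t reftag = 100;

    if (maybe_inject(&e, 96, 8, 0x0106, &reftag))
        printf("injected: reftag now 0x%x, cnt %u\n", reftag, e.wref_cnt);
    return 0;
}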
1354 * @phba: The HBA for which this call is being executed.
1363 lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1390 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1419 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1434 * @phba: The HBA for which this call is being executed.
1443 lpfc_bg_err_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1504 * @phba: The HBA for which this call is being executed.
1534 lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1550 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1558 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1561 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1636 * @phba: The HBA for which this call is being executed.
1674 lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1704 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1710 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1719 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1722 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
1731 if (num_bde >= (phba->cfg_total_seg_cnt - 2))
1812 if (num_bde >= phba->cfg_total_seg_cnt)
1816 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1878 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1890 * @phba: The HBA for which this call is being executed.
1919 lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc,
1939 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
1947 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
1950 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2004 if (!lsp_just_set && !((j + 1) % phba->border_sge_num) &&
2009 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2045 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2063 * @phba: The HBA for which this call is being executed.
2100 lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
2131 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2137 status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop);
2146 rc = lpfc_bg_err_inject(phba, sc, &reftag, NULL, 1);
2149 lpfc_bg_err_opcodes(phba, sc, &txop, &rxop);
2158 if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) &&
2159 !(phba->cfg_xpsgl))
2163 if (!((j + 1) % phba->border_sge_num) ||
2164 !((j + 2) % phba->border_sge_num) ||
2165 !((j + 3) % phba->border_sge_num)) {
2171 sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
2183 sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size);
2278 if ((num_sge >= phba->cfg_total_seg_cnt) &&
2279 !phba->cfg_xpsgl)
2283 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2289 if (!((j + 1) % phba->border_sge_num)) {
2296 sgl_xtra = lpfc_get_sgl_per_hdwq(phba,
2310 phba->cfg_sg_dma_buf_size);
2387 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2400 * @phba: The HBA for which this call is being executed.
2410 lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc)
2427 if (phba)
2428 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2438 * @phba: The HBA for which this call is being executed.
2447 lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba,
2479 * @phba: The HBA for which this call is being executed.
2490 lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
2502 struct lpfc_vport *vport = phba->pport;
2516 datasegcnt = dma_map_sg(&phba->pcidev->dev,
2525 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
2526 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
2531 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
2537 if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) {
2542 num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl,
2557 protsegcnt = dma_map_sg(&phba->pcidev->dev,
2572 (phba->cfg_total_seg_cnt - 2)) {
2577 num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl,
2581 (num_bde > phba->cfg_total_seg_cnt)) {
2592 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2610 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
2632 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
2636 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2640 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
2683 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
2821 phba->bg_guard_err_cnt++;
2822 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2831 phba->bg_reftag_err_cnt++;
2832 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2841 phba->bg_apptag_err_cnt++;
2842 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2862 lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd,
2874 if (phba->sli_rev == LPFC_SLI_REV4) {
2917 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2929 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2943 phba->bg_guard_err_cnt++;
2944 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2956 phba->bg_reftag_err_cnt++;
2957 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
2969 phba->bg_apptag_err_cnt++;
2970 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3014 lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
3022 lpfc_calc_bg_err(phba, lpfc_cmd);
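Lines 2821-3022 bump one of three counters, bg_guard_err_cnt, bg_reftag_err_cnt, or bg_apptag_err_cnt, according to which BlockGuard check failed. A sketch of that classification, assuming invented bit values rather than the adapter's real status layout:

#include <stdio.h>

/* Classify BlockGuard failures into per-type error counters. Bit values
 * are stand-ins, not the hardware's. */
#define BGS_GUARD_ERR  0x1u
#define BGS_REFTAG_ERR 0x2u
#define BGS_APPTAG_ERR 0x4u

static unsigned long guard_errs, reftag_errs, apptag_errs;

static void parse_bg_err(unsigned int bgstat)
{
    if (bgstat & BGS_GUARD_ERR)
        guard_errs++;      /* CRC/IP checksum mismatch on the data */
    if (bgstat & BGS_REFTAG_ERR)
        reftag_errs++;     /* LBA-derived reference tag mismatch */
    if (bgstat & BGS_APPTAG_ERR)
        apptag_errs++;     /* application tag mismatch */
}

int main(void)
{
    parse_bg_err(BGS_GUARD_ERR | BGS_REFTAG_ERR);
    printf("guard=%lu ref=%lu app=%lu\n", guard_errs, reftag_errs, apptag_errs);
    return 0;
}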
3030 * @phba: The HBA for which this call is being executed.
3042 lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3050 struct lpfc_vport *vport = phba->pport;
3085 if (!phba->cfg_xpsgl &&
3086 lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
3087 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3091 __func__, phba->cfg_sg_seg_cnt,
3093 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3123 !((j + 1) % phba->border_sge_num) &&
3130 phba, lpfc_cmd);
3173 phba->cfg_sg_dma_buf_size);
3189 ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3190 phba->cfg_enable_pbde)) {
3214 if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
3215 phba->cfg_enable_pbde) {
3234 if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
3255 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3270 (phba->cfg_XLanePriority << 1));
3278 * @phba: The HBA for which this call is being executed.
3290 lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
3303 struct lpfc_vport *vport = phba->pport;
3316 datasegcnt = dma_map_sg(&phba->pcidev->dev,
3332 if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt &&
3333 !phba->cfg_xpsgl) {
3334 WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt);
3339 prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd);
3345 phba->cfg_total_seg_cnt) &&
3346 !phba->cfg_xpsgl) {
3351 num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl,
3367 protsegcnt = dma_map_sg(&phba->pcidev->dev,
3381 (phba->cfg_total_seg_cnt - 2)) &&
3382 !phba->cfg_xpsgl) {
3387 num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl,
3392 (num_sge > phba->cfg_total_seg_cnt &&
3393 !phba->cfg_xpsgl)) {
3404 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3426 fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
3433 if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
3453 if ((phba->cfg_fof) && ((struct lpfc_device_data *)
3461 (phba->cfg_XLanePriority << 1));
3480 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd),
3484 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3488 phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt,
3498 * @phba: The HBA for which this call is being executed.
3509 lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3511 return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
3517 * @phba: The HBA for which this call is being executed.
3528 lpfc_bg_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
3530 return phba->lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
3550 return vport->phba->lpfc_scsi_prep_cmnd_buf(vport, lpfc_cmd, tmo);
3555 * @phba: Pointer to HBA context object.
3564 lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
3580 fast_path_evt = lpfc_alloc_fast_evt(phba);
3595 fast_path_evt = lpfc_alloc_fast_evt(phba);
3621 fast_path_evt = lpfc_alloc_fast_evt(phba);
3640 spin_lock_irqsave(&phba->hbalock, flags);
3641 list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
3642 spin_unlock_irqrestore(&phba->hbalock, flags);
3643 lpfc_worker_wake_up(phba);
3649 * @phba: The HBA for which this call is being executed.
3656 lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_io_buf *psb)
3667 dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(psb->pCmd),
3674 * @phba: pointer to the HBA object
3680 lpfc_unblock_requests(struct lpfc_hba *phba)
3686 if (phba->sli_rev == LPFC_SLI_REV4 &&
3687 !phba->sli4_hba.max_cfg_param.vpi_used) {
3688 shost = lpfc_shost_from_vport(phba->pport);
3693 vports = lpfc_create_vport_work_array(phba);
3695 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3699 lpfc_destroy_vport_work_array(phba, vports);
3704 * @phba: pointer to the HBA object
3710 lpfc_block_requests(struct lpfc_hba *phba)
3716 if (atomic_read(&phba->cmf_stop_io))
3719 if (phba->sli_rev == LPFC_SLI_REV4 &&
3720 !phba->sli4_hba.max_cfg_param.vpi_used) {
3721 shost = lpfc_shost_from_vport(phba->pport);
3726 vports = lpfc_create_vport_work_array(phba);
3728 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3732 lpfc_destroy_vport_work_array(phba, vports);
3737 * @phba: The HBA for which this call is being executed.
3749 lpfc_update_cmf_cmpl(struct lpfc_hba *phba,
3761 cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
3771 * @phba: The HBA for which this call is being executed.
3778 lpfc_update_cmf_cmd(struct lpfc_hba *phba, uint32_t size)
3785 if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
3786 phba->cmf_max_bytes_per_interval) {
3789 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3792 if (total >= phba->cmf_max_bytes_per_interval) {
3793 if (!atomic_xchg(&phba->cmf_bw_wait, 1)) {
3794 lpfc_block_requests(phba);
3795 phba->cmf_last_ts =
3796 lpfc_calc_cmf_latency(phba);
3798 atomic_inc(&phba->cmf_busy);
3801 if (size > atomic_read(&phba->rx_max_read_cnt))
3802 atomic_set(&phba->rx_max_read_cnt, size);
3805 cgs = per_cpu_ptr(phba->cmf_stat, raw_smp_processor_id());
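lpfc_update_cmf_cmd() at lines 3778-3805 sums per-CPU byte counters against cmf_max_bytes_per_interval, and the atomic_xchg() on cmf_bw_wait guarantees only the first submitter to cross the budget calls lpfc_block_requests(). A userspace sketch of that per-CPU budget check using C11 atomics (the CPU count and budget are made up):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Per-"CPU" byte counters are summed on each submit; the first thread to
 * observe the budget exceeded wins the atomic_exchange and blocks. */
#define NCPUS 4

static _Atomic uint64_t rx_bytes[NCPUS];    /* per-CPU issued bytes */
static _Atomic int bw_wait;                 /* analogue of cmf_bw_wait */
static const uint64_t max_bytes_per_interval = 1 << 20;

static bool account_cmd(int cpu, uint64_t size)
{
    uint64_t total = 0;

    atomic_fetch_add(&rx_bytes[cpu], size);
    for (int i = 0; i < NCPUS; i++)
        total += atomic_load(&rx_bytes[i]);

    if (total >= max_bytes_per_interval &&
        !atomic_exchange(&bw_wait, 1)) {
        /* exactly one caller reaches here per interval */
        puts("budget exhausted: blocking requests");
        return false;
    }
    return true;
}

int main(void)
{
    account_cmd(0, 512 * 1024);
    account_cmd(1, 512 * 1024);             /* crosses the 1 MiB budget */
    return 0;
}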
3994 lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, fcpi_parm);
3999 * @phba: The HBA for which this call is being executed.
4008 lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
4041 lpfc_release_scsi_buf(phba, lpfc_cmd);
4047 if (phba->sli4_hba.hdwq)
4048 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4051 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4052 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4062 if (phba->cfg_fcp_wait_abts_rsp)
4133 fast_path_evt = lpfc_alloc_fast_evt(phba);
4152 spin_lock_irqsave(&phba->hbalock, flags);
4154 &phba->work_list);
4155 spin_unlock_irqrestore(&phba->hbalock, flags);
4156 lpfc_worker_wake_up(phba);
4187 lpfc_parse_bg_err(phba, lpfc_cmd, pwqeOut);
4200 lpfc_set_rrq_active(phba, ndlp,
4285 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4291 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4292 lpfc_io_ktime(phba, lpfc_cmd);
4300 if (phba->cmf_active_mode != LPFC_CFG_OFF &&
4305 lpfc_update_cmf_cmpl(phba, lat, scsi_bufflen(cmd), shost);
4324 lpfc_release_scsi_buf(phba, lpfc_cmd);
4329 * @phba: The HBA for which this call is being executed.
4338 lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
4358 if (!cmd || !phba) {
4366 if (phba->sli4_hba.hdwq)
4367 phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
4370 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
4371 this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
4438 phba->sli_rev == LPFC_SLI_REV4 ?
4452 fast_path_evt = lpfc_alloc_fast_evt(phba);
4471 spin_lock_irqsave(&phba->hbalock, flags);
4473 &phba->work_list);
4474 spin_unlock_irqrestore(&phba->hbalock, flags);
4475 lpfc_worker_wake_up(phba);
4503 lpfc_parse_bg_err(phba, lpfc_cmd,
4514 && (phba->sli_rev == LPFC_SLI_REV4)
4520 lpfc_set_rrq_active(phba, pnode,
4565 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
4574 phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
4575 lpfc_io_ktime(phba, lpfc_cmd);
4592 lpfc_release_scsi_buf(phba, lpfc_cmd);
4688 struct lpfc_hba *phba = vport->phba;
4698 hdwq = &phba->sli4_hba.hdwq[idx];
4731 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
4767 phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
4870 if (!(vport->phba->sli3_options & LPFC_SLI3_BG_ENABLED))
4939 vport->phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
4956 lpfc_prep_embed_io(vport->phba, lpfc_cmd);
4961 lpfc_sli4_set_rsp_sgl_last(vport->phba, lpfc_cmd);
4968 * @phba: The HBA struct for which this call is being executed.
4971 * This routine sets up the SCSI interface API function jump table in @phba
4976 lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
4979 phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf;
4983 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
4984 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
4985 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
4986 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
4987 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s3;
4988 phba->lpfc_scsi_prep_task_mgmt_cmd =
4992 phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
4993 phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
4994 phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
4995 phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s4;
4996 phba->lpfc_scsi_prep_cmnd_buf = lpfc_scsi_prep_cmnd_buf_s4;
4997 phba->lpfc_scsi_prep_task_mgmt_cmd =
5001 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
5006 phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
5012 * @phba: The HBA for which this call is being executed.
5020 lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
5026 lpfc_release_scsi_buf(phba, lpfc_cmd);
5033 * @phba: lpfc_hba pointer.
5045 lpfc_check_pci_resettable(struct lpfc_hba *phba)
5047 const struct pci_dev *pdev = phba->pcidev;
5055 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5062 if (phba->sli_rev != LPFC_SLI_REV4 ||
5063 test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
5064 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5075 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5099 struct lpfc_hba *phba = vport->phba;
5105 if (phba && phba->pcidev) {
5107 scnprintf(tmp, sizeof(tmp), phba->ModelDesc);
5115 phba->pcidev->bus->number, phba->pcidev->devfn,
5116 phba->pcidev->irq);
5122 if (phba->Port[0]) {
5123 scnprintf(tmp, sizeof(tmp), " port %s", phba->Port);
5130 link_speed = lpfc_sli_port_speed_get(phba);
5140 if (!lpfc_check_pci_resettable(phba)) {
5152 * @phba: The HBA for which this call is being executed.
5154 * This routine rearms the fcp_poll_timer of @phba using cfg_poll_tmo.
5157 static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba *phba)
5160 (jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));
5162 if (!list_empty(&phba->sli.sli3_ring[LPFC_FCP_RING].txcmplq))
5163 mod_timer(&phba->fcp_poll_timer,
5169 * @phba: The HBA for which this call is being executed.
5171 * This routine starts the fcp_poll_timer of @phba.
5173 void lpfc_poll_start_timer(struct lpfc_hba *phba)
5175 lpfc_poll_rearm_timer(phba);
5187 struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer);
5189 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5190 lpfc_sli_handle_fast_ring_event(phba,
5191 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5193 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5194 lpfc_poll_rearm_timer(phba);
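The polling cluster at lines 5157-5194 rearms fcp_poll_timer to jiffies + cfg_poll_tmo, but mod_timer() is called only while the FCP ring's txcmplq is non-empty, so an idle ring stops generating timer traffic. A rough userspace analogue of that conditional rearm (deadline arithmetic only; no real timer is armed, and the queue check is a stub):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Compute the next poll deadline, arming it only while the completion
 * queue still has outstanding work. */
static const long poll_tmo_ms = 10;         /* default cfg_poll_tmo */
static struct timespec poll_deadline;
static bool timer_armed;

static bool txcmplq_nonempty(void)          /* stand-in for the real queue */
{
    return true;
}

static void poll_rearm_timer(void)
{
    clock_gettime(CLOCK_MONOTONIC, &poll_deadline);
    poll_deadline.tv_nsec += poll_tmo_ms * 1000000L;
    if (poll_deadline.tv_nsec >= 1000000000L) {
        poll_deadline.tv_sec += 1;
        poll_deadline.tv_nsec -= 1000000000L;
    }
    /* analogue of mod_timer(): arm only if there is outstanding work */
    timer_armed = txcmplq_nonempty();
}

int main(void)
{
    poll_rearm_timer();
    printf("armed=%d deadline=%ld.%09ld\n", timer_armed,
           (long)poll_deadline.tv_sec, poll_deadline.tv_nsec);
    return 0;
}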
5229 struct lpfc_hba *phba = vport->phba;
5254 (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))) {
5256 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5273 if (phba->cmf_active_mode != LPFC_CFG_OFF &&
5277 err = lpfc_update_cmf_cmd(phba, scsi_bufflen(cmnd));
5282 if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
5313 lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp, cmnd);
5315 lpfc_rampdown_queue_depth(phba);
5340 if (vport->phba->cfg_enable_bg) {
5351 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
5353 err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
5365 if (lpfc_is_vmid_enabled(phba) &&
5367 phba->pport->vmid_priority_tagging ==
5384 if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
5385 this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
5388 err = lpfc_sli_issue_fcp_io(phba, LPFC_FCP_RING, cur_iocbq,
5393 lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
5409 phba->sli_rev == LPFC_SLI_REV4 ?
5411 phba->sli_rev == LPFC_SLI_REV4 ?
5412 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi] :
5415 phba->sli_rev == LPFC_SLI_REV4 ?
5424 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
5425 lpfc_sli_handle_fast_ring_event(phba,
5426 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5428 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5429 lpfc_poll_rearm_timer(phba);
5432 if (phba->cfg_xri_rebalancing)
5433 lpfc_keep_pvt_pool_above_lowwm(phba, lpfc_cmd->hdwq_no);
5439 lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
5440 if (phba->sli4_hba.hdwq) {
5443 phba->sli4_hba.hdwq[idx].scsi_cstat.output_requests--;
5446 phba->sli4_hba.hdwq[idx].scsi_cstat.input_requests--;
5449 phba->sli4_hba.hdwq[idx].scsi_cstat.control_requests--;
5453 lpfc_release_scsi_buf(phba, lpfc_cmd);
5455 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5460 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5466 lpfc_release_scsi_buf(phba, lpfc_cmd);
5467 lpfc_update_cmf_cmpl(phba, LPFC_CGN_NOT_SENT, scsi_bufflen(cmnd),
5485 del_timer_sync(&vport->phba->inactive_vmid_poll);
5517 struct lpfc_hba *phba = vport->phba;
5538 spin_lock(&phba->hbalock);
5540 if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag)) {
5557 if (phba->sli_rev == LPFC_SLI_REV4) {
5558 pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring;
5592 if (phba->sli_rev == LPFC_SLI_REV4)
5594 spin_unlock(&phba->hbalock);
5600 if (phba->sli_rev == LPFC_SLI_REV4) {
5602 ret_val = lpfc_sli4_issue_abort_iotag(phba, iocb,
5605 pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
5606 ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocb,
5611 lpfc_issue_hb_tmo(phba);
5621 spin_unlock(&phba->hbalock);
5624 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
5625 lpfc_sli_handle_fast_ring_event(phba,
5626 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
5655 if (phba->sli_rev == LPFC_SLI_REV4)
5658 spin_unlock(&phba->hbalock);
5783 struct lpfc_hba *phba = vport->phba;
5797 lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode, NULL);
5800 lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
5805 status = phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
5808 lpfc_release_scsi_buf(phba, lpfc_cmd);
5813 iocbqrsp = lpfc_sli_get_iocbq(phba);
5815 lpfc_release_scsi_buf(phba, lpfc_cmd);
5828 status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
5831 (get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_SUCCESS)) {
5833 get_job_ulpstatus(phba, iocbqrsp) != IOSTAT_FCP_RSP_ERROR)
5839 get_job_ulpstatus(phba, iocbqrsp),
5840 get_job_word4(phba, iocbqrsp),
5844 if (get_job_ulpstatus(phba, iocbqrsp) ==
5860 lpfc_sli_release_iocbq(phba, iocbqrsp);
5863 lpfc_release_scsi_buf(phba, lpfc_cmd);
5936 struct lpfc_hba *phba = vport->phba;
5943 &phba->sli.sli3_ring[LPFC_FCP_RING],
6186 struct lpfc_hba *phba = vport->phba;
6192 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
6193 lpfc_offline(phba);
6194 rc = lpfc_sli_brdrestart(phba);
6199 if (phba->sli_rev < LPFC_SLI_REV4) {
6200 rc = lpfc_sli_chipset_init(phba);
6205 rc = lpfc_online(phba);
6209 lpfc_unblock_mgmt_io(phba);
6215 lpfc_unblock_mgmt_io(phba);
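The host-reset fragment at lines 6192-6215 sequences offline_prep, offline, and brdrestart, re-runs chipset init only on pre-SLI-4 parts, then brings the port back online, unblocking management I/O on both the success and failure paths. A skeletal sketch of that sequencing with a shared unwind label (all functions are stubs, not lpfc calls):

#include <stdio.h>

static int brdrestart(void)   { return 0; }
static int chipset_init(void) { return 0; }
static int online(void)       { return 0; }

static int host_reset(int sli_rev)
{
    int rc;

    puts("offline_prep + offline");

    rc = brdrestart();
    if (rc)
        goto unblock;

    if (sli_rev < 4) {                      /* SLI-3 parts need re-init */
        rc = chipset_init();
        if (rc)
            goto unblock;
    }

    rc = online();
unblock:
    puts("unblock_mgmt_io");                /* runs on success and failure */
    return rc ? -1 : 0;
}

int main(void)
{
    return host_reset(3);
}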
6236 struct lpfc_hba *phba = vport->phba;
6249 if (phba->cfg_fof) {
6257 spin_lock_irqsave(&phba->devicelock, flags);
6258 device_data = __lpfc_get_device_data(phba,
6259 &phba->luns,
6264 spin_unlock_irqrestore(&phba->devicelock, flags);
6265 device_data = lpfc_create_device_data(phba,
6269 phba->cfg_XLanePriority,
6273 spin_lock_irqsave(&phba->devicelock, flags);
6274 list_add_tail(&device_data->listentry, &phba->luns);
6278 spin_unlock_irqrestore(&phba->devicelock, flags);
6283 sdev_cnt = atomic_inc_return(&phba->sdev_cnt);
6286 if (phba->sli_rev == LPFC_SLI_REV4)
6298 total = phba->total_scsi_bufs;
6306 if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
6313 phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
6318 num_to_alloc, phba->cfg_hba_queue_depth,
6319 (phba->cfg_hba_queue_depth - total));
6320 num_to_alloc = phba->cfg_hba_queue_depth - total;
6331 phba->total_scsi_bufs += num_allocated;
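The slave_alloc fragment at lines 6298-6331 clamps each allocation so total_scsi_bufs never exceeds cfg_hba_queue_depth minus a reserve held back for discovery I/O. A sketch of the clamp (the reserve constant and numbers are invented):

#include <stdio.h>

/* Never let the buffer total exceed the HBA queue depth minus a
 * discovery reserve; partial allocations are allowed. */
#define DISC_IOCB_BUFF_COUNT 20             /* stand-in reserve value */

static int clamp_alloc(int total, int hba_queue_depth, int num_to_alloc)
{
    int budget = hba_queue_depth - DISC_IOCB_BUFF_COUNT;

    if (total >= budget)
        return 0;                           /* pool already at the cap */
    if (total + num_to_alloc > budget) {
        printf("reducing request from %d to %d\n",
               num_to_alloc, budget - total);
        return budget - total;              /* partial allocation only */
    }
    return num_to_alloc;
}

int main(void)
{
    /* depth 128, reserve 20 -> budget 108; 100 in use leaves room for 8 */
    printf("allocating %d buffers\n", clamp_alloc(100, 128, 16));
    return 0;
}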
6350 struct lpfc_hba *phba = vport->phba;
6354 if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
6355 lpfc_sli_handle_fast_ring_event(phba,
6356 &phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
6357 if (phba->cfg_poll & DISABLE_FCP_RING_INT)
6358 lpfc_poll_rearm_timer(phba);
6374 struct lpfc_hba *phba = vport->phba;
6378 atomic_dec(&phba->sdev_cnt);
6379 if ((phba->cfg_fof) && (device_data)) {
6380 spin_lock_irqsave(&phba->devicelock, flags);
6383 lpfc_delete_device_data(phba, device_data);
6384 spin_unlock_irqrestore(&phba->devicelock, flags);
6392 * @phba: Pointer to host bus adapter structure.
6410 lpfc_create_device_data(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6418 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6419 !(phba->cfg_fof))
6428 lun_info = mempool_alloc(phba->device_data_mem_pool, memory_flags);
6446 * @phba: Pointer to host bus adapter structure.
6453 lpfc_delete_device_data(struct lpfc_hba *phba,
6457 if (unlikely(!phba) || !lun_info ||
6458 !(phba->cfg_fof))
6463 mempool_free(lun_info, phba->device_data_mem_pool);
6469 * @phba: Pointer to host bus adapter structure.
6484 __lpfc_get_device_data(struct lpfc_hba *phba, struct list_head *list,
6491 if (unlikely(!phba) || !list || !vport_wwpn || !target_wwpn ||
6492 !phba->cfg_fof)
6511 * @phba: Pointer to host bus adapter structure.
6537 lpfc_find_next_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6552 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6556 !phba->cfg_fof)
6565 spin_lock_irqsave(&phba->devicelock, flags);
6566 list_for_each_entry(lun_info, &phba->luns, listentry) {
6591 if (phba->cfg_oas_flags & OAS_FIND_ANY_VPORT)
6594 if (phba->cfg_oas_flags & OAS_FIND_ANY_TARGET)
6608 spin_unlock_irqrestore(&phba->devicelock, flags);
6614 * @phba: Pointer to host bus adapter structure.
6634 lpfc_enable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6641 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6642 !phba->cfg_fof)
6645 spin_lock_irqsave(&phba->devicelock, flags);
6648 lun_info = __lpfc_get_device_data(phba, &phba->luns, vport_wwpn,
6654 spin_unlock_irqrestore(&phba->devicelock, flags);
6659 lun_info = lpfc_create_device_data(phba, vport_wwpn, target_wwpn, lun,
6665 list_add_tail(&lun_info->listentry, &phba->luns);
6666 spin_unlock_irqrestore(&phba->devicelock, flags);
6669 spin_unlock_irqrestore(&phba->devicelock, flags);
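lpfc_enable_oas_lun() at lines 6634-6669 is a get-or-create under phba->devicelock: look the LUN up while holding the lock, flag it if found, otherwise allocate (non-blocking, since the lock is held) and link it in before unlocking. A userspace sketch of that shape (list layout and names simplified; lpfc appends to the tail and allocates from a mempool rather than malloc):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Get-or-create under one lock: lookup and insert happen in the same
 * critical section so no duplicate entry can race in. */
struct lun_entry {
    struct lun_entry *next;
    unsigned long lun;
    int oas_enabled;
};

static struct lun_entry *luns;
static pthread_mutex_t device_lock = PTHREAD_MUTEX_INITIALIZER;

static int enable_oas_lun(unsigned long lun)
{
    struct lun_entry *e;

    pthread_mutex_lock(&device_lock);
    for (e = luns; e; e = e->next) {
        if (e->lun == lun) {                /* already tracked: just flag */
            e->oas_enabled = 1;
            pthread_mutex_unlock(&device_lock);
            return 1;
        }
    }
    e = malloc(sizeof(*e));                 /* kernel code uses a mempool */
    if (!e) {
        pthread_mutex_unlock(&device_lock);
        return 0;
    }
    e->lun = lun;
    e->oas_enabled = 1;
    e->next = luns;                         /* lpfc adds to the tail */
    luns = e;
    pthread_mutex_unlock(&device_lock);
    return 1;
}

int main(void)
{
    printf("%d %d\n", enable_oas_lun(3), enable_oas_lun(3));
    return 0;
}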
6675 * @phba: Pointer to host bus adapter structure.
6694 lpfc_disable_oas_lun(struct lpfc_hba *phba, struct lpfc_name *vport_wwpn,
6701 if (unlikely(!phba) || !vport_wwpn || !target_wwpn ||
6702 !phba->cfg_fof)
6705 spin_lock_irqsave(&phba->devicelock, flags);
6708 lun_info = __lpfc_get_device_data(phba,
6709 &phba->luns, vport_wwpn,
6715 lpfc_delete_device_data(phba, lun_info);
6716 spin_unlock_irqrestore(&phba->devicelock, flags);
6720 spin_unlock_irqrestore(&phba->devicelock, flags);