Lines Matching defs:phba

72 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba);
73 static void lpfc_cpuhp_remove(struct lpfc_hba *phba);
74 static void lpfc_cpuhp_add(struct lpfc_hba *phba);
86 static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
87 static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
93 static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
97 static void lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba);
98 static void lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba);
104 static int lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport);
105 static void lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts);
109 * @phba: pointer to lpfc hba data structure.
122 lpfc_config_port_prep(struct lpfc_hba *phba)
124 lpfc_vpd_t *vp = &phba->vpd;
134 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
136 phba->link_state = LPFC_HBA_ERROR;
141 phba->link_state = LPFC_INIT_MBX_CMDS;
143 if (lpfc_is_LC_HBA(phba->pcidev->device)) {
152 lpfc_read_nv(phba, pmb);
158 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
161 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
166 mempool_free(pmb, phba->mbox_mem_pool);
169 memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
170 sizeof(phba->wwnn));
171 memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
172 sizeof(phba->wwpn));
179 phba->sli3_options &= (uint32_t)LPFC_SLI3_BG_ENABLED;
182 lpfc_read_rev(phba, pmb);
183 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
185 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
189 mempool_free(pmb, phba->mbox_mem_pool);
200 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
203 mempool_free(pmb, phba->mbox_mem_pool);
207 if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
208 mempool_free(pmb, phba->mbox_mem_pool);
235 phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;
237 if (lpfc_is_LC_HBA(phba->pcidev->device))
238 memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
239 sizeof(phba->RandomData));
246 lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
247 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
250 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
270 lpfc_parse_vpd(phba, lpfc_vpd_data, offset);
274 mempool_free(pmb, phba->mbox_mem_pool);
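
Every mailbox interaction in lpfc_config_port_prep() above follows one polled idiom: allocate from the mailbox mempool, build the command, issue with MBX_POLL, check for MBX_SUCCESS, free. A minimal sketch of that idiom, using only calls visible in the fragments (error handling trimmed):

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	lpfc_read_rev(phba, pmb);			/* build the command */
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);	/* block until done */
	if (rc != MBX_SUCCESS) {
		mempool_free(pmb, phba->mbox_mem_pool);	/* adapter rejected it */
		return -EIO;
	}
	/* consume the response in pmb->u.mb, then give the buffer back */
	mempool_free(pmb, phba->mbox_mem_pool);
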
280 * @phba: pointer to lpfc hba data structure.
289 lpfc_config_async_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
292 phba->temp_sensor_support = 1;
294 phba->temp_sensor_support = 0;
295 mempool_free(pmboxq, phba->mbox_mem_pool);
301 * @phba: pointer to lpfc hba data structure.
310 lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
319 mempool_free(pmboxq, phba->mbox_mem_pool);
332 snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
335 snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
338 mempool_free(pmboxq, phba->mbox_mem_pool);
353 struct lpfc_hba *phba = vport->phba;
375 if (phba->sli_rev == LPFC_SLI_REV4 &&
377 phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_FABRIC) {
378 if (!(phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG))
379 phba->sli4_hba.fawwpn_flag &=
381 lpfc_printf_log(phba, KERN_INFO,
389 phba->sli4_hba.fawwpn_flag);
405 * @phba: pointer to lpfc hba data structure.
417 lpfc_config_port_post(struct lpfc_hba *phba)
419 struct lpfc_vport *vport = phba->pport;
424 struct lpfc_sli *psli = &phba->sli;
429 spin_lock_irq(&phba->hbalock);
434 if (phba->over_temp_state == HBA_OVER_TEMP)
435 phba->over_temp_state = HBA_NORMAL_TEMP;
436 spin_unlock_irq(&phba->hbalock);
438 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
440 phba->link_state = LPFC_HBA_ERROR;
446 rc = lpfc_read_sparam(phba, pmb, 0);
448 mempool_free(pmb, phba->mbox_mem_pool);
453 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
454 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
458 phba->link_state = LPFC_HBA_ERROR;
459 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
470 lpfc_mbuf_free(phba, mp->virt, mp->phys);
478 fc_host_max_npiv_vports(shost) = phba->max_vpi;
482 if (phba->SerialNumber[0] == 0) {
490 phba->SerialNumber[i] =
493 phba->SerialNumber[i] =
498 phba->SerialNumber[i] =
501 phba->SerialNumber[i] =
506 lpfc_read_config(phba, pmb);
508 if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
509 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
513 phba->link_state = LPFC_HBA_ERROR;
514 mempool_free(pmb, phba->mbox_mem_pool);
519 lpfc_sli_read_link_ste(phba);
522 if (phba->cfg_hba_queue_depth > mb->un.varRdConfig.max_xri) {
523 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
525 phba->cfg_hba_queue_depth,
527 phba->cfg_hba_queue_depth = mb->un.varRdConfig.max_xri;
530 phba->lmt = mb->un.varRdConfig.lmt;
533 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
535 phba->link_state = LPFC_LINK_DOWN;
544 if (phba->sli_rev != 3)
545 lpfc_post_rcv_buf(phba);
550 if (phba->intr_type == MSIX) {
551 rc = lpfc_config_msi(phba, pmb);
553 mempool_free(pmb, phba->mbox_mem_pool);
556 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
558 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
563 mempool_free(pmb, phba->mbox_mem_pool);
568 spin_lock_irq(&phba->hbalock);
570 clear_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
573 if (lpfc_readl(phba->HCregaddr, &status)) {
574 spin_unlock_irq(&phba->hbalock);
587 if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
588 (phba->cfg_poll & DISABLE_FCP_RING_INT))
591 writel(status, phba->HCregaddr);
592 readl(phba->HCregaddr); /* flush */
593 spin_unlock_irq(&phba->hbalock);
596 timeout = phba->fc_ratov * 2;
600 mod_timer(&phba->hb_tmofunc,
602 clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
603 clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
604 phba->last_completion_time = jiffies;
606 mod_timer(&phba->eratt_poll,
607 jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
609 if (test_bit(LINK_DISABLED, &phba->hba_flag)) {
610 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
612 lpfc_down_link(phba, pmb);
614 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
616 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
620 mempool_free(pmb, phba->mbox_mem_pool);
623 } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
624 mempool_free(pmb, phba->mbox_mem_pool);
625 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
630 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
632 phba->link_state = LPFC_HBA_ERROR;
636 lpfc_config_async(phba, pmb, LPFC_ELS_RING);
638 pmb->vport = phba->pport;
639 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
642 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
646 mempool_free(pmb, phba->mbox_mem_pool);
650 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
652 phba->link_state = LPFC_HBA_ERROR;
656 lpfc_dump_wakeup_param(phba, pmb);
658 pmb->vport = phba->pport;
659 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
662 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
665 mempool_free(pmb, phba->mbox_mem_pool);
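
Lines 600-607 are the timer-arming tail of lpfc_config_port_post(): the heartbeat and error-attention poll timers are (re)armed relative to jiffies and the heartbeat state bits reset. As a sketch (LPFC_HB_MBOX_INTERVAL is assumed here to be the driver's heartbeat period in seconds):

	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	clear_bit(HBA_HBEAT_INP, &phba->hba_flag);	/* no heartbeat in flight */
	clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);	/* no timeout pending */
	phba->last_completion_time = jiffies;

	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
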
673 * @phba: Pointer to HBA context object.
679 lpfc_sli4_refresh_params(struct lpfc_hba *phba)
686 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
694 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
698 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
700 mempool_free(mboxq, phba->mbox_mem_pool);
704 phba->sli4_hba.pc_sli4_params.mi_cap =
708 if (phba->cfg_enable_mi)
709 phba->sli4_hba.pc_sli4_params.mi_ver =
712 phba->sli4_hba.pc_sli4_params.mi_ver = 0;
714 phba->sli4_hba.pc_sli4_params.cmf =
716 phba->sli4_hba.pc_sli4_params.pls =
719 mempool_free(mboxq, phba->mbox_mem_pool);
725 * @phba: pointer to lpfc hba data structure.
738 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
740 return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
745 * @phba: pointer to lpfc hba data structure.
759 lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
762 struct lpfc_vport *vport = phba->pport;
767 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
769 phba->link_state = LPFC_HBA_ERROR;
775 if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
776 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
777 !(phba->lmt & LMT_1Gb)) ||
778 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
779 !(phba->lmt & LMT_2Gb)) ||
780 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
781 !(phba->lmt & LMT_4Gb)) ||
782 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
783 !(phba->lmt & LMT_8Gb)) ||
784 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
785 !(phba->lmt & LMT_10Gb)) ||
786 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
787 !(phba->lmt & LMT_16Gb)) ||
788 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
789 !(phba->lmt & LMT_32Gb)) ||
790 ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_64G) &&
791 !(phba->lmt & LMT_64Gb))) {
793 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
796 phba->cfg_link_speed);
797 phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
799 lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
801 if (phba->sli_rev < LPFC_SLI_REV4)
802 lpfc_set_loopback_flag(phba);
803 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
805 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
809 if (phba->sli_rev <= LPFC_SLI_REV3) {
811 writel(0, phba->HCregaddr);
812 readl(phba->HCregaddr); /* flush */
814 writel(0xffffffff, phba->HAregaddr);
815 readl(phba->HAregaddr); /* flush */
817 phba->link_state = LPFC_HBA_ERROR;
819 mempool_free(pmb, phba->mbox_mem_pool);
822 phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
824 mempool_free(pmb, phba->mbox_mem_pool);
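
The validation at 775-791 is a single if-chain pairing each requested user link speed with the LMT capability bit the adapter must advertise; if any pair fails, 797 falls back to autonegotiation. The same check written table-driven, as an illustrative rewrite rather than the driver's code:

	static const struct {
		uint32_t user_speed;	/* LPFC_USER_LINK_SPEED_xG */
		uint32_t lmt_bit;	/* matching LMT_xGb capability */
	} speed_map[] = {
		{ LPFC_USER_LINK_SPEED_1G,  LMT_1Gb  },
		{ LPFC_USER_LINK_SPEED_2G,  LMT_2Gb  },
		{ LPFC_USER_LINK_SPEED_4G,  LMT_4Gb  },
		{ LPFC_USER_LINK_SPEED_8G,  LMT_8Gb  },
		{ LPFC_USER_LINK_SPEED_10G, LMT_10Gb },
		{ LPFC_USER_LINK_SPEED_16G, LMT_16Gb },
		{ LPFC_USER_LINK_SPEED_32G, LMT_32Gb },
		{ LPFC_USER_LINK_SPEED_64G, LMT_64Gb },
	};
	bool unsupported = phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX;
	int i;

	for (i = 0; i < ARRAY_SIZE(speed_map); i++)
		if (phba->cfg_link_speed == speed_map[i].user_speed &&
		    !(phba->lmt & speed_map[i].lmt_bit))
			unsupported = true;
	if (unsupported)
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
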
831 * @phba: pointer to lpfc hba data structure.
843 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
848 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
850 phba->link_state = LPFC_HBA_ERROR;
854 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
856 lpfc_down_link(phba, pmb);
858 rc = lpfc_sli_issue_mbox(phba, pmb, flag);
860 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
864 mempool_free(pmb, phba->mbox_mem_pool);
868 mempool_free(pmb, phba->mbox_mem_pool);
875 * @phba: pointer to lpfc HBA data structure.
885 lpfc_hba_down_prep(struct lpfc_hba *phba)
890 if (phba->sli_rev <= LPFC_SLI_REV3) {
892 writel(0, phba->HCregaddr);
893 readl(phba->HCregaddr); /* flush */
896 if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
897 lpfc_cleanup_discovery_resources(phba->pport);
899 vports = lpfc_create_vport_work_array(phba);
901 for (i = 0; i <= phba->max_vports &&
904 lpfc_destroy_vport_work_array(phba, vports);
913 * @phba: pointer to lpfc HBA data structure.
923 lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
929 clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
931 while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
933 spin_lock_irq(&phba->hbalock);
934 list_remove_head(&phba->sli4_hba.sp_queue_event,
936 spin_unlock_irq(&phba->hbalock);
942 lpfc_sli_release_iocbq(phba, rspiocbq);
948 lpfc_in_buf_free(phba, &dmabuf->dbuf);
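
The loop at 931-948 is the standard drain idiom for a list shared with interrupt context: detach one element under hbalock, drop the lock, then release the element outside it (the release path may itself take locks). Sketch, with the type-specific free reduced to a comment:

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* outside the lock: lpfc_sli_release_iocbq() for response
		 * iocbs, lpfc_in_buf_free() for received buffers
		 */
	}
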
955 * @phba: pointer to lpfc HBA data structure.
965 lpfc_hba_free_post_buf(struct lpfc_hba *phba)
967 struct lpfc_sli *psli = &phba->sli;
973 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
974 lpfc_sli_hbqbuf_free_all(phba);
978 spin_lock_irq(&phba->hbalock);
980 spin_unlock_irq(&phba->hbalock);
986 lpfc_mbuf_free(phba, mp->virt, mp->phys);
990 spin_lock_irq(&phba->hbalock);
992 spin_unlock_irq(&phba->hbalock);
998 * @phba: pointer to lpfc HBA data structure.
1007 lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
1009 struct lpfc_sli *psli = &phba->sli;
1016 if (phba->sli_rev != LPFC_SLI_REV4) {
1019 spin_lock_irq(&phba->hbalock);
1026 spin_unlock_irq(&phba->hbalock);
1028 lpfc_sli_abort_iocb_ring(phba, pring);
1031 lpfc_sli_cancel_iocbs(phba, &completions,
1035 list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
1046 lpfc_sli_abort_iocb_ring(phba, pring);
1049 lpfc_sli_cancel_iocbs(phba, &completions,
1055 * @phba: pointer to lpfc HBA data structure.
1065 lpfc_hba_down_post_s3(struct lpfc_hba *phba)
1067 lpfc_hba_free_post_buf(phba);
1068 lpfc_hba_clean_txcmplq(phba);
1074 * @phba: pointer to lpfc HBA data structure.
1084 lpfc_hba_down_post_s4(struct lpfc_hba *phba)
1096 lpfc_sli_hbqbuf_free_all(phba);
1097 lpfc_hba_clean_txcmplq(phba);
1109 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
1111 &phba->sli4_hba.lpfc_abts_els_sgl_list, list)
1114 list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
1115 &phba->sli4_hba.lpfc_els_sgl_list);
1118 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
1123 spin_lock_irq(&phba->hbalock);
1125 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
1126 qp = &phba->sli4_hba.hdwq[idx];
1146 spin_unlock_irq(&phba->hbalock);
1148 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
1149 spin_lock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1150 list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1152 spin_unlock_irq(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1155 lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
1159 lpfc_sli4_free_sp_events(phba);
1165 * @phba: pointer to lpfc HBA data structure.
1175 lpfc_hba_down_post(struct lpfc_hba *phba)
1177 return (*phba->lpfc_hba_down_post)(phba);
1195 struct lpfc_hba *phba;
1199 phba = from_timer(phba, t, hb_tmofunc);
1202 spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
1203 tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
1205 phba->pport->work_port_events |= WORKER_HB_TMO;
1206 spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
1210 lpfc_worker_wake_up(phba);
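
The fragments at 1195-1210 assemble into the complete heartbeat timer callback; a reconstruction (sketch) shows the idiom in one piece: recover the hba with from_timer(), post the work event at most once under the port lock, and only then wake the worker:

	static void lpfc_hb_timeout(struct timer_list *t)
	{
		struct lpfc_hba *phba = from_timer(phba, t, hb_tmofunc);
		uint32_t tmo_posted;
		unsigned long iflag;

		spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
		tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
		if (!tmo_posted)
			phba->pport->work_port_events |= WORKER_HB_TMO;
		spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

		if (!tmo_posted)
			lpfc_worker_wake_up(phba);
	}
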
1229 struct lpfc_hba *phba;
1231 phba = from_timer(phba, t, rrq_tmr);
1232 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
1233 clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
1237 set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
1238 lpfc_worker_wake_up(phba);
1243 * @phba: pointer to lpfc hba data structure.
1258 lpfc_hb_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
1260 clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
1261 clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
1264 mempool_free(pmboxq, phba->mbox_mem_pool);
1265 if (!test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag) &&
1266 !(phba->link_state == LPFC_HBA_ERROR) &&
1267 !test_bit(FC_UNLOADING, &phba->pport->load_flag))
1268 mod_timer(&phba->hb_tmofunc,
1285 struct lpfc_hba *phba = container_of(to_delayed_work(work),
1294 if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
1297 if (phba->link_state == LPFC_HBA_ERROR ||
1298 test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag) ||
1299 phba->cmf_active_mode != LPFC_CFG_OFF)
1303 hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
1310 idle_stat = &phba->sli4_hba.idle_stat[i];
1340 schedule_delayed_work(&phba->idle_stat_delay_work,
1347 struct lpfc_hba *phba = container_of(to_delayed_work(work),
1355 if (!phba->cfg_auto_imax ||
1356 test_bit(FC_UNLOADING, &phba->pport->load_flag))
1359 if (phba->link_state == LPFC_HBA_ERROR ||
1360 test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
1363 ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay),
1368 for (i = 0; i < phba->cfg_irq_chann; i++) {
1370 eq = phba->sli4_hba.hba_eq_hdl[i].eq;
1380 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i);
1393 eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info,
1399 lpfc_modify_hba_eq_delay(phba, eq->hdwq, 1,
1407 queue_delayed_work(phba->wq, &phba->eq_delay_work,
1413 * @phba: pointer to lpfc hba data structure.
1418 static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
1423 hwq_count = phba->cfg_hdw_queue;
1426 lpfc_adjust_pvt_pool_count(phba, i);
1429 lpfc_adjust_high_watermark(phba, i);
1433 lpfc_snapshot_mxp(phba, i);
1440 * @phba: pointer to lpfc hba data structure.
1447 lpfc_issue_hb_mbox(struct lpfc_hba *phba)
1453 if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))
1456 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1460 lpfc_heart_beat(phba, pmboxq);
1462 pmboxq->vport = phba->pport;
1463 retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
1466 mempool_free(pmboxq, phba->mbox_mem_pool);
1469 set_bit(HBA_HBEAT_INP, &phba->hba_flag);
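
Stitched from 1447-1469, lpfc_issue_hb_mbox() issues the heartbeat asynchronously: guard on the in-progress bit, build the command, issue with MBX_NOWAIT, and set the bit only on success. Sketch (the mbox_cmpl assignment is inferred from the completion handler at 1258):

	if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))
		return 0;				/* one already in flight */
	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq)
		return -ENOMEM;
	lpfc_heart_beat(phba, pmboxq);
	pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;		/* async completion */
	pmboxq->vport = phba->pport;
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return -EIO;
	}
	set_bit(HBA_HBEAT_INP, &phba->hba_flag);
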
1476 * @phba: pointer to lpfc hba data structure.
1485 lpfc_issue_hb_tmo(struct lpfc_hba *phba)
1487 if (phba->cfg_enable_hba_heartbeat)
1489 set_bit(HBA_HBEAT_TMO, &phba->hba_flag);
1494 * @phba: pointer to lpfc hba data structure.
1509 lpfc_hb_timeout_handler(struct lpfc_hba *phba)
1515 struct lpfc_sli *psli = &phba->sli;
1518 if (phba->cfg_xri_rebalancing) {
1520 lpfc_hb_mxp_handler(phba);
1523 vports = lpfc_create_vport_work_array(phba);
1525 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
1529 lpfc_destroy_vport_work_array(phba, vports);
1531 if (phba->link_state == LPFC_HBA_ERROR ||
1532 test_bit(FC_UNLOADING, &phba->pport->load_flag) ||
1533 test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
1536 if (phba->elsbuf_cnt &&
1537 (phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
1538 spin_lock_irq(&phba->hbalock);
1539 list_splice_init(&phba->elsbuf, &completions);
1540 phba->elsbuf_cnt = 0;
1541 phba->elsbuf_prev_cnt = 0;
1542 spin_unlock_irq(&phba->hbalock);
1547 lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
1551 phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
1554 if (phba->cfg_enable_hba_heartbeat) {
1556 spin_lock_irq(&phba->pport->work_port_lock);
1557 if (time_after(phba->last_completion_time +
1560 spin_unlock_irq(&phba->pport->work_port_lock);
1561 if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))
1567 spin_unlock_irq(&phba->pport->work_port_lock);
1570 if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) {
1576 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1580 - phba->last_completion_time));
1586 retval = lpfc_issue_hb_mbox(phba);
1591 phba->skipped_hb = 0;
1592 } else if (time_before_eq(phba->last_completion_time,
1593 phba->skipped_hb)) {
1594 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
1598 - phba->last_completion_time));
1600 phba->skipped_hb = jiffies;
1607 if (test_bit(HBA_HBEAT_TMO, &phba->hba_flag)) {
1608 retval = lpfc_issue_hb_mbox(phba);
1618 mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
1623 * @phba: pointer to lpfc hba data structure.
1629 lpfc_offline_eratt(struct lpfc_hba *phba)
1631 struct lpfc_sli *psli = &phba->sli;
1633 spin_lock_irq(&phba->hbalock);
1635 spin_unlock_irq(&phba->hbalock);
1636 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1638 lpfc_offline(phba);
1639 lpfc_reset_barrier(phba);
1640 spin_lock_irq(&phba->hbalock);
1641 lpfc_sli_brdreset(phba);
1642 spin_unlock_irq(&phba->hbalock);
1643 lpfc_hba_down_post(phba);
1644 lpfc_sli_brdready(phba, HS_MBRDY);
1645 lpfc_unblock_mgmt_io(phba);
1646 phba->link_state = LPFC_HBA_ERROR;
1652 * @phba: pointer to lpfc hba data structure.
1658 lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
1660 spin_lock_irq(&phba->hbalock);
1661 if (phba->link_state == LPFC_HBA_ERROR &&
1662 test_bit(HBA_PCI_ERR, &phba->bit_flags)) {
1663 spin_unlock_irq(&phba->hbalock);
1666 phba->link_state = LPFC_HBA_ERROR;
1667 spin_unlock_irq(&phba->hbalock);
1669 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1670 lpfc_sli_flush_io_rings(phba);
1671 lpfc_offline(phba);
1672 lpfc_hba_down_post(phba);
1673 lpfc_unblock_mgmt_io(phba);
1678 * @phba: pointer to lpfc hba data structure.
1686 lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
1688 uint32_t old_host_status = phba->work_hs;
1689 struct lpfc_sli *psli = &phba->sli;
1694 if (pci_channel_offline(phba->pcidev)) {
1695 clear_bit(DEFER_ERATT, &phba->hba_flag);
1699 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1702 phba->work_hs, phba->work_status[0],
1703 phba->work_status[1]);
1705 spin_lock_irq(&phba->hbalock);
1707 spin_unlock_irq(&phba->hbalock);
1715 lpfc_sli_abort_fcp_rings(phba);
1721 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
1722 lpfc_offline(phba);
1725 while (phba->work_hs & HS_FFER1) {
1727 if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
1728 phba->work_hs = UNPLUG_ERR;
1732 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
1733 phba->work_hs = 0;
1743 if (!phba->work_hs && !test_bit(FC_UNLOADING, &phba->pport->load_flag))
1744 phba->work_hs = old_host_status & ~HS_FFER1;
1746 clear_bit(DEFER_ERATT, &phba->hba_flag);
1747 phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
1748 phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
1752 lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
1759 shost = lpfc_shost_from_vport(phba->pport);
1768 * @phba: pointer to lpfc hba data structure.
1777 lpfc_handle_eratt_s3(struct lpfc_hba *phba)
1779 struct lpfc_vport *vport = phba->pport;
1780 struct lpfc_sli *psli = &phba->sli;
1789 if (pci_channel_offline(phba->pcidev)) {
1790 clear_bit(DEFER_ERATT, &phba->hba_flag);
1795 if (!phba->cfg_enable_hba_reset)
1799 lpfc_board_errevt_to_mgmt(phba);
1801 if (test_bit(DEFER_ERATT, &phba->hba_flag))
1802 lpfc_handle_deferred_eratt(phba);
1804 if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
1805 if (phba->work_hs & HS_FFER6)
1807 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1810 phba->work_hs, phba->work_status[0],
1811 phba->work_status[1]);
1812 if (phba->work_hs & HS_FFER8)
1814 lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
1817 phba->work_hs, phba->work_status[0],
1818 phba->work_status[1]);
1820 spin_lock_irq(&phba->hbalock);
1822 spin_unlock_irq(&phba->hbalock);
1830 lpfc_sli_abort_fcp_rings(phba);
1836 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
1837 lpfc_offline(phba);
1838 lpfc_sli_brdrestart(phba);
1839 if (lpfc_online(phba) == 0) { /* Initialize the HBA */
1840 lpfc_unblock_mgmt_io(phba);
1843 lpfc_unblock_mgmt_io(phba);
1844 } else if (phba->work_hs & HS_CRIT_TEMP) {
1845 temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
1850 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1854 temperature, phba->work_hs,
1855 phba->work_status[0], phba->work_status[1]);
1857 shost = lpfc_shost_from_vport(phba->pport);
1864 spin_lock_irq(&phba->hbalock);
1865 phba->over_temp_state = HBA_OVER_TEMP;
1866 spin_unlock_irq(&phba->hbalock);
1867 lpfc_offline_eratt(phba);
1874 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1877 phba->work_hs,
1878 phba->work_status[0], phba->work_status[1]);
1886 lpfc_offline_eratt(phba);
1893 * @phba: pointer to lpfc hba data structure.
1903 lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
1910 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
1916 rc = lpfc_sli4_pdev_status_reg_wait(phba);
1923 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1932 spin_lock_irq(&phba->hbalock);
1933 phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
1934 if (phba->sli.mbox_active) {
1935 mboxq = phba->sli.mbox_active;
1937 __lpfc_mbox_cmpl_put(phba, mboxq);
1938 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1939 phba->sli.mbox_active = NULL;
1941 spin_unlock_irq(&phba->hbalock);
1944 lpfc_offline_prep(phba, mbx_action);
1945 lpfc_sli_flush_io_rings(phba);
1946 lpfc_offline(phba);
1948 lpfc_sli4_disable_intr(phba);
1949 rc = lpfc_sli_brdrestart(phba);
1951 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1956 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
1958 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1962 phba->intr_mode = intr_mode;
1963 rc = lpfc_online(phba);
1965 lpfc_unblock_mgmt_io(phba);
1972 * @phba: pointer to lpfc hba data structure.
1978 lpfc_handle_eratt_s4(struct lpfc_hba *phba)
1980 struct lpfc_vport *vport = phba->pport;
1996 if (pci_channel_offline(phba->pcidev)) {
1997 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1999 lpfc_sli_flush_io_rings(phba);
2004 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
2008 phba->sli4_hba.u.if_type0.UERRLOregaddr,
2011 phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
2016 if (!test_bit(HBA_RECOVERABLE_UE, &phba->hba_flag)) {
2017 lpfc_sli4_offline_eratt(phba);
2020 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2023 for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
2024 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2037 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2046 if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
2051 rc = lpfc_sli4_port_sta_fn_reset(phba,
2055 lpfc_printf_log(phba, KERN_ERR,
2062 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2065 phba->link_state = LPFC_HBA_ERROR;
2071 phba->sli4_hba.u.if_type2.STATUSregaddr,
2075 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2077 readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
2078 lpfc_sli4_offline_eratt(phba);
2081 reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
2082 reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
2084 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2089 phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
2094 shost = lpfc_shost_from_vport(phba->pport);
2101 spin_lock_irq(&phba->hbalock);
2102 phba->over_temp_state = HBA_OVER_TEMP;
2103 spin_unlock_irq(&phba->hbalock);
2104 lpfc_sli4_offline_eratt(phba);
2109 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2115 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
2119 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2123 if (!phba->cfg_enable_hba_reset)
2127 rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
2138 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2140 lpfc_sli4_offline_eratt(phba);
2146 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
2149 lpfc_board_errevt_to_mgmt(phba);
2160 * @phba: pointer to lpfc HBA data structure.
2170 lpfc_handle_eratt(struct lpfc_hba *phba)
2172 (*phba->lpfc_handle_eratt)(phba);
2177 * @phba: pointer to lpfc hba data structure.
2183 lpfc_handle_latt(struct lpfc_hba *phba)
2185 struct lpfc_vport *vport = phba->pport;
2186 struct lpfc_sli *psli = &phba->sli;
2191 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2197 rc = lpfc_mbox_rsrc_prep(phba, pmb);
2200 mempool_free(pmb, phba->mbox_mem_pool);
2205 lpfc_els_flush_all_cmd(phba);
2207 lpfc_read_topology(phba, pmb, pmb->ctx_buf);
2211 phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
2212 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2219 spin_lock_irq(&phba->hbalock);
2220 writel(HA_LATT, phba->HAregaddr);
2221 readl(phba->HAregaddr); /* flush */
2222 spin_unlock_irq(&phba->hbalock);
2227 phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
2228 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2231 spin_lock_irq(&phba->hbalock);
2233 control = readl(phba->HCregaddr);
2235 writel(control, phba->HCregaddr);
2236 readl(phba->HCregaddr); /* flush */
2239 writel(HA_LATT, phba->HAregaddr);
2240 readl(phba->HAregaddr); /* flush */
2241 spin_unlock_irq(&phba->hbalock);
2242 lpfc_linkdown(phba);
2243 phba->link_state = LPFC_HBA_ERROR;
2245 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2252 lpfc_fill_vpd(struct lpfc_hba *phba, uint8_t *vpd, int length, int *pindex)
2265 phba->SerialNumber[j++] = vpd[(*pindex)++];
2269 phba->SerialNumber[j] = 0;
2272 phba->vpd_flag |= VPD_MODEL_DESC;
2279 phba->ModelDesc[j++] = vpd[(*pindex)++];
2283 phba->ModelDesc[j] = 0;
2286 phba->vpd_flag |= VPD_MODEL_NAME;
2293 phba->ModelName[j++] = vpd[(*pindex)++];
2297 phba->ModelName[j] = 0;
2300 phba->vpd_flag |= VPD_PROGRAM_TYPE;
2307 phba->ProgramType[j++] = vpd[(*pindex)++];
2311 phba->ProgramType[j] = 0;
2314 phba->vpd_flag |= VPD_PORT;
2321 if ((phba->sli_rev == LPFC_SLI_REV4) &&
2322 (phba->sli4_hba.pport_name_sta ==
2327 phba->Port[j++] = vpd[(*pindex)++];
2331 if ((phba->sli_rev != LPFC_SLI_REV4) ||
2332 (phba->sli4_hba.pport_name_sta ==
2334 phba->Port[j] = 0;
2348 * @phba: pointer to lpfc hba data structure.
2354 * ModelDesc, etc. fields of the phba data structure will be populated.
2361 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
2373 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2399 lpfc_fill_vpd(phba, vpd, Length, &index);
2416 * @phba: pointer to lpfc hba data structure.
2427 lpfc_get_atto_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2429 uint16_t sub_dev_id = phba->pcidev->subsystem_device;
2495 phba->Port);
2500 * @phba: pointer to lpfc hba data structure.
2511 lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
2514 uint16_t dev_id = phba->pcidev->device;
2528 if (phba->pcidev->vendor == PCI_VENDOR_ID_ATTO) {
2529 lpfc_get_atto_model_desc(phba, mdp, descp);
2533 if (phba->lmt & LMT_64Gb)
2535 else if (phba->lmt & LMT_32Gb)
2537 else if (phba->lmt & LMT_16Gb)
2539 else if (phba->lmt & LMT_10Gb)
2541 else if (phba->lmt & LMT_8Gb)
2543 else if (phba->lmt & LMT_4Gb)
2545 else if (phba->lmt & LMT_2Gb)
2547 else if (phba->lmt & LMT_1Gb)
2552 vp = &phba->vpd;
2756 phba->Port);
2771 * @phba: pointer to lpfc hba data structure.
2782 lpfc_sli3_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
2793 iocb = lpfc_sli_get_iocbq(phba);
2804 mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
2807 lpfc_sli_release_iocbq(phba, iocb);
2817 mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
2821 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2823 lpfc_sli_release_iocbq(phba, iocb);
2849 if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
2851 lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
2855 lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
2859 lpfc_sli_release_iocbq(phba, iocb);
2863 lpfc_sli_ringpostbuf_put(phba, pring, mp1);
2865 lpfc_sli_ringpostbuf_put(phba, pring, mp2);
2873 * @phba: pointer to lpfc hba data structure.
2883 lpfc_post_rcv_buf(struct lpfc_hba *phba)
2885 struct lpfc_sli *psli = &phba->sli;
2888 lpfc_sli3_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
2988 * @phba: pointer to lpfc hba data structure.
2994 lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
2998 uint32_t *pwwnn = (uint32_t *)phba->wwnn;
3008 lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
3027 struct lpfc_hba *phba = vport->phba;
3031 if (phba->link_state > LPFC_LINK_DOWN)
3035 if (lpfc_is_vmid_enabled(phba))
3078 pci_channel_offline(phba->pcidev))
3079 lpfc_sli_flush_io_rings(vport->phba);
3129 * @phba: pointer to lpfc hba data structure.
3135 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3138 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
3141 del_timer(&phba->fcf.redisc_wait);
3146 * @phba: pointer to lpfc hba data structure.
3154 lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
3156 spin_lock_irq(&phba->hbalock);
3157 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
3159 spin_unlock_irq(&phba->hbalock);
3162 __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3164 phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
3165 spin_unlock_irq(&phba->hbalock);
3170 * @phba: pointer to lpfc hba data structure.
3177 lpfc_cmf_stop(struct lpfc_hba *phba)
3183 if (!phba->sli4_hba.pc_sli4_params.cmf)
3186 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3190 hrtimer_cancel(&phba->cmf_stats_timer);
3191 hrtimer_cancel(&phba->cmf_timer);
3194 atomic_set(&phba->cmf_busy, 0);
3196 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3202 atomic_set(&phba->cmf_bw_wait, 0);
3205 queue_work(phba->wq, &phba->unblock_request_work);
3209 lpfc_get_max_line_rate(struct lpfc_hba *phba)
3211 uint64_t rate = lpfc_sli_port_speed_get(phba);
3217 lpfc_cmf_signal_init(struct lpfc_hba *phba)
3219 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3223 phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
3224 phba->cmf_max_line_rate = lpfc_get_max_line_rate(phba);
3225 phba->cmf_link_byte_count = div_u64(phba->cmf_max_line_rate *
3226 phba->cmf_interval_rate, 1000);
3227 phba->cmf_max_bytes_per_interval = phba->cmf_link_byte_count;
3230 lpfc_issue_cmf_sync_wqe(phba, 0, 0);
3235 * @phba: pointer to lpfc hba data structure.
3241 lpfc_cmf_start(struct lpfc_hba *phba)
3247 if (!phba->sli4_hba.pc_sli4_params.cmf ||
3248 phba->cmf_active_mode == LPFC_CFG_OFF)
3252 lpfc_init_congestion_buf(phba);
3254 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
3255 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
3256 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
3257 atomic_set(&phba->cgn_sync_warn_cnt, 0);
3259 atomic_set(&phba->cmf_busy, 0);
3261 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
3267 phba->cmf_latency.tv_sec = 0;
3268 phba->cmf_latency.tv_nsec = 0;
3270 lpfc_cmf_signal_init(phba);
3272 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
3275 phba->cmf_timer_cnt = 0;
3276 hrtimer_start(&phba->cmf_timer,
3279 hrtimer_start(&phba->cmf_stats_timer,
3283 ktime_get_real_ts64(&phba->cmf_latency);
3285 atomic_set(&phba->cmf_bw_wait, 0);
3286 atomic_set(&phba->cmf_stop_io, 0);
3291 * @phba: pointer to lpfc hba data structure.
3297 lpfc_stop_hba_timers(struct lpfc_hba *phba)
3299 if (phba->pport)
3300 lpfc_stop_vport_timers(phba->pport);
3301 cancel_delayed_work_sync(&phba->eq_delay_work);
3302 cancel_delayed_work_sync(&phba->idle_stat_delay_work);
3303 del_timer_sync(&phba->sli.mbox_tmo);
3304 del_timer_sync(&phba->fabric_block_timer);
3305 del_timer_sync(&phba->eratt_poll);
3306 del_timer_sync(&phba->hb_tmofunc);
3307 if (phba->sli_rev == LPFC_SLI_REV4) {
3308 del_timer_sync(&phba->rrq_tmr);
3309 clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
3311 clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
3312 clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
3314 switch (phba->pci_dev_grp) {
3317 del_timer_sync(&phba->fcp_poll_timer);
3321 lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
3324 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3326 phba->pci_dev_grp);
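
lpfc_stop_hba_timers() (3297-3312) encodes a teardown order worth noting: cancel_delayed_work_sync() and del_timer_sync() both wait for a running handler to finish, so the state bits are cleared only once nothing can fire again. Condensed sketch:

	cancel_delayed_work_sync(&phba->eq_delay_work);
	cancel_delayed_work_sync(&phba->idle_stat_delay_work);
	del_timer_sync(&phba->hb_tmofunc);	/* waits out a running handler */
	del_timer_sync(&phba->eratt_poll);
	/* safe only now: no handler can re-set these */
	clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
	clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
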
3334 * @phba: pointer to lpfc hba data structure.
3344 lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
3350 spin_lock_irqsave(&phba->hbalock, iflag);
3351 phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
3352 spin_unlock_irqrestore(&phba->hbalock, iflag);
3356 spin_lock_irqsave(&phba->hbalock, iflag);
3357 if (phba->sli.mbox_active) {
3358 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
3362 timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
3363 phba->sli.mbox_active) * 1000) + jiffies;
3365 spin_unlock_irqrestore(&phba->hbalock, iflag);
3368 while (phba->sli.mbox_active) {
3372 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3375 phba->sli.sli_flag, actcmd);
3383 * @phba: pointer to lpfc hba data structure.
3390 lpfc_sli4_node_prep(struct lpfc_hba *phba)
3396 if (phba->sli_rev != LPFC_SLI_REV4)
3399 vports = lpfc_create_vport_work_array(phba);
3403 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3410 rpi = lpfc_sli4_alloc_rpi(phba);
3424 lpfc_destroy_vport_work_array(phba, vports);
3429 * @phba: pointer to lpfc hba data structure.
3434 static void lpfc_create_expedite_pool(struct lpfc_hba *phba)
3442 epd_pool = &phba->epd_pool;
3443 qp = &phba->sli4_hba.hdwq[0];
3464 * @phba: pointer to lpfc hba data structure.
3469 static void lpfc_destroy_expedite_pool(struct lpfc_hba *phba)
3477 epd_pool = &phba->epd_pool;
3478 qp = &phba->sli4_hba.hdwq[0];
3496 * @phba: pointer to lpfc hba data structure.
3502 void lpfc_create_multixri_pools(struct lpfc_hba *phba)
3515 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3517 phba->cfg_hdw_queue, phba->sli4_hba.num_present_cpu,
3518 phba->sli4_hba.io_xri_cnt);
3520 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3521 lpfc_create_expedite_pool(phba);
3523 hwq_count = phba->cfg_hdw_queue;
3524 count_per_hwq = phba->sli4_hba.io_xri_cnt / hwq_count;
3530 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3534 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3535 lpfc_destroy_expedite_pool(phba);
3539 qp = &phba->sli4_hba.hdwq[j];
3543 phba->cfg_xri_rebalancing = 0;
3547 qp = &phba->sli4_hba.hdwq[i];
3565 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3585 * @phba: pointer to lpfc hba data structure.
3589 static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba)
3601 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3602 lpfc_destroy_expedite_pool(phba);
3604 if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
3605 lpfc_sli_flush_io_rings(phba);
3607 hwq_count = phba->cfg_hdw_queue;
3610 qp = &phba->sli4_hba.hdwq[i];
3623 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3644 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3668 * @phba: pointer to lpfc hba data structure.
3679 lpfc_online(struct lpfc_hba *phba)
3686 if (!phba)
3688 vport = phba->pport;
3693 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3696 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
3698 if (phba->sli_rev == LPFC_SLI_REV4) {
3699 if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
3700 lpfc_unblock_mgmt_io(phba);
3703 spin_lock_irq(&phba->hbalock);
3704 if (!phba->sli4_hba.max_cfg_param.vpi_used)
3706 spin_unlock_irq(&phba->hbalock);
3711 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
3712 !phba->nvmet_support) {
3713 error = lpfc_nvme_create_localport(phba->pport);
3715 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3720 lpfc_sli_queue_init(phba);
3721 if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
3722 lpfc_unblock_mgmt_io(phba);
3727 vports = lpfc_create_vport_work_array(phba);
3729 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3731 if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
3734 if (phba->sli_rev == LPFC_SLI_REV4) {
3744 lpfc_destroy_vport_work_array(phba, vports);
3746 if (phba->cfg_xri_rebalancing)
3747 lpfc_create_multixri_pools(phba);
3749 lpfc_cpuhp_add(phba);
3751 lpfc_unblock_mgmt_io(phba);
3757 * @phba: pointer to lpfc hba data structure.
3767 lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
3771 spin_lock_irqsave(&phba->hbalock, iflag);
3772 phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
3773 spin_unlock_irqrestore(&phba->hbalock, iflag);
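
3344-3375 and 3767-3773 are a matched pair: lpfc_block_mgmt_io() sets LPFC_BLOCK_MGMT_IO in sli_flag under hbalock and then waits out any active mailbox, while the unblock side just clears the flag the same way. The unblock side reconstructs completely from the fragments above:

	void
	lpfc_unblock_mgmt_io(struct lpfc_hba *phba)
	{
		unsigned long iflag;

		spin_lock_irqsave(&phba->hbalock, iflag);
		phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
		spin_unlock_irqrestore(&phba->hbalock, iflag);
	}
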
3778 * @phba: pointer to lpfc hba data structure.
3786 lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
3788 struct lpfc_vport *vport = phba->pport;
3799 lpfc_block_mgmt_io(phba, mbx_action);
3801 lpfc_linkdown(phba);
3803 offline = pci_channel_offline(phba->pcidev);
3804 hba_pci_err = test_bit(HBA_PCI_ERR, &phba->bit_flags);
3807 vports = lpfc_create_vport_work_array(phba);
3809 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3832 if (phba->sli_rev == LPFC_SLI_REV4)
3843 if (phba->sli_rev == LPFC_SLI_REV4) {
3850 lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
3877 lpfc_destroy_vport_work_array(phba, vports);
3879 lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
3881 if (phba->wq)
3882 flush_workqueue(phba->wq);
3887 * @phba: pointer to lpfc hba data structure.
3894 lpfc_offline(struct lpfc_hba *phba)
3900 if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
3904 lpfc_stop_port(phba);
3909 lpfc_nvmet_destroy_targetport(phba);
3910 lpfc_nvme_destroy_localport(phba->pport);
3912 vports = lpfc_create_vport_work_array(phba);
3914 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
3916 lpfc_destroy_vport_work_array(phba, vports);
3917 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
3921 lpfc_sli_hba_down(phba);
3922 spin_lock_irq(&phba->hbalock);
3923 phba->work_ha = 0;
3924 spin_unlock_irq(&phba->hbalock);
3925 vports = lpfc_create_vport_work_array(phba);
3927 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
3934 lpfc_destroy_vport_work_array(phba, vports);
3938 if (test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
3939 __lpfc_cpuhp_remove(phba);
3941 if (phba->cfg_xri_rebalancing)
3942 lpfc_destroy_multixri_pools(phba);
3947 * @phba: pointer to lpfc hba data structure.
3954 lpfc_scsi_free(struct lpfc_hba *phba)
3958 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
3961 spin_lock_irq(&phba->hbalock);
3965 spin_lock(&phba->scsi_buf_list_put_lock);
3966 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
3969 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3972 phba->total_scsi_bufs--;
3974 spin_unlock(&phba->scsi_buf_list_put_lock);
3976 spin_lock(&phba->scsi_buf_list_get_lock);
3977 list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
3980 dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
3983 phba->total_scsi_bufs--;
3985 spin_unlock(&phba->scsi_buf_list_get_lock);
3986 spin_unlock_irq(&phba->hbalock);
3991 * @phba: pointer to lpfc hba data structure.
3998 lpfc_io_free(struct lpfc_hba *phba)
4004 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4005 qp = &phba->sli4_hba.hdwq[idx];
4013 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4015 if (phba->cfg_xpsgl && !phba->nvmet_support)
4016 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4017 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4029 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4031 if (phba->cfg_xpsgl && !phba->nvmet_support)
4032 lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
4033 lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
4043 * @phba: pointer to lpfc hba data structure.
4054 lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
4064 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4066 if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
4068 xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
4069 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4071 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4078 lpfc_printf_log(phba, KERN_ERR,
4086 sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
4090 lpfc_printf_log(phba, KERN_ERR,
4102 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4104 &phba->sli4_hba.lpfc_els_sgl_list);
4105 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4106 } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
4108 xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
4109 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4111 "%d to %d\n", phba->sli4_hba.els_xri_cnt,
4113 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
4114 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
4121 __lpfc_mbuf_free(phba, sglq_entry->virt,
4127 &phba->sli4_hba.lpfc_els_sgl_list);
4128 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
4130 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4133 phba->sli4_hba.els_xri_cnt = els_xri_cnt;
4139 &phba->sli4_hba.lpfc_els_sgl_list, list) {
4140 lxri = lpfc_sli4_next_xritag(phba);
4142 lpfc_printf_log(phba, KERN_ERR,
4150 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4155 lpfc_free_els_sgl_list(phba);
4161 * @phba: pointer to lpfc hba data structure.
4172 lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
4183 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4186 nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4187 if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
4189 xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
4190 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4192 phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
4198 lpfc_printf_log(phba, KERN_ERR,
4206 sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
4210 lpfc_printf_log(phba, KERN_ERR,
4219 phba->cfg_sg_dma_buf_size);
4223 spin_lock_irq(&phba->hbalock);
4224 spin_lock(&phba->sli4_hba.sgl_list_lock);
4226 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4227 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4228 spin_unlock_irq(&phba->hbalock);
4229 } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
4231 xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
4232 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4234 "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
4236 spin_lock_irq(&phba->hbalock);
4237 spin_lock(&phba->sli4_hba.sgl_list_lock);
4238 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
4245 lpfc_nvmet_buf_free(phba, sglq_entry->virt,
4251 &phba->sli4_hba.lpfc_nvmet_sgl_list);
4252 spin_unlock(&phba->sli4_hba.sgl_list_lock);
4253 spin_unlock_irq(&phba->hbalock);
4255 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4258 phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
4264 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
4265 lxri = lpfc_sli4_next_xritag(phba);
4267 lpfc_printf_log(phba, KERN_ERR,
4275 sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4280 lpfc_free_nvmet_sgl_list(phba);
4285 lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *cbuf)
4294 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4295 qp = &phba->sli4_hba.hdwq[idx];
4347 lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
4354 qp = phba->sli4_hba.hdwq;
4357 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
4363 qp = &phba->sli4_hba.hdwq[idx];
4381 * @phba: pointer to lpfc hba data structure.
4392 lpfc_sli4_io_sgl_update(struct lpfc_hba *phba)
4405 els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
4406 io_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
4407 phba->sli4_hba.io_xri_max = io_xri_max;
4409 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4412 phba->sli4_hba.io_xri_cnt,
4413 phba->sli4_hba.io_xri_max,
4416 cnt = lpfc_io_buf_flush(phba, &io_sgl_list);
4418 if (phba->sli4_hba.io_xri_cnt > phba->sli4_hba.io_xri_max) {
4420 io_xri_cnt = phba->sli4_hba.io_xri_cnt -
4421 phba->sli4_hba.io_xri_max;
4427 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4433 phba->sli4_hba.io_xri_cnt -= io_xri_cnt;
4439 phba->sli4_hba.io_xri_cnt = cnt;
4442 lxri = lpfc_sli4_next_xritag(phba);
4444 lpfc_printf_log(phba, KERN_ERR,
4452 lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4454 cnt = lpfc_io_buf_replenish(phba, &io_sgl_list);
4458 lpfc_io_free(phba);
4464 * @phba: Pointer to lpfc hba data structure.
4477 lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4487 phba->sli4_hba.io_xri_cnt = 0;
4497 lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
4505 if (phba->cfg_xpsgl && !phba->nvmet_support) {
4512 if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
4515 lpfc_printf_log(phba, KERN_ERR,
4520 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4530 lxri = lpfc_sli4_next_xritag(phba);
4532 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4540 iotag = lpfc_sli_next_iotag(phba, pwqeq);
4542 dma_pool_free(phba->lpfc_sg_dma_buf_pool,
4545 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4548 lpfc_sli4_free_xri(phba, lxri);
4552 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
4562 phba->sli4_hba.io_xri_cnt++;
4564 lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
4573 phba, &post_nblist, bcnt);
4581 lpfc_get_wwpn(struct lpfc_hba *phba)
4588 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool,
4594 lpfc_read_nv(phba, mboxq);
4595 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
4597 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4602 mempool_free(mboxq, phba->mbox_mem_pool);
4608 mempool_free(mboxq, phba->mbox_mem_pool);
4609 if (phba->sli_rev == LPFC_SLI_REV4)
4615 static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba)
4617 if (phba->sli_rev == LPFC_SLI_REV4)
4618 if (phba->cfg_xpsgl && !phba->nvmet_support)
4621 return phba->cfg_scsi_seg_cnt;
4623 return phba->cfg_sg_seg_cnt;
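
The identifier search elides the non-matching lines of lpfc_get_sg_tablesize() (4619-4620 among them), which makes the nesting at 4617-4623 ambiguous. A braced sketch of the probable full shape; the LPFC_MAX_SG_TABLESIZE return on the hidden line is an assumption:

	static unsigned short lpfc_get_sg_tablesize(struct lpfc_hba *phba)
	{
		if (phba->sli_rev == LPFC_SLI_REV4) {
			if (phba->cfg_xpsgl && !phba->nvmet_support)
				return LPFC_MAX_SG_TABLESIZE;	/* assumed */
			return phba->cfg_scsi_seg_cnt;
		}
		return phba->cfg_sg_seg_cnt;
	}
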
4628 * @phba: pointer to lpfc hba data structure.
4638 lpfc_vmid_res_alloc(struct lpfc_hba *phba, struct lpfc_vport *vport)
4641 if (phba->sli_rev == LPFC_SLI_REV3) {
4642 phba->cfg_vmid_app_header = 0;
4643 phba->cfg_vmid_priority_tagging = 0;
4646 if (lpfc_is_vmid_enabled(phba)) {
4648 kcalloc(phba->cfg_max_vmid, sizeof(struct lpfc_vmid),
4656 vport->vmid_priority_tagging = phba->cfg_vmid_priority_tagging;
4658 phba->cfg_vmid_inactivity_timeout;
4659 vport->max_vmid = phba->cfg_max_vmid;
4677 * @phba: pointer to lpfc hba data structure.
4692 lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
4704 if (phba->sli_rev < LPFC_SLI_REV4 &&
4705 dev == &phba->pcidev->dev) {
4707 lpfc_sli_brdrestart(phba);
4708 rc = lpfc_sli_chipset_init(phba);
4712 wwn = lpfc_get_wwpn(phba);
4717 lpfc_printf_log(phba, KERN_ERR,
4727 if (dev == &phba->pcidev->dev) {
4728 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
4737 template->sg_tablesize = lpfc_get_sg_tablesize(phba);
4747 template->sg_tablesize = lpfc_get_sg_tablesize(phba);
4755 vport->phba = phba;
4770 vport->cfg_enable_fc4_type = phba->cfg_enable_fc4_type;
4776 if (phba->sli_rev == LPFC_SLI_REV4)
4781 if (phba->sli_rev == LPFC_SLI_REV4) {
4782 if (!phba->cfg_fcp_mq_threshold ||
4783 phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue)
4784 phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue;
4787 phba->cfg_fcp_mq_threshold);
4790 phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
4802 shost->can_queue = phba->cfg_hba_queue_depth - 10;
4803 if (dev != &phba->pcidev->dev) {
4811 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
4815 phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
4818 rc = lpfc_vmid_res_alloc(phba, vport);
4835 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
4836 lpfc_setup_bg(phba, shost);
4838 error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
4842 spin_lock_irq(&phba->port_list_lock);
4843 list_add_tail(&vport->listentry, &phba->port_list);
4844 spin_unlock_irq(&phba->port_list_lock);
4867 struct lpfc_hba *phba = vport->phba;
4873 spin_lock_irq(&phba->port_list_lock);
4875 spin_unlock_irq(&phba->port_list_lock);
4918 struct lpfc_hba *phba = vport->phba;
4928 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4935 phba->link_state <= LPFC_LINK_DOWN) {
4936 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
4950 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
4963 struct lpfc_hba *phba = vport->phba;
4970 if (test_bit(HBA_FCOE_MODE, &phba->hba_flag))
4973 if (phba->lmt & LMT_256Gb)
4975 if (phba->lmt & LMT_128Gb)
4977 if (phba->lmt & LMT_64Gb)
4979 if (phba->lmt & LMT_32Gb)
4981 if (phba->lmt & LMT_16Gb)
4983 if (phba->lmt & LMT_10Gb)
4985 if (phba->lmt & LMT_8Gb)
4987 if (phba->lmt & LMT_4Gb)
4989 if (phba->lmt & LMT_2Gb)
4991 if (phba->lmt & LMT_1Gb)
5005 struct lpfc_hba *phba = vport->phba;
5036 fc_host_max_npiv_vports(shost) = phba->max_vpi;
5042 * @phba: pointer to lpfc hba data structure.
5049 lpfc_stop_port_s3(struct lpfc_hba *phba)
5052 writel(0, phba->HCregaddr);
5053 readl(phba->HCregaddr); /* flush */
5055 writel(0xffffffff, phba->HAregaddr);
5056 readl(phba->HAregaddr); /* flush */
5059 lpfc_stop_hba_timers(phba);
5060 phba->pport->work_port_events = 0;
5065 * @phba: pointer to lpfc hba data structure.
5072 lpfc_stop_port_s4(struct lpfc_hba *phba)
5075 lpfc_stop_hba_timers(phba);
5076 if (phba->pport)
5077 phba->pport->work_port_events = 0;
5078 phba->sli4_hba.intr_enable = 0;
5083 * @phba: Pointer to HBA context object.
5089 lpfc_stop_port(struct lpfc_hba *phba)
5091 phba->lpfc_stop_port(phba);
5093 if (phba->wq)
5094 flush_workqueue(phba->wq);
5099 * @phba: Pointer to hba for which this call is being executed.
5104 lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
5109 mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
5110 spin_lock_irq(&phba->hbalock);
5112 phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
5114 phba->fcf.fcf_flag |= FCF_REDISC_PEND;
5115 spin_unlock_irq(&phba->hbalock);
5131 struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);
5134 spin_lock_irq(&phba->hbalock);
5135 if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
5136 spin_unlock_irq(&phba->hbalock);
5140 phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
5142 phba->fcf.fcf_flag |= FCF_REDISC_EVT;
5143 spin_unlock_irq(&phba->hbalock);
5144 lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
5147 lpfc_worker_wake_up(phba);
5162 struct lpfc_hba *phba = from_timer(phba, t, inactive_vmid_poll);
5166 if (phba->pport->vmid_priority_tagging) {
5168 phba->pport->work_port_events |= WORKER_CHECK_VMID_ISSUE_QFPA;
5172 if (phba->pport->vmid_inactivity_timeout ||
5173 test_bit(FC_DEREGISTER_ALL_APP_ID, &phba->pport->load_flag)) {
5175 phba->pport->work_port_events |= WORKER_CHECK_INACTIVE_VMID;
5179 lpfc_worker_wake_up(phba);
5182 mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
5188 * @phba: pointer to lpfc hba data structure.
5194 lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
5211 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5222 * @phba: pointer to lpfc hba data structure.
5231 lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
5249 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5260 * @phba: pointer to lpfc hba data structure.
5267 lpfc_sli_port_speed_get(struct lpfc_hba *phba)
5271 if (!lpfc_is_link_up(phba))
5274 if (phba->sli_rev <= LPFC_SLI_REV3) {
5275 switch (phba->fc_linkspeed) {
5298 if (phba->sli4_hba.link_state.logical_speed)
5300 phba->sli4_hba.link_state.logical_speed;
5302 link_speed = phba->sli4_hba.link_state.speed;
5309 * @phba: pointer to lpfc hba data structure.
5319 lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
5405 * @phba: pointer to lpfc hba data structure.
5411 lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
5420 att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
5423 phba->fcoe_eventtag = acqe_link->event_tag;
5424 pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5426 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5431 rc = lpfc_mbox_rsrc_prep(phba, pmb);
5433 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5439 lpfc_els_flush_all_cmd(phba);
5442 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
5445 phba->sli.slistat.link_event++;
5448 lpfc_read_topology(phba, pmb, pmb->ctx_buf);
5450 pmb->vport = phba->pport;
5453 phba->sli4_hba.link_state.speed =
5454 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
5456 phba->sli4_hba.link_state.duplex =
5458 phba->sli4_hba.link_state.status =
5460 phba->sli4_hba.link_state.type =
5462 phba->sli4_hba.link_state.number =
5464 phba->sli4_hba.link_state.fault =
5466 phba->sli4_hba.link_state.logical_speed =
5469 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5473 phba->sli4_hba.link_state.speed,
5474 phba->sli4_hba.link_state.topology,
5475 phba->sli4_hba.link_state.status,
5476 phba->sli4_hba.link_state.type,
5477 phba->sli4_hba.link_state.number,
5478 phba->sli4_hba.link_state.logical_speed,
5479 phba->sli4_hba.link_state.fault);
5484 if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
5485 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5500 lpfc_sli4_parse_latt_fault(phba, acqe_link);
5518 lpfc_mbx_cmpl_read_topology(phba, pmb);
5523 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
5529 * @phba: pointer to lpfc hba data structure.
5538 lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
5579 lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
5581 if (!phba->rx_monitor) {
5582 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5585 lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
5592 * @phba: pointer to lpfc hba data structure.
5598 lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag)
5604 if (!phba->cgn_i)
5606 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5612 lpfc_cgn_update_tstamp(phba, &cp->stat_lnk);
5616 lpfc_cgn_update_tstamp(phba, &cp->stat_delivery);
5620 lpfc_cgn_update_tstamp(phba, &cp->stat_peer);
5624 lpfc_cgn_update_tstamp(phba, &cp->stat_fpin);
5626 if (phba->cgn_fpin_frequency &&
5627 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5628 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5639 * @phba: pointer to lpfc hba data structure.
5643 lpfc_cgn_update_tstamp(struct lpfc_hba *phba, struct lpfc_cgn_ts *ts)
5658 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5679 struct lpfc_hba *phba;
5691 phba = container_of(timer, struct lpfc_hba, cmf_stats_timer);
5693 if (!phba->cgn_i)
5695 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
5697 phba->cgn_evt_timestamp = jiffies +
5699 phba->cgn_evt_minute++;
5702 lpfc_cgn_update_tstamp(phba, &cp->base_time);
5704 if (phba->cgn_fpin_frequency &&
5705 phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ) {
5706 value = LPFC_CGN_TIMER_TO_MIN / phba->cgn_fpin_frequency;
5711 lvalue = atomic_read(&phba->cgn_latency_evt_cnt);
5712 latsum = atomic64_read(&phba->cgn_latency_evt);
5713 atomic_set(&phba->cgn_latency_evt_cnt, 0);
5714 atomic64_set(&phba->cgn_latency_evt, 0);
5720 bps = div_u64(phba->rx_block_cnt, LPFC_SEC_MIN) * 512;
5721 phba->rx_block_cnt = 0;
5726 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
5727 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
5728 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
5729 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
5732 value = (uint16_t)(phba->pport->cfg_lun_queue_depth);
5748 dvalue = atomic_read(&phba->cgn_driver_evt_cnt);
5749 atomic_set(&phba->cgn_driver_evt_cnt, 0);
5753 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_WARN) ||
5754 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
5755 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5756 wvalue = atomic_read(&phba->cgn_fabric_warn_cnt);
5757 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
5761 if ((phba->cgn_reg_fpin & LPFC_CGN_FPIN_ALARM) ||
5762 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM)
5763 avalue = atomic_read(&phba->cgn_fabric_alarm_cnt);
5764 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
5793 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5798 if ((phba->cgn_evt_minute % LPFC_MIN_HOUR) == 0) {
5837 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5844 if ((phba->cgn_evt_minute % LPFC_MIN_DAY) == 0) {
5884 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5891 value = phba->cgn_fpin_frequency;
5906 * @phba: The Hba for which this call is being executed.
5913 lpfc_calc_cmf_latency(struct lpfc_hba *phba)
5923 if (cmpl_time.tv_sec == phba->cmf_latency.tv_sec) {
5924 msec = (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) /
5927 if (cmpl_time.tv_nsec >= phba->cmf_latency.tv_nsec) {
5929 phba->cmf_latency.tv_sec) * MSEC_PER_SEC;
5931 phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC);
5933 msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec -
5935 msec += (((NSEC_PER_SEC - phba->cmf_latency.tv_nsec) +
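
The three branches at 5923-5935 handle the same-second, nsec-carry, and nsec-borrow cases of subtracting two timespec64 stamps. Mathematically they collapse to one expression (sketch; equivalent up to sub-millisecond truncation):

	/* elapsed ms from phba->cmf_latency (start) to cmpl_time (end) */
	msec = (cmpl_time.tv_sec - phba->cmf_latency.tv_sec) * MSEC_PER_SEC +
	       (cmpl_time.tv_nsec - phba->cmf_latency.tv_nsec) / NSEC_PER_MSEC;
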
5950 struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
5962 if (phba->cmf_active_mode == LPFC_CFG_OFF ||
5963 !phba->cmf_latency.tv_sec) {
5964 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
5966 phba->cmf_active_mode,
5967 (uint64_t)phba->cmf_latency.tv_sec);
5974 if (!phba->pport)
5980 atomic_set(&phba->cmf_stop_io, 1);
5987 ms = lpfc_calc_cmf_latency(phba);
5994 ktime_get_real_ts64(&phba->cmf_latency);
5996 phba->cmf_link_byte_count =
5997 div_u64(phba->cmf_max_line_rate * LPFC_CMF_INTERVAL, 1000);
6005 cgs = per_cpu_ptr(phba->cmf_stat, cpu);
6017 if (phba->cmf_active_mode == LPFC_CFG_MANAGED &&
6018 phba->link_state != LPFC_LINK_DOWN &&
6019 test_bit(HBA_SETUP, &phba->hba_flag)) {
6020 mbpi = phba->cmf_last_sync_bw;
6021 phba->cmf_last_sync_bw = 0;
6034 lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
6039 mbpi = phba->cmf_link_byte_count;
6042 phba->cmf_timer_cnt++;
6046 atomic_add(io_cnt, &phba->cgn_latency_evt_cnt);
6047 atomic64_add(lat, &phba->cgn_latency_evt);
6049 busy = atomic_xchg(&phba->cmf_busy, 0);
6050 max_read = atomic_xchg(&phba->rx_max_read_cnt, 0);
6054 if (mbpi > phba->cmf_link_byte_count ||
6055 phba->cmf_active_mode == LPFC_CFG_MONITOR)
6056 mbpi = phba->cmf_link_byte_count;
6061 if (mbpi != phba->cmf_max_bytes_per_interval)
6062 phba->cmf_max_bytes_per_interval = mbpi;
6066 if (phba->rx_monitor) {
6071 entry.cmf_info = phba->cmf_active_info;
6082 if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
6083 entry.timer_utilization = phba->cmf_last_ts;
6087 phba->cmf_last_ts = 0;
6089 lpfc_rx_monitor_record(phba->rx_monitor, &entry);
6092 if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
6097 atomic_inc(&phba->cgn_driver_evt_cnt);
6099 phba->rx_block_cnt += div_u64(rcv, 512); /* save 512 byte block cnt */
6104 if (atomic_xchg(&phba->cmf_bw_wait, 0))
6105 queue_work(phba->wq, &phba->unblock_request_work);
6108 atomic_set(&phba->cmf_stop_io, 0);
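lpfc_cmf_timer is an hrtimer callback: it recovers the owning phba with container_of, performs one interval of bandwidth accounting, and re-arms itself. A hedged sketch of that life cycle, with the interval constant assumed to be in milliseconds:

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    /* Sketch: periodic hrtimer callback that re-arms itself. */
    static enum hrtimer_restart cmf_tick(struct hrtimer *timer)
    {
            struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
                                                 cmf_timer);

            /* ... per-interval bandwidth accounting on phba ... */

            hrtimer_forward_now(timer, ms_to_ktime(LPFC_CMF_INTERVAL));
            return HRTIMER_RESTART;
    }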
6118 ((phba->trunk_link.link##__idx.state == LPFC_LINK_UP) ?\
6126 lpfc_update_trunk_link_status(struct lpfc_hba *phba,
6133 phba->sli4_hba.link_state.speed =
6134 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6137 phba->sli4_hba.link_state.logical_speed =
6140 phba->fc_linkspeed =
6142 phba,
6146 phba->trunk_link.link0.state =
6149 phba->trunk_link.link0.fault = port_fault & 0x1 ? err : 0;
6153 phba->trunk_link.link1.state =
6156 phba->trunk_link.link1.fault = port_fault & 0x2 ? err : 0;
6160 phba->trunk_link.link2.state =
6163 phba->trunk_link.link2.fault = port_fault & 0x4 ? err : 0;
6167 phba->trunk_link.link3.state =
6170 phba->trunk_link.link3.fault = port_fault & 0x8 ? err : 0;
6175 phba->trunk_link.phy_lnk_speed =
6176 phba->sli4_hba.link_state.logical_speed / (cnt * 1000);
6178 phba->trunk_link.phy_lnk_speed = LPFC_LINK_SPEED_UNKNOWN;
6180 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6184 phba->sli4_hba.link_state.speed,
6185 phba->sli4_hba.link_state.logical_speed,
6189 if (phba->cmf_active_mode != LPFC_CFG_OFF)
6190 lpfc_cmf_signal_init(phba);
6193 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
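Each trunk member's fault above is gated by one bit of port_fault (bit 0 for link0, bit 1 for link1, and so on). The same decode written as a loop, with an assumed per-link fault array:

    #include <linux/bits.h>

    /* Sketch: expand a per-link fault bitmask into per-link error codes. */
    static void decode_trunk_faults(u32 port_fault, u32 err,
                                    u32 *fault, int nlinks)
    {
            int i;

            for (i = 0; i < nlinks; i++)
                    fault[i] = (port_fault & BIT(i)) ? err : 0;
    }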
6209 * @phba: pointer to lpfc hba data structure.
6217 lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
6227 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6235 lpfc_update_trunk_link_status(phba, acqe_fc);
6240 phba->sli4_hba.link_state.speed =
6241 lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
6243 phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
6244 phba->sli4_hba.link_state.topology =
6246 phba->sli4_hba.link_state.status =
6248 phba->sli4_hba.link_state.type =
6250 phba->sli4_hba.link_state.number =
6252 phba->sli4_hba.link_state.fault =
6254 phba->sli4_hba.link_state.link_status =
6261 if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_LINK_UP &&
6262 phba->sli4_hba.link_state.status < LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
6265 phba->sli4_hba.link_state.logical_speed = 0;
6266 else if (!phba->sli4_hba.conf_trunk)
6267 phba->sli4_hba.link_state.logical_speed =
6271 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6275 phba->sli4_hba.link_state.speed,
6276 phba->sli4_hba.link_state.topology,
6277 phba->sli4_hba.link_state.status,
6278 phba->sli4_hba.link_state.type,
6279 phba->sli4_hba.link_state.number,
6280 phba->sli4_hba.link_state.logical_speed,
6281 phba->sli4_hba.link_state.fault,
6282 phba->sli4_hba.link_state.link_status);
6289 if (phba->sli4_hba.link_state.status >= LPFC_FC_LA_TYPE_ACTIVATE_FAIL) {
6290 switch (phba->sli4_hba.link_state.status) {
6293 phba->sli4_hba.link_state.status =
6304 phba->sli4_hba.link_state.status =
6311 lpfc_log_msg(phba, log_level, LOG_SLI,
6318 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6320 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6324 rc = lpfc_mbox_rsrc_prep(phba, pmb);
6326 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6332 lpfc_els_flush_all_cmd(phba);
6335 phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;
6338 phba->sli.slistat.link_event++;
6341 lpfc_read_topology(phba, pmb, pmb->ctx_buf);
6343 pmb->vport = phba->pport;
6345 if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
6346 phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);
6348 switch (phba->sli4_hba.link_state.status) {
6350 phba->link_flag |= LS_MDS_LINK_DOWN;
6353 phba->link_flag |= LS_MDS_LOOPBACK;
6364 lpfc_sli4_parse_latt_fault(phba, (void *)acqe_fc);
6370 if (phba->sli4_hba.link_state.status ==
6379 lpfc_mbx_cmpl_read_topology(phba, pmb);
6384 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
6390 lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
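The READ_TOPOLOGY path follows the stock SLI4 mailbox life cycle: allocate from the mempool, prep the resources, issue with MBX_NOWAIT, and reclaim the mailbox only if the submit itself fails, since on success the completion handler owns the cleanup. A hedged outline built from the helpers visible above:

    /* Sketch: fire-and-forget mailbox with cleanup on submit failure. */
    static int issue_read_topology(struct lpfc_hba *phba)
    {
            LPFC_MBOXQ_t *pmb;
            int rc;

            pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
            if (!pmb)
                    return -ENOMEM;

            rc = lpfc_mbox_rsrc_prep(phba, pmb);
            if (rc) {
                    mempool_free(pmb, phba->mbox_mem_pool);
                    return rc;
            }

            lpfc_read_topology(phba, pmb, pmb->ctx_buf);
            pmb->vport = phba->pport;

            rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
            if (rc == MBX_NOT_FINISHED) {
                    /* completion will never run; reclaim here */
                    lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
                    return -EIO;
            }
            return 0;       /* cleanup deferred to the completion handler */
    }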
6395 * @phba: pointer to lpfc hba data structure.
6401 lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
6417 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6423 port_name = phba->Port[0];
6433 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6437 phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
6438 shost = lpfc_shost_from_vport(phba->pport);
6450 lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_LDS_EVENT,
6454 shost = lpfc_shost_from_vport(phba->pport);
6466 switch (phba->sli4_hba.lnk_info.lnk_no) {
6492 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6496 phba->sli4_hba.lnk_info.lnk_no);
6501 if (phba->sli4_hba.lnk_info.optic_state == status)
6541 rc = lpfc_sli4_read_config(phba);
6543 phba->lmt = 0;
6544 lpfc_printf_log(phba, KERN_ERR,
6549 rc = lpfc_sli4_refresh_params(phba);
6551 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6555 vports = lpfc_create_vport_work_array(phba);
6557 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6563 lpfc_destroy_vport_work_array(phba, vports);
6565 phba->sli4_hba.lnk_info.optic_state = status;
6566 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
6570 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6577 lpfc_sli4_cgn_parm_chg_evt(phba);
6585 lpfc_log_msg(phba, KERN_WARNING, LOG_SLI | LOG_DISCOVERY,
6588 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_FABRIC;
6589 memset(phba->pport->fc_portname.u.wwn, 0,
6594 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6600 if (phba->cmf_active_mode == LPFC_CFG_OFF)
6604 phba->cgn_acqe_cnt++;
6607 atomic64_add(cnt, &phba->cgn_acqe_stat.warn);
6608 atomic64_add(cgn_signal->alarm_cnt, &phba->cgn_acqe_stat.alarm);
6614 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6617 &phba->cgn_sync_alarm_cnt);
6621 if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
6622 phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6624 atomic_add(cnt, &phba->cgn_sync_warn_cnt);
6630 lpfc_printf_log(phba, KERN_INFO,
6638 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
6640 lpfc_sli4_async_cmstat_evt(phba);
6643 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6665 struct lpfc_hba *phba;
6669 phba = vport->phba;
6670 if (!phba)
6683 if ((phba->pport->port_state < LPFC_FLOGI) &&
6684 (phba->pport->port_state != LPFC_VPORT_FAILED))
6687 if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
6702 * @phba: pointer to lpfc hba data structure.
6708 lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
6713 vports = lpfc_create_vport_work_array(phba);
6715 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
6717 lpfc_destroy_vport_work_array(phba, vports);
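lpfc_sli4_perform_all_vport_cvl is the driver's stock idiom for touching every vport: snapshot the list into a work array, iterate under the max_vports bound, then free the array. Generalized:

    /* Sketch: apply fn to every active vport via the work-array idiom. */
    static void for_each_active_vport(struct lpfc_hba *phba,
                                      void (*fn)(struct lpfc_vport *vport))
    {
            struct lpfc_vport **vports;
            int i;

            vports = lpfc_create_vport_work_array(phba);
            if (vports)
                    for (i = 0; i <= phba->max_vports && vports[i]; i++)
                            fn(vports[i]);
            lpfc_destroy_vport_work_array(phba, vports);    /* NULL-safe */
    }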
6722 * @phba: pointer to lpfc hba data structure.
6728 lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
6739 phba->fc_eventTag = acqe_fip->event_tag;
6740 phba->fcoe_eventtag = acqe_fip->event_tag;
6745 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6751 lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
6757 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6763 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6768 rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
6772 if (test_bit(FCF_TS_INPROG, &phba->hba_flag))
6774 spin_lock_irq(&phba->hbalock);
6776 if (phba->fcf.fcf_flag & (FCF_REDISC_EVT | FCF_REDISC_PEND)) {
6777 spin_unlock_irq(&phba->hbalock);
6782 if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
6783 spin_unlock_irq(&phba->hbalock);
6786 spin_unlock_irq(&phba->hbalock);
6789 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6793 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
6796 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6802 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6809 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6810 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6818 spin_lock_irq(&phba->hbalock);
6819 if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
6820 (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
6821 spin_unlock_irq(&phba->hbalock);
6823 lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
6826 spin_unlock_irq(&phba->hbalock);
6829 if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
6838 spin_lock_irq(&phba->hbalock);
6840 phba->fcf.fcf_flag |= FCF_DEAD_DISC;
6841 spin_unlock_irq(&phba->hbalock);
6843 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
6847 rc = lpfc_sli4_redisc_fcf_table(phba);
6849 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6854 spin_lock_irq(&phba->hbalock);
6855 phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
6856 spin_unlock_irq(&phba->hbalock);
6861 lpfc_sli4_fcf_dead_failthrough(phba);
6864 lpfc_sli4_clear_fcf_rr_bmask(phba);
6869 lpfc_sli4_perform_all_vport_cvl(phba);
6873 phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
6874 lpfc_printf_log(phba, KERN_ERR,
6879 vport = lpfc_find_vport_by_vpid(phba,
6886 vports = lpfc_create_vport_work_array(phba);
6888 for (i = 0; i <= phba->max_vports && vports[i] != NULL;
6897 lpfc_destroy_vport_work_array(phba, vports);
6926 spin_lock_irq(&phba->hbalock);
6927 if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
6928 spin_unlock_irq(&phba->hbalock);
6932 phba->fcf.fcf_flag |= FCF_ACVL_DISC;
6933 spin_unlock_irq(&phba->hbalock);
6934 lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
6938 rc = lpfc_sli4_redisc_fcf_table(phba);
6940 lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
6945 spin_lock_irq(&phba->hbalock);
6946 phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
6947 spin_unlock_irq(&phba->hbalock);
6952 lpfc_retry_pport_discovery(phba);
6958 lpfc_sli4_clear_fcf_rr_bmask(phba);
6962 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
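Throughout the FIP handler, FCF state changes are test-and-set sequences under hbalock so rediscovery is kicked off at most once per event. The guard shape, with the discovery bit passed in as an assumed parameter:

    /* Sketch: atomically claim an FCF rediscovery, once. */
    static bool fcf_mark_disc_once(struct lpfc_hba *phba, u32 disc_bit)
    {
            bool start = false;

            spin_lock_irq(&phba->hbalock);
            if (!(phba->fcf.fcf_flag & FCF_DISCOVERY)) {
                    phba->fcf.fcf_flag |= disc_bit; /* e.g. FCF_ACVL_DISC */
                    start = true;
            }
            spin_unlock_irq(&phba->hbalock);
            return start;   /* caller starts redisc only when true */
    }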
6971 * @phba: pointer to lpfc hba data structure.
6977 lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
6980 phba->fc_eventTag = acqe_dcbx->event_tag;
6981 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6988 * @phba: pointer to lpfc hba data structure.
6996 lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
7001 phba->fc_eventTag = acqe_grp5->event_tag;
7002 phba->fcoe_eventtag = acqe_grp5->event_tag;
7003 prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
7004 phba->sli4_hba.link_state.logical_speed =
7006 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
7009 phba->sli4_hba.link_state.logical_speed);
7014 * @phba: pointer to lpfc hba data structure.
7020 lpfc_sli4_async_cmstat_evt(struct lpfc_hba *phba)
7022 if (!phba->cgn_i)
7024 lpfc_init_congestion_stat(phba);
7029 * @phba: pointer to lpfc hba data structure.
7036 lpfc_cgn_params_val(struct lpfc_hba *phba, struct lpfc_cgn_param *p_cfg_param)
7038 spin_lock_irq(&phba->hbalock);
7042 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
7048 spin_unlock_irq(&phba->hbalock);
7059 * @phba: pointer to lpfc hba data structure.
7067 * @phba structure. If the magic is incorrect, an error message is
7071 lpfc_cgn_params_parse(struct lpfc_hba *phba,
7082 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7100 phba->cmf_active_mode);
7102 oldmode = phba->cmf_active_mode;
7107 lpfc_cgn_params_val(phba, p_cgn_param);
7110 spin_lock_irq(&phba->hbalock);
7111 memcpy(&phba->cgn_p, p_cgn_param,
7115 if (phba->cgn_i) {
7116 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
7117 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
7118 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
7119 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
7120 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
7125 spin_unlock_irq(&phba->hbalock);
7127 phba->cmf_active_mode = phba->cgn_p.cgn_param_mode;
7131 if (phba->cgn_p.cgn_param_mode != LPFC_CFG_OFF) {
7133 lpfc_cmf_start(phba);
7135 if (phba->link_state >= LPFC_LINK_UP) {
7136 phba->cgn_reg_fpin =
7137 phba->cgn_init_reg_fpin;
7138 phba->cgn_reg_signal =
7139 phba->cgn_init_reg_signal;
7140 lpfc_issue_els_edc(phba->pport, 0);
7145 switch (phba->cgn_p.cgn_param_mode) {
7148 lpfc_cmf_stop(phba);
7149 if (phba->link_state >= LPFC_LINK_UP)
7150 lpfc_issue_els_edc(phba->pport, 0);
7153 phba->cmf_max_bytes_per_interval =
7154 phba->cmf_link_byte_count;
7157 queue_work(phba->wq,
7158 &phba->unblock_request_work);
7163 switch (phba->cgn_p.cgn_param_mode) {
7166 lpfc_cmf_stop(phba);
7167 if (phba->link_state >= LPFC_LINK_UP)
7168 lpfc_issue_els_edc(phba->pport, 0);
7171 lpfc_cmf_signal_init(phba);
7177 oldmode != phba->cgn_p.cgn_param_mode) {
7178 if (phba->cgn_p.cgn_param_mode == LPFC_CFG_MANAGED)
7180 phba->cgn_p.cgn_param_level0);
7184 dev_info(&phba->pcidev->dev, "%d: "
7186 phba->brd_no,
7188 [phba->cgn_p.cgn_param_mode],
7192 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7201 * @phba: pointer to lpfc hba data structure.
7213 lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
7225 ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
7232 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7240 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT | LOG_INIT,
7243 /* Parse data pointer over len and update the phba congestion
7248 lpfc_cgn_params_parse(phba, p_cgn_param, len);
7257 * @phba: pointer to lpfc hba data structure.
7274 lpfc_sli4_cgn_parm_chg_evt(struct lpfc_hba *phba)
7278 if (!phba->sli4_hba.pc_sli4_params.cmf) {
7279 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7288 ret = lpfc_sli4_cgn_params_read(phba);
7290 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7294 lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
7302 * @phba: pointer to lpfc hba data structure.
7307 void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
7313 clear_bit(ASYNC_EVENT, &phba->hba_flag);
7316 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7317 while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
7318 list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
7320 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
7326 lpfc_sli4_async_link_evt(phba,
7330 lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
7333 lpfc_sli4_async_dcbx_evt(phba,
7337 lpfc_sli4_async_grp5_evt(phba,
7341 lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
7344 lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
7347 lpfc_printf_log(phba, KERN_ERR,
7356 lpfc_sli4_cq_event_release(phba, cq_event);
7357 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
7359 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
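The async-event loop drains sp_asynce_work_queue with the usual "pop under lock, handle unlocked, re-take" pattern, leaving individual handlers free to sleep or take other locks. The skeleton:

    /* Sketch: drain a work queue without holding its lock in handlers. */
    static void drain_async_events(struct lpfc_hba *phba)
    {
            struct lpfc_cq_event *cq_event;
            unsigned long iflags;

            spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
            while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
                    list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
                                     cq_event, struct lpfc_cq_event, list);
                    spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock,
                                           iflags);

                    /* ... dispatch on the ACQE trailer code ... */

                    lpfc_sli4_cq_event_release(phba, cq_event);
                    spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock,
                                      iflags);
            }
            spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
    }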
7364 * @phba: pointer to lpfc hba data structure.
7369 void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
7373 spin_lock_irq(&phba->hbalock);
7375 phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
7377 phba->fcf.failover_rec.flag = 0;
7379 phba->fcf.fcf_flag |= FCF_REDISC_FOV;
7380 spin_unlock_irq(&phba->hbalock);
7383 lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
7385 rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
7387 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7394 * @phba: pointer to lpfc hba data structure.
7403 lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
7408 phba->pci_dev_grp = dev_grp;
7412 phba->sli_rev = LPFC_SLI_REV4;
7415 rc = lpfc_init_api_table_setup(phba, dev_grp);
7419 rc = lpfc_scsi_api_table_setup(phba, dev_grp);
7423 rc = lpfc_sli_api_table_setup(phba, dev_grp);
7427 rc = lpfc_mbox_api_table_setup(phba, dev_grp);
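lpfc_api_table_setup simply chains the per-subsystem jump-table initializers and stops at the first failure; condensed below, where the SLI4 device-group constant is an assumption:

    /* Sketch: sequential API-table setup with early-out on error. */
    static int api_tables_init(struct lpfc_hba *phba, uint8_t dev_grp)
    {
            int rc;

            phba->pci_dev_grp = dev_grp;
            if (dev_grp == LPFC_PCI_DEV_OC)         /* assumed constant */
                    phba->sli_rev = LPFC_SLI_REV4;

            rc = lpfc_init_api_table_setup(phba, dev_grp);
            if (rc)
                    return rc;
            rc = lpfc_scsi_api_table_setup(phba, dev_grp);
            if (rc)
                    return rc;
            rc = lpfc_sli_api_table_setup(phba, dev_grp);
            if (rc)
                    return rc;
            return lpfc_mbox_api_table_setup(phba, dev_grp);
    }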
7436 * @phba: pointer to lpfc hba data structure.
7442 static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
7446 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7450 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7454 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7458 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7467 * @phba: pointer to lpfc hba data structure.
7477 lpfc_enable_pci_dev(struct lpfc_hba *phba)
7482 if (!phba->pcidev)
7485 pdev = phba->pcidev;
7506 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
7513 * @phba: pointer to lpfc hba data structure.
7519 lpfc_disable_pci_dev(struct lpfc_hba *phba)
7524 if (!phba->pcidev)
7527 pdev = phba->pcidev;
7537 * @phba: pointer to lpfc hba data structure.
7545 lpfc_reset_hba(struct lpfc_hba *phba)
7550 if (!phba->cfg_enable_hba_reset) {
7551 phba->link_state = LPFC_HBA_ERROR;
7556 if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
7557 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
7559 if (test_bit(MBX_TMO_ERR, &phba->bit_flags)) {
7561 rc = lpfc_pci_function_reset(phba);
7562 lpfc_els_flush_all_cmd(phba);
7564 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
7565 lpfc_sli_flush_io_rings(phba);
7567 lpfc_offline(phba);
7568 clear_bit(MBX_TMO_ERR, &phba->bit_flags);
7570 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
7574 lpfc_sli_brdrestart(phba);
7575 lpfc_online(phba);
7576 lpfc_unblock_mgmt_io(phba);
7582 * @phba: pointer to lpfc hba data structure.
7591 lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
7593 struct pci_dev *pdev = phba->pcidev;
7607 * @phba: pointer to lpfc hba data structure.
7617 lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
7619 struct pci_dev *pdev = phba->pcidev;
7623 max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
7625 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7633 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7638 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7647 struct lpfc_hba *phba = container_of(work, struct lpfc_hba,
7650 lpfc_unblock_requests(phba);
7655 * @phba: pointer to lpfc hba data structure.
7665 lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
7667 struct lpfc_sli *psli = &phba->sli;
7672 atomic_set(&phba->fast_event_count, 0);
7673 atomic_set(&phba->dbg_log_idx, 0);
7674 atomic_set(&phba->dbg_log_cnt, 0);
7675 atomic_set(&phba->dbg_log_dmping, 0);
7676 spin_lock_init(&phba->hbalock);
7679 spin_lock_init(&phba->port_list_lock);
7680 INIT_LIST_HEAD(&phba->port_list);
7682 INIT_LIST_HEAD(&phba->work_list);
7685 init_waitqueue_head(&phba->work_waitq);
7687 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
7689 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
7691 ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
7693 (phba->nvmet_support ? "NVMET" : " "));
7696 spin_lock_init(&phba->ras_fwlog_lock);
7699 spin_lock_init(&phba->scsi_buf_list_get_lock);
7700 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
7701 spin_lock_init(&phba->scsi_buf_list_put_lock);
7702 INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
7705 INIT_LIST_HEAD(&phba->fabric_iocb_list);
7708 INIT_LIST_HEAD(&phba->elsbuf);
7711 INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
7714 spin_lock_init(&phba->devicelock);
7715 INIT_LIST_HEAD(&phba->luns);
7720 timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
7722 timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
7724 timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);
7726 INIT_DELAYED_WORK(&phba->eq_delay_work, lpfc_hb_eq_delay_work);
7728 INIT_DELAYED_WORK(&phba->idle_stat_delay_work,
7730 INIT_WORK(&phba->unblock_request_work, lpfc_unblock_requests_work);
7736 * @phba: pointer to lpfc hba data structure.
7746 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
7755 timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);
7758 phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
7759 phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
7762 lpfc_get_cfgparam(phba);
7765 rc = lpfc_setup_driver_resource_phase1(phba);
7769 if (!phba->sli.sli3_ring)
7770 phba->sli.sli3_ring = kcalloc(LPFC_SLI3_MAX_RING,
7773 if (!phba->sli.sli3_ring)
7781 if (phba->sli_rev == LPFC_SLI_REV4)
7787 if (phba->cfg_enable_bg) {
7797 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7801 if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
7802 phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
7805 phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
7812 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
7814 ((phba->cfg_sg_seg_cnt + 2) * entry_sz);
7817 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
7820 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
7822 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
7823 phba->cfg_total_seg_cnt);
7825 phba->max_vpi = LPFC_MAX_VPI;
7827 phba->max_vports = 0;
7832 lpfc_sli_setup(phba);
7833 lpfc_sli_queue_init(phba);
7836 if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
7839 phba->lpfc_sg_dma_buf_pool =
7841 &phba->pcidev->dev, phba->cfg_sg_dma_buf_size,
7844 if (!phba->lpfc_sg_dma_buf_pool)
7847 phba->lpfc_cmd_rsp_buf_pool =
7849 &phba->pcidev->dev,
7854 if (!phba->lpfc_cmd_rsp_buf_pool)
7861 if (phba->cfg_sriov_nr_virtfn > 0) {
7862 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
7863 phba->cfg_sriov_nr_virtfn);
7865 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7869 phba->cfg_sriov_nr_virtfn);
7870 phba->cfg_sriov_nr_virtfn = 0;
7877 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
7878 phba->lpfc_sg_dma_buf_pool = NULL;
7880 lpfc_mem_free(phba);
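The per-I/O scatter-gather buffers above come from a dma_pool sized to cfg_sg_dma_buf_size. Creating and unwinding such a pool looks like this (the pool name and alignment argument are illustrative):

    #include <linux/dmapool.h>

    /* Sketch: DMA pool for SG buffers, with NULL-safe teardown. */
    static int sg_pool_create(struct lpfc_hba *phba)
    {
            phba->lpfc_sg_dma_buf_pool =
                    dma_pool_create("lpfc_sg_dma_buf_pool",
                                    &phba->pcidev->dev,
                                    phba->cfg_sg_dma_buf_size,
                                    BPL_ALIGN_SZ, 0); /* align, no boundary */
            return phba->lpfc_sg_dma_buf_pool ? 0 : -ENOMEM;
    }

    static void sg_pool_destroy(struct lpfc_hba *phba)
    {
            dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);   /* NULL-safe */
            phba->lpfc_sg_dma_buf_pool = NULL;
    }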
7886 * @phba: pointer to lpfc hba data structure.
7892 lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
7895 lpfc_mem_free_all(phba);
7902 * @phba: pointer to lpfc hba data structure.
7912 lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
7923 phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
7924 phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1;
7925 phba->sli4_hba.curr_disp_cpu = 0;
7928 lpfc_get_cfgparam(phba);
7931 rc = lpfc_setup_driver_resource_phase1(phba);
7936 rc = lpfc_sli4_post_status_check(phba);
7943 phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
7944 if (!phba->wq)
7951 timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);
7954 timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);
7957 hrtimer_init(&phba->cmf_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7958 phba->cmf_timer.function = lpfc_cmf_timer;
7960 hrtimer_init(&phba->cmf_stats_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
7961 phba->cmf_stats_timer.function = lpfc_cmf_stats_timer;
7967 memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
7969 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
7971 phba->max_vpi = LPFC_MAX_VPI;
7974 phba->max_vports = 0;
7977 phba->valid_vlan = 0;
7978 phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
7979 phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
7980 phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
7989 INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
7990 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
7991 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
7994 if (lpfc_is_vmid_enabled(phba))
7995 timer_setup(&phba->inactive_vmid_poll, lpfc_vmid_poll, 0);
8001 spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock);
8002 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list);
8004 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8006 spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
8007 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8008 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
8009 spin_lock_init(&phba->sli4_hba.t_active_list_lock);
8010 INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
8014 spin_lock_init(&phba->sli4_hba.sgl_list_lock);
8015 spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
8016 spin_lock_init(&phba->sli4_hba.asynce_list_lock);
8017 spin_lock_init(&phba->sli4_hba.els_xri_abrt_list_lock);
8024 INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
8026 INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
8028 INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
8030 INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
8032 INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
8035 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
8036 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
8037 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
8038 INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);
8043 INIT_LIST_HEAD(&phba->sli.mboxq);
8044 INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);
8047 phba->sli4_hba.lnk_info.optic_state = 0xff;
8050 rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
8055 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
8057 rc = lpfc_pci_function_reset(phba);
8062 phba->temp_sensor_support = 1;
8066 rc = lpfc_create_bootstrap_mbox(phba);
8071 rc = lpfc_setup_endian_order(phba);
8076 rc = lpfc_sli4_read_config(phba);
8080 if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG) {
8088 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
8091 rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
8096 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8098 rc = lpfc_pci_function_reset(phba);
8103 mboxq = mempool_alloc(phba->mbox_mem_pool,
8111 phba->nvmet_support = 0;
8115 lpfc_read_nv(phba, mboxq);
8116 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8118 lpfc_printf_log(phba, KERN_ERR,
8124 mempool_free(mboxq, phba->mbox_mem_pool);
8132 phba->sli4_hba.wwnn.u.name = wwn;
8137 phba->sli4_hba.wwpn.u.name = wwn;
8143 if (lpfc_nvmet_mem_alloc(phba))
8146 phba->nvmet_support = 1; /* a match */
8148 lpfc_printf_log(phba, KERN_ERR,
8153 lpfc_printf_log(phba, KERN_ERR,
8160 phba->cfg_xri_rebalancing = 0;
8161 if (phba->irq_chann_mode == NHT_MODE) {
8162 phba->cfg_irq_chann =
8163 phba->sli4_hba.num_present_cpu;
8164 phba->cfg_hdw_queue =
8165 phba->sli4_hba.num_present_cpu;
8166 phba->irq_chann_mode = NORMAL_MODE;
8173 lpfc_nvme_mod_param_dep(phba);
8180 rc = lpfc_get_sli4_parameters(phba, mboxq);
8183 &phba->sli4_hba.sli_intf);
8185 &phba->sli4_hba.sli_intf);
8186 if (phba->sli4_hba.extents_in_use &&
8187 phba->sli4_hba.rpi_hdrs_in_use) {
8188 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8193 mempool_free(mboxq, phba->mbox_mem_pool);
8200 mempool_free(mboxq, phba->mbox_mem_pool);
8211 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
8225 if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8237 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd32) +
8241 phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;
8247 if (phba->cfg_enable_bg &&
8248 phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF)
8249 phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF;
8251 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8259 phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd32) +
8261 ((phba->cfg_sg_seg_cnt + extra) *
8265 phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
8266 phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt;
8269 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
8274 if (phba->cfg_xpsgl && !phba->nvmet_support)
8275 phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE;
8276 else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
8277 phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
8279 phba->cfg_sg_dma_buf_size =
8280 SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);
8282 phba->border_sge_num = phba->cfg_sg_dma_buf_size /
8286 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
8287 if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
8288 lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
8292 phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
8294 phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
8297 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
8300 phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
8301 phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
8302 phba->cfg_nvme_seg_cnt);
8304 if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
8305 i = phba->cfg_sg_dma_buf_size;
8309 phba->lpfc_sg_dma_buf_pool =
8311 &phba->pcidev->dev,
8312 phba->cfg_sg_dma_buf_size,
8314 if (!phba->lpfc_sg_dma_buf_pool) {
8319 phba->lpfc_cmd_rsp_buf_pool =
8321 &phba->pcidev->dev,
8325 if (!phba->lpfc_cmd_rsp_buf_pool) {
8330 mempool_free(mboxq, phba->mbox_mem_pool);
8333 lpfc_sli4_oas_verify(phba);
8336 lpfc_sli4_ras_init(phba);
8339 rc = lpfc_sli4_queue_verify(phba);
8344 rc = lpfc_sli4_cq_event_pool_create(phba);
8349 lpfc_init_sgl_list(phba);
8352 rc = lpfc_init_active_sgl_array(phba);
8354 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8358 rc = lpfc_sli4_init_rpi_hdrs(phba);
8360 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8367 phba->fcf.fcf_rr_bmask = kcalloc(longs, sizeof(unsigned long),
8369 if (!phba->fcf.fcf_rr_bmask) {
8370 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8377 phba->sli4_hba.hba_eq_hdl = kcalloc(phba->cfg_irq_chann,
8380 if (!phba->sli4_hba.hba_eq_hdl) {
8381 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8388 phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_possible_cpu,
8391 if (!phba->sli4_hba.cpu_map) {
8392 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8399 phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
8400 if (!phba->sli4_hba.eq_info) {
8401 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8407 phba->sli4_hba.idle_stat = kcalloc(phba->sli4_hba.num_possible_cpu,
8408 sizeof(*phba->sli4_hba.idle_stat),
8410 if (!phba->sli4_hba.idle_stat) {
8411 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8418 phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
8419 if (!phba->sli4_hba.c_stat) {
8420 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8427 phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
8428 if (!phba->cmf_stat) {
8429 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8439 if (phba->cfg_sriov_nr_virtfn > 0) {
8440 rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
8441 phba->cfg_sriov_nr_virtfn);
8443 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
8447 phba->cfg_sriov_nr_virtfn);
8448 phba->cfg_sriov_nr_virtfn = 0;
8456 free_percpu(phba->sli4_hba.c_stat);
8459 kfree(phba->sli4_hba.idle_stat);
8461 free_percpu(phba->sli4_hba.eq_info);
8463 kfree(phba->sli4_hba.cpu_map);
8465 kfree(phba->sli4_hba.hba_eq_hdl);
8467 kfree(phba->fcf.fcf_rr_bmask);
8469 lpfc_sli4_remove_rpi_hdrs(phba);
8471 lpfc_free_active_sgl(phba);
8473 lpfc_sli4_cq_event_pool_destroy(phba);
8475 dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool);
8476 phba->lpfc_cmd_rsp_buf_pool = NULL;
8478 dma_pool_destroy(phba->lpfc_sg_dma_buf_pool);
8479 phba->lpfc_sg_dma_buf_pool = NULL;
8481 lpfc_destroy_bootstrap_mbox(phba);
8483 lpfc_mem_free(phba);
8485 destroy_workqueue(phba->wq);
8486 phba->wq = NULL;
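The tail of lpfc_sli4_driver_resource_setup unwinds in strict reverse order of acquisition through a chain of goto labels. Reduced to the three per-cpu allocations, the shape is:

    /* Sketch: reverse-order error unwinding for per-cpu stats. */
    static int setup_percpu_stats(struct lpfc_hba *phba)
    {
            int rc = -ENOMEM;

            phba->sli4_hba.eq_info = alloc_percpu(struct lpfc_eq_intr_info);
            if (!phba->sli4_hba.eq_info)
                    return rc;

            phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
            if (!phba->sli4_hba.c_stat)
                    goto out_free_eq_info;

            phba->cmf_stat = alloc_percpu(struct lpfc_cgn_stat);
            if (!phba->cmf_stat)
                    goto out_free_c_stat;
            return 0;

    out_free_c_stat:
            free_percpu(phba->sli4_hba.c_stat);
    out_free_eq_info:
            free_percpu(phba->sli4_hba.eq_info);
            return rc;
    }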
8492 * @phba: pointer to lpfc hba data structure.
8498 lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
8502 free_percpu(phba->sli4_hba.eq_info);
8504 free_percpu(phba->sli4_hba.c_stat);
8506 free_percpu(phba->cmf_stat);
8507 kfree(phba->sli4_hba.idle_stat);
8510 kfree(phba->sli4_hba.cpu_map);
8511 phba->sli4_hba.num_possible_cpu = 0;
8512 phba->sli4_hba.num_present_cpu = 0;
8513 phba->sli4_hba.curr_disp_cpu = 0;
8514 cpumask_clear(&phba->sli4_hba.irq_aff_mask);
8517 kfree(phba->sli4_hba.hba_eq_hdl);
8520 lpfc_sli4_remove_rpi_hdrs(phba);
8521 lpfc_sli4_remove_rpis(phba);
8524 kfree(phba->fcf.fcf_rr_bmask);
8527 lpfc_free_active_sgl(phba);
8528 lpfc_free_els_sgl_list(phba);
8529 lpfc_free_nvmet_sgl_list(phba);
8532 lpfc_sli4_cq_event_release_all(phba);
8533 lpfc_sli4_cq_event_pool_destroy(phba);
8536 lpfc_sli4_dealloc_resource_identifiers(phba);
8539 lpfc_destroy_bootstrap_mbox(phba);
8542 lpfc_mem_free_all(phba);
8546 &phba->fcf_conn_rec_list, list) {
8556 * @phba: pointer to lpfc hba data structure.
8560 * in @phba struct.
8565 lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
8567 phba->lpfc_hba_init_link = lpfc_hba_init_link;
8568 phba->lpfc_hba_down_link = lpfc_hba_down_link;
8569 phba->lpfc_selective_reset = lpfc_selective_reset;
8572 phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
8573 phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
8574 phba->lpfc_stop_port = lpfc_stop_port_s3;
8577 phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
8578 phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
8579 phba->lpfc_stop_port = lpfc_stop_port_s4;
8582 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8592 * @phba: pointer to lpfc hba data structure.
8602 lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
8607 phba->worker_thread = kthread_run(lpfc_do_work, phba,
8608 "lpfc_worker_%d", phba->brd_no);
8609 if (IS_ERR(phba->worker_thread)) {
8610 error = PTR_ERR(phba->worker_thread);
8619 * @phba: pointer to lpfc hba data structure.
8626 lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
8628 if (phba->wq) {
8629 destroy_workqueue(phba->wq);
8630 phba->wq = NULL;
8634 if (phba->worker_thread)
8635 kthread_stop(phba->worker_thread);
8640 * @phba: pointer to lpfc hba data structure.
8645 lpfc_free_iocb_list(struct lpfc_hba *phba)
8649 spin_lock_irq(&phba->hbalock);
8651 &phba->lpfc_iocb_list, list) {
8654 phba->total_iocbq_bufs--;
8656 spin_unlock_irq(&phba->hbalock);
8663 * @phba: pointer to lpfc hba data structure.
8674 lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
8681 INIT_LIST_HEAD(&phba->lpfc_iocb_list);
8691 iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
8701 spin_lock_irq(&phba->hbalock);
8702 list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
8703 phba->total_iocbq_bufs++;
8704 spin_unlock_irq(&phba->hbalock);
8710 lpfc_free_iocb_list(phba);
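lpfc_init_iocb_list allocates iocb_count queue entries, assigns each an iotag, and threads them onto lpfc_iocb_list under hbalock, falling back to lpfc_free_iocb_list on any failure. Compressed (iotag 0 is assumed to signal tag exhaustion):

    /* Sketch: build the iocb free list with locked insertion. */
    static int iocb_list_fill(struct lpfc_hba *phba, int iocb_count)
    {
            struct lpfc_iocbq *iocbq;
            int i, iotag;

            INIT_LIST_HEAD(&phba->lpfc_iocb_list);
            for (i = 0; i < iocb_count; i++) {
                    iocbq = kzalloc(sizeof(*iocbq), GFP_KERNEL);
                    if (!iocbq)
                            goto out_free;

                    iotag = lpfc_sli_next_iotag(phba, iocbq);
                    if (iotag == 0) {       /* assumed failure value */
                            kfree(iocbq);
                            goto out_free;
                    }

                    spin_lock_irq(&phba->hbalock);
                    list_add(&iocbq->list, &phba->lpfc_iocb_list);
                    phba->total_iocbq_bufs++;
                    spin_unlock_irq(&phba->hbalock);
            }
            return 0;

    out_free:
            lpfc_free_iocb_list(phba);
            return -ENOMEM;
    }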
8717 * @phba: pointer to lpfc hba data structure.
8723 lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
8729 lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
8736 * @phba: pointer to lpfc hba data structure.
8741 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
8746 spin_lock_irq(&phba->sli4_hba.sgl_list_lock);
8747 list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
8748 spin_unlock_irq(&phba->sli4_hba.sgl_list_lock);
8751 lpfc_free_sgl_list(phba, &sglq_list);
8756 * @phba: pointer to lpfc hba data structure.
8761 lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
8767 spin_lock_irq(&phba->hbalock);
8768 spin_lock(&phba->sli4_hba.sgl_list_lock);
8769 list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
8770 spin_unlock(&phba->sli4_hba.sgl_list_lock);
8771 spin_unlock_irq(&phba->hbalock);
8776 lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
8784 phba->sli4_hba.nvmet_xri_cnt = 0;
8789 * @phba: pointer to lpfc hba data structure.
8795 lpfc_init_active_sgl_array(struct lpfc_hba *phba)
8799 size *= phba->sli4_hba.max_cfg_param.max_xri;
8801 phba->sli4_hba.lpfc_sglq_active_list =
8803 if (!phba->sli4_hba.lpfc_sglq_active_list)
8810 * @phba: pointer to lpfc hba data structure.
8817 lpfc_free_active_sgl(struct lpfc_hba *phba)
8819 kfree(phba->sli4_hba.lpfc_sglq_active_list);
8824 * @phba: pointer to lpfc hba data structure.
8831 lpfc_init_sgl_list(struct lpfc_hba *phba)
8834 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
8835 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
8836 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
8837 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
8840 phba->sli4_hba.els_xri_cnt = 0;
8843 phba->sli4_hba.io_xri_cnt = 0;
8848 * @phba: pointer to lpfc hba data structure.
8861 lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
8866 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
8867 if (!phba->sli4_hba.rpi_hdrs_in_use)
8869 if (phba->sli4_hba.extents_in_use)
8872 rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
8874 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8876 lpfc_sli4_remove_rpis(phba);
8885 * @phba: pointer to lpfc hba data structure.
8888 * support rpis and stores them in the phba. This single region
8897 lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
8908 if (!phba->sli4_hba.rpi_hdrs_in_use)
8910 if (phba->sli4_hba.extents_in_use)
8914 rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;
8916 spin_lock_irq(&phba->hbalock);
8922 curr_rpi_range = phba->sli4_hba.next_rpi;
8923 spin_unlock_irq(&phba->hbalock);
8937 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
8958 spin_lock_irq(&phba->hbalock);
8962 rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
8963 list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
8965 spin_unlock_irq(&phba->hbalock);
8969 dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
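Each RPI header region is one coherent DMA allocation tracked on lpfc_rpi_hdr_list. The allocate-register-unwind core, with field names taken from the listing and everything else assumed:

    /* Sketch: allocate one RPI header region and queue it. */
    static struct lpfc_rpi_hdr *rpi_hdr_alloc(struct lpfc_hba *phba)
    {
            struct lpfc_rpi_hdr *rpi_hdr;
            struct lpfc_dmabuf *dmabuf;

            dmabuf = kzalloc(sizeof(*dmabuf), GFP_KERNEL);
            if (!dmabuf)
                    return NULL;

            dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
                                              LPFC_HDR_TEMPLATE_SIZE,
                                              &dmabuf->phys, GFP_KERNEL);
            if (!dmabuf->virt)
                    goto err_free_dmabuf;

            rpi_hdr = kzalloc(sizeof(*rpi_hdr), GFP_KERNEL);
            if (!rpi_hdr)
                    goto err_free_coherent;

            rpi_hdr->dmabuf = dmabuf;
            rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;

            spin_lock_irq(&phba->hbalock);
            list_add_tail(&rpi_hdr->list,
                          &phba->sli4_hba.lpfc_rpi_hdr_list);
            spin_unlock_irq(&phba->hbalock);
            return rpi_hdr;

    err_free_coherent:
            dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
                              dmabuf->virt, dmabuf->phys);
    err_free_dmabuf:
            kfree(dmabuf);
            return NULL;
    }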
8978 * @phba: pointer to lpfc hba data structure.
8986 lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
8990 if (!phba->sli4_hba.rpi_hdrs_in_use)
8994 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
8996 dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
9003 phba->sli4_hba.next_rpi = 0;
9011 * HBA device. If the allocation is successful, the phba reference to the
9015 * pointer to @phba - successful
9021 struct lpfc_hba *phba;
9024 phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
9025 if (!phba) {
9031 phba->pcidev = pdev;
9034 phba->brd_no = lpfc_get_instance();
9035 if (phba->brd_no < 0) {
9036 kfree(phba);
9039 phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;
9041 spin_lock_init(&phba->ct_ev_lock);
9042 INIT_LIST_HEAD(&phba->ct_ev_waiters);
9044 return phba;
9049 * @phba: pointer to lpfc hba data structure.
9055 lpfc_hba_free(struct lpfc_hba *phba)
9057 if (phba->sli_rev == LPFC_SLI_REV4)
9058 kfree(phba->sli4_hba.hdwq);
9061 idr_remove(&lpfc_hba_index, phba->brd_no);
9064 kfree(phba->sli.sli3_ring);
9065 phba->sli.sli3_ring = NULL;
9067 kfree(phba);
9083 struct lpfc_hba *phba = vport->phba;
9086 if (phba->cfg_enable_SmartSAN ||
9087 phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT) {
9090 if (phba->cfg_enable_SmartSAN)
9096 lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
9103 * @phba: pointer to lpfc hba data structure.
9113 lpfc_create_shost(struct lpfc_hba *phba)
9119 phba->fc_edtov = FF_DEF_EDTOV;
9120 phba->fc_ratov = FF_DEF_RATOV;
9121 phba->fc_altov = FF_DEF_ALTOV;
9122 phba->fc_arbtov = FF_DEF_ARBTOV;
9124 atomic_set(&phba->sdev_cnt, 0);
9125 vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
9130 phba->pport = vport;
9132 if (phba->nvmet_support) {
9134 phba->targetport = NULL;
9135 phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
9136 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC,
9142 pci_set_drvdata(phba->pcidev, shost);
9155 * @phba: pointer to lpfc hba data structure.
9161 lpfc_destroy_shost(struct lpfc_hba *phba)
9163 struct lpfc_vport *vport = phba->pport;
9173 * @phba: pointer to lpfc hba data structure.
9180 lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
9185 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9186 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9190 old_mask = phba->cfg_prot_mask;
9191 old_guard = phba->cfg_prot_guard;
9194 phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
9197 phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
9201 if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
9202 phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;
9204 if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
9205 if ((old_mask != phba->cfg_prot_mask) ||
9206 (old_guard != phba->cfg_prot_guard))
9207 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9210 phba->cfg_prot_mask,
9211 phba->cfg_prot_guard);
9213 scsi_host_set_prot(shost, phba->cfg_prot_mask);
9214 scsi_host_set_guard(shost, phba->cfg_prot_guard);
9216 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9225 * @phba: pointer to lpfc hba data structure.
9231 lpfc_post_init_setup(struct lpfc_hba *phba)
9237 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
9243 shost = pci_get_drvdata(phba->pcidev);
9244 shost->can_queue = phba->cfg_hba_queue_depth - 10;
9248 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
9250 lpfc_poll_start_timer(phba);
9254 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9268 * @phba: pointer to lpfc hba data structure.
9278 lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
9280 struct pci_dev *pdev = phba->pcidev;
9300 phba->pci_bar0_map = pci_resource_start(pdev, 0);
9303 phba->pci_bar2_map = pci_resource_start(pdev, 2);
9307 phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
9308 if (!phba->slim_memmap_p) {
9315 phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
9316 if (!phba->ctrl_regs_memmap_p) {
9323 phba->slim2p.virt = dma_alloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
9324 &phba->slim2p.phys, GFP_KERNEL);
9325 if (!phba->slim2p.virt)
9328 phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
9329 phba->mbox_ext = (phba->slim2p.virt +
9331 phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
9332 phba->IOCBs = (phba->slim2p.virt +
9335 phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
9337 &phba->hbqslimp.phys,
9339 if (!phba->hbqslimp.virt)
9343 ptr = phba->hbqslimp.virt;
9345 phba->hbqs[i].hbq_virt = ptr;
9346 INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
9350 phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
9351 phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
9353 memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
9355 phba->MBslimaddr = phba->slim_memmap_p;
9356 phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
9357 phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
9358 phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
9359 phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
9365 phba->slim2p.virt, phba->slim2p.phys);
9367 iounmap(phba->ctrl_regs_memmap_p);
9369 iounmap(phba->slim_memmap_p);
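SLI-3 register access above is two ioremapped BARs (SLIM on BAR0, control registers on BAR2) plus a coherent SLIM shadow. The mapping skeleton with its unwind:

    /* Sketch: map the two SLI-3 BARs, unmapping on partial failure. */
    static int map_sli3_bars(struct lpfc_hba *phba)
    {
            struct pci_dev *pdev = phba->pcidev;

            phba->pci_bar0_map = pci_resource_start(pdev, 0);
            phba->pci_bar2_map = pci_resource_start(pdev, 2);

            phba->slim_memmap_p = ioremap(phba->pci_bar0_map,
                                          pci_resource_len(pdev, 0));
            if (!phba->slim_memmap_p)
                    return -ENODEV;

            phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map,
                                               pci_resource_len(pdev, 2));
            if (!phba->ctrl_regs_memmap_p) {
                    iounmap(phba->slim_memmap_p);
                    return -ENODEV;
            }
            return 0;
    }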
9376 * @phba: pointer to lpfc hba data structure.
9382 lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
9387 if (!phba->pcidev)
9390 pdev = phba->pcidev;
9394 phba->hbqslimp.virt, phba->hbqslimp.phys);
9396 phba->slim2p.virt, phba->slim2p.phys);
9399 iounmap(phba->ctrl_regs_memmap_p);
9400 iounmap(phba->slim_memmap_p);
9407 * @phba: pointer to lpfc hba data structure.
9415 lpfc_sli4_post_status_check(struct lpfc_hba *phba)
9424 if (!phba->sli4_hba.PSMPHRregaddr)
9429 if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
9447 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9461 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
9466 &phba->sli4_hba.sli_intf),
9468 &phba->sli4_hba.sli_intf),
9470 &phba->sli4_hba.sli_intf),
9472 &phba->sli4_hba.sli_intf),
9474 &phba->sli4_hba.sli_intf),
9476 &phba->sli4_hba.sli_intf));
9483 &phba->sli4_hba.sli_intf);
9486 phba->sli4_hba.ue_mask_lo =
9487 readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
9488 phba->sli4_hba.ue_mask_hi =
9489 readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
9491 readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
9493 readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
9494 if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
9495 (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
9496 lpfc_printf_log(phba, KERN_ERR,
9506 phba->sli4_hba.ue_mask_lo,
9507 phba->sli4_hba.ue_mask_hi);
9514 if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9517 phba->work_status[0] =
9518 readl(phba->sli4_hba.u.if_type2.
9520 phba->work_status[1] =
9521 readl(phba->sli4_hba.u.if_type2.
9523 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9530 phba->work_status[0],
9531 phba->work_status[1]);
9538 &phba->sli4_hba.sli_intf) ==
9540 pci_write_config_byte(phba->pcidev,
9553 * @phba: pointer to lpfc hba data structure.
9560 lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9564 phba->sli4_hba.u.if_type0.UERRLOregaddr =
9565 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
9566 phba->sli4_hba.u.if_type0.UERRHIregaddr =
9567 phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
9568 phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
9569 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
9570 phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
9571 phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
9572 phba->sli4_hba.SLIINTFregaddr =
9573 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9576 phba->sli4_hba.u.if_type2.EQDregaddr =
9577 phba->sli4_hba.conf_regs_memmap_p +
9579 phba->sli4_hba.u.if_type2.ERR1regaddr =
9580 phba->sli4_hba.conf_regs_memmap_p +
9582 phba->sli4_hba.u.if_type2.ERR2regaddr =
9583 phba->sli4_hba.conf_regs_memmap_p +
9585 phba->sli4_hba.u.if_type2.CTRLregaddr =
9586 phba->sli4_hba.conf_regs_memmap_p +
9588 phba->sli4_hba.u.if_type2.STATUSregaddr =
9589 phba->sli4_hba.conf_regs_memmap_p +
9591 phba->sli4_hba.SLIINTFregaddr =
9592 phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
9593 phba->sli4_hba.PSMPHRregaddr =
9594 phba->sli4_hba.conf_regs_memmap_p +
9596 phba->sli4_hba.RQDBregaddr =
9597 phba->sli4_hba.conf_regs_memmap_p +
9599 phba->sli4_hba.WQDBregaddr =
9600 phba->sli4_hba.conf_regs_memmap_p +
9602 phba->sli4_hba.CQDBregaddr =
9603 phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
9604 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9605 phba->sli4_hba.MQDBregaddr =
9606 phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
9607 phba->sli4_hba.BMBXregaddr =
9608 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9611 phba->sli4_hba.u.if_type2.EQDregaddr =
9612 phba->sli4_hba.conf_regs_memmap_p +
9614 phba->sli4_hba.u.if_type2.ERR1regaddr =
9615 phba->sli4_hba.conf_regs_memmap_p +
9617 phba->sli4_hba.u.if_type2.ERR2regaddr =
9618 phba->sli4_hba.conf_regs_memmap_p +
9620 phba->sli4_hba.u.if_type2.CTRLregaddr =
9621 phba->sli4_hba.conf_regs_memmap_p +
9623 phba->sli4_hba.u.if_type2.STATUSregaddr =
9624 phba->sli4_hba.conf_regs_memmap_p +
9626 phba->sli4_hba.PSMPHRregaddr =
9627 phba->sli4_hba.conf_regs_memmap_p +
9629 phba->sli4_hba.BMBXregaddr =
9630 phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
9634 dev_printk(KERN_ERR, &phba->pcidev->dev,
9643 * @phba: pointer to lpfc hba data structure.
9649 lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
9653 phba->sli4_hba.PSMPHRregaddr =
9654 phba->sli4_hba.ctrl_regs_memmap_p +
9656 phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9658 phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9660 phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
9664 phba->sli4_hba.RQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9666 phba->sli4_hba.WQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9668 phba->sli4_hba.CQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9670 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9672 phba->sli4_hba.MQDBregaddr = phba->sli4_hba.drbl_regs_memmap_p +
9678 dev_err(&phba->pcidev->dev,
9687 * @phba: pointer to lpfc hba data structure.
9696 lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
9701 phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9704 phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9707 phba->sli4_hba.CQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9710 phba->sli4_hba.EQDBregaddr = phba->sli4_hba.CQDBregaddr;
9711 phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9713 phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
9720 * @phba: pointer to lpfc hba data structure.
9734 lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
9751 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
9765 phba->sli4_hba.bmbx.dmabuf = dmabuf;
9766 phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
9768 phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
9770 phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
9781 dma_address = &phba->sli4_hba.bmbx.dma_address;
9782 phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
9787 pa_addr = (uint32_t)((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
9795 * @phba: pointer to lpfc hba data structure.
9805 lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
9807 dma_free_coherent(&phba->pcidev->dev,
9808 phba->sli4_hba.bmbx.bmbx_size,
9809 phba->sli4_hba.bmbx.dmabuf->virt,
9810 phba->sli4_hba.bmbx.dmabuf->phys);
9812 kfree(phba->sli4_hba.bmbx.dmabuf);
9813 memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
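The bootstrap mailbox must land on a hardware-mandated boundary, so lpfc_create_bootstrap_mbox over-allocates by align-1 bytes and derives aligned virtual (avirt) and bus (aphys) addresses inside the buffer. The core trick, with the alignment passed in rather than hard-coding the driver's constant:

    /* Sketch: carve an aligned mailbox window out of a coherent buffer. */
    static int bmbx_setup(struct lpfc_hba *phba, uint32_t align)
    {
            struct lpfc_dmabuf *dmabuf;
            uint32_t bmbx_size;

            dmabuf = kzalloc(sizeof(*dmabuf), GFP_KERNEL);
            if (!dmabuf)
                    return -ENOMEM;

            /* over-allocate so an aligned window always fits */
            bmbx_size = sizeof(struct lpfc_bmbx_create) + (align - 1);
            dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, bmbx_size,
                                              &dmabuf->phys, GFP_KERNEL);
            if (!dmabuf->virt) {
                    kfree(dmabuf);
                    return -ENOMEM;
            }

            phba->sli4_hba.bmbx.dmabuf = dmabuf;
            phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
            phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt, align);
            phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys, align);
            return 0;
    }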
9831 * @phba: pointer to lpfc hba data structure.
9840 lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config)
9848 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9852 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9855 lpfc_topo_to_str[phba->cfg_topology]);
9859 set_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag);
9862 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
9864 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
9867 phba->cfg_topology = ((pt == LINK_FLAGS_LOOP)
9871 clear_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag);
9875 phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP :
9878 phba->cfg_topology = ((pt == LINK_FLAGS_P2P)
9882 if (test_bit(HBA_PERSISTENT_TOPO, &phba->hba_flag))
9883 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9885 lpfc_topo_to_str[phba->cfg_topology]);
9887 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9890 lpfc_topo_to_str[phba->cfg_topology]);
9895 * @phba: pointer to lpfc hba data structure.
9908 lpfc_sli4_read_config(struct lpfc_hba *phba)
9921 pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
9923 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9929 lpfc_read_config(phba, pmb);
9931 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
9933 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9942 phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
9943 phba->sli4_hba.lnk_info.lnk_tp =
9945 phba->sli4_hba.lnk_info.lnk_no =
9947 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
9949 phba->sli4_hba.lnk_info.lnk_tp,
9950 phba->sli4_hba.lnk_info.lnk_no);
9952 lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
9956 phba->bbcredit_support = 1;
9957 phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
9963 lpfc_printf_log(phba, KERN_INFO,
9967 phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_CONFIG;
9970 phba->sli4_hba.fawwpn_flag &= ~LPFC_FAWWPN_CONFIG;
9973 phba->sli4_hba.conf_trunk =
9975 phba->sli4_hba.extents_in_use =
9978 phba->sli4_hba.max_cfg_param.max_xri =
9982 phba->sli4_hba.max_cfg_param.max_xri > 512)
9983 phba->sli4_hba.max_cfg_param.max_xri = 512;
9984 phba->sli4_hba.max_cfg_param.xri_base =
9986 phba->sli4_hba.max_cfg_param.max_vpi =
9989 if (phba->sli4_hba.max_cfg_param.max_vpi > LPFC_MAX_VPORTS)
9990 phba->sli4_hba.max_cfg_param.max_vpi = LPFC_MAX_VPORTS;
9991 phba->sli4_hba.max_cfg_param.vpi_base =
9993 phba->sli4_hba.max_cfg_param.max_rpi =
9995 phba->sli4_hba.max_cfg_param.rpi_base =
9997 phba->sli4_hba.max_cfg_param.max_vfi =
9999 phba->sli4_hba.max_cfg_param.vfi_base =
10001 phba->sli4_hba.max_cfg_param.max_fcfi =
10003 phba->sli4_hba.max_cfg_param.max_eq =
10005 phba->sli4_hba.max_cfg_param.max_rq =
10007 phba->sli4_hba.max_cfg_param.max_wq =
10009 phba->sli4_hba.max_cfg_param.max_cq =
10011 phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
10012 phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
10013 phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
10014 phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
10015 phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
10016 (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
10017 phba->max_vports = phba->max_vpi;
10028 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10029 phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
10030 phba->cgn_sig_freq = lpfc_fabric_cgn_frequency;
10034 phba->cgn_reg_signal = EDC_CG_SIG_WARN_ONLY;
10035 phba->cgn_reg_fpin &= ~LPFC_CGN_FPIN_WARN;
10041 if (phba->cgn_reg_signal !=
10044 phba->cgn_reg_fpin = LPFC_CGN_FPIN_BOTH;
10045 phba->cgn_reg_signal =
10048 phba->cgn_reg_signal =
10050 phba->cgn_reg_fpin =
10057 phba->cgn_init_reg_fpin = phba->cgn_reg_fpin;
10058 phba->cgn_init_reg_signal = phba->cgn_reg_signal;
10060 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
10062 phba->cgn_reg_signal, phba->cgn_reg_fpin);
10064 lpfc_map_topology(phba, rd_config);
10065 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10072 phba->sli4_hba.extents_in_use,
10073 phba->sli4_hba.max_cfg_param.xri_base,
10074 phba->sli4_hba.max_cfg_param.max_xri,
10075 phba->sli4_hba.max_cfg_param.vpi_base,
10076 phba->sli4_hba.max_cfg_param.max_vpi,
10077 phba->sli4_hba.max_cfg_param.vfi_base,
10078 phba->sli4_hba.max_cfg_param.max_vfi,
10079 phba->sli4_hba.max_cfg_param.rpi_base,
10080 phba->sli4_hba.max_cfg_param.max_rpi,
10081 phba->sli4_hba.max_cfg_param.max_fcfi,
10082 phba->sli4_hba.max_cfg_param.max_eq,
10083 phba->sli4_hba.max_cfg_param.max_cq,
10084 phba->sli4_hba.max_cfg_param.max_wq,
10085 phba->sli4_hba.max_cfg_param.max_rq,
10086 phba->lmt);
10092 qmin = phba->sli4_hba.max_cfg_param.max_wq;
10093 if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
10094 qmin = phba->sli4_hba.max_cfg_param.max_cq;
10100 if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
10101 qmin = phba->sli4_hba.max_cfg_param.max_eq;
10104 if ((phba->cfg_irq_chann > qmin) ||
10105 (phba->cfg_hdw_queue > qmin)) {
10106 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10111 phba->sli4_hba.max_cfg_param.max_wq,
10112 phba->sli4_hba.max_cfg_param.max_cq,
10113 phba->sli4_hba.max_cfg_param.max_eq,
10114 qmin, phba->cfg_irq_chann,
10115 phba->cfg_hdw_queue);
10117 if (phba->cfg_irq_chann > qmin)
10118 phba->cfg_irq_chann = qmin;
10119 if (phba->cfg_hdw_queue > qmin)
10120 phba->cfg_hdw_queue = qmin;
10128 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10133 set_bit(HBA_FORCED_LINK_SPEED, &phba->hba_flag);
10137 phba->cfg_link_speed =
10141 phba->cfg_link_speed =
10145 phba->cfg_link_speed =
10149 phba->cfg_link_speed =
10153 phba->cfg_link_speed =
10157 phba->cfg_link_speed =
10161 phba->cfg_link_speed =
10165 phba->cfg_link_speed =
10169 phba->cfg_link_speed =
10173 lpfc_printf_log(phba, KERN_ERR,
10178 phba->cfg_link_speed =
10185 length = phba->sli4_hba.max_cfg_param.max_xri -
10186 lpfc_sli4_get_els_iocb_cnt(phba);
10187 if (phba->cfg_hba_queue_depth > length) {
10188 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
10190 phba->cfg_hba_queue_depth, length);
10191 phba->cfg_hba_queue_depth = length;
10194 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
10201 lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
10205 rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
10211 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10234 phba->sli4_hba.iov.pf_number =
10236 phba->sli4_hba.iov.vf_number =
10243 lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
10245 "vf_number:%d\n", phba->sli4_hba.iov.pf_number,
10246 phba->sli4_hba.iov.vf_number);
10248 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10254 mempool_free(pmb, phba->mbox_mem_pool);
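The queue sanity check in lpfc_sli4_read_config clamps cfg_irq_chann and cfg_hdw_queue to the smallest of the reported WQ/CQ/EQ counts, after reserving CQs for the special (mailbox/ELS/NVMe-LS) queues. As a standalone computation with the reservation passed in:

    /* Sketch: clamp IRQ channels and hardware queues to HBA resources. */
    static void clamp_queue_config(struct lpfc_hba *phba, u32 reserved_cqs)
    {
            u32 qmin = phba->sli4_hba.max_cfg_param.max_wq;

            if (phba->sli4_hba.max_cfg_param.max_cq < qmin)
                    qmin = phba->sli4_hba.max_cfg_param.max_cq;
            qmin -= reserved_cqs;   /* slots kept for special queues */
            if (phba->sli4_hba.max_cfg_param.max_eq < qmin)
                    qmin = phba->sli4_hba.max_cfg_param.max_eq;

            if (phba->cfg_irq_chann > qmin)
                    phba->cfg_irq_chann = qmin;
            if (phba->cfg_hdw_queue > qmin)
                    phba->cfg_hdw_queue = qmin;
    }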
10260 * @phba: pointer to lpfc hba data structure.
10272 lpfc_setup_endian_order(struct lpfc_hba *phba)
10279 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
10282 mboxq = mempool_alloc(phba->mbox_mem_pool,
10285 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10298 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
10300 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10306 mempool_free(mboxq, phba->mbox_mem_pool);
10319 * @phba: pointer to lpfc hba data structure.
10331 lpfc_sli4_queue_verify(struct lpfc_hba *phba)
10338 if (phba->nvmet_support) {
10339 if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq)
10340 phba->cfg_nvmet_mrq = phba->cfg_hdw_queue;
10341 if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
10342 phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
10345 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10347 phba->cfg_hdw_queue, phba->cfg_irq_chann,
10348 phba->cfg_nvmet_mrq);
10351 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10352 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10355 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10356 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10361 lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx)
10367 cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ);
10369 if (phba->enab_exp_wqcq_pages)
10371 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10372 phba->sli4_hba.cq_esize,
10376 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10377 phba->sli4_hba.cq_esize,
10378 phba->sli4_hba.cq_ecount, cpu);
10380 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10388 phba->sli4_hba.hdwq[idx].io_cq = qdesc;
10391 if (phba->enab_exp_wqcq_pages) {
10393 wqesize = (phba->fcp_embed_io) ?
10394 LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
10395 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
10399 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10400 phba->sli4_hba.wq_esize,
10401 phba->sli4_hba.wq_ecount, cpu);
10404 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10411 phba->sli4_hba.hdwq[idx].io_wq = qdesc;
10412 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10418 * @phba: pointer to lpfc hba data structure.
10431 lpfc_sli4_queue_create(struct lpfc_hba *phba)
10444 phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
10445 phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
10446 phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
10447 phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
10448 phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
10449 phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
10450 phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
10451 phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
10452 phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
10453 phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
10455 if (!phba->sli4_hba.hdwq) {
10456 phba->sli4_hba.hdwq = kcalloc(
10457 phba->cfg_hdw_queue, sizeof(struct lpfc_sli4_hdw_queue),
10459 if (!phba->sli4_hba.hdwq) {
10460 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10466 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10467 qp = &phba->sli4_hba.hdwq[idx];
10485 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10486 if (phba->nvmet_support) {
10487 phba->sli4_hba.nvmet_cqset = kcalloc(
10488 phba->cfg_nvmet_mrq,
10491 if (!phba->sli4_hba.nvmet_cqset) {
10492 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10497 phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
10498 phba->cfg_nvmet_mrq,
10501 if (!phba->sli4_hba.nvmet_mrq_hdr) {
10502 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10507 phba->sli4_hba.nvmet_mrq_data = kcalloc(
10508 phba->cfg_nvmet_mrq,
10511 if (!phba->sli4_hba.nvmet_mrq_data) {
10512 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10520 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10528 cpup = &phba->sli4_hba.cpu_map[cpu];
10533 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10536 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10537 phba->sli4_hba.eq_esize,
10538 phba->sli4_hba.eq_ecount, cpu);
10540 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10553 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
10561 cpup = &phba->sli4_hba.cpu_map[cpu];
10568 qp = &phba->sli4_hba.hdwq[cpup->hdwq];
10573 eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
10574 eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
10575 qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
10579 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10580 if (lpfc_alloc_io_wq_cq(phba, idx))
10584 if (phba->nvmet_support) {
10585 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10586 cpu = lpfc_find_cpu_handle(phba, idx,
10588 qdesc = lpfc_sli4_queue_alloc(phba,
10590 phba->sli4_hba.cq_esize,
10591 phba->sli4_hba.cq_ecount,
10594 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10602 phba->sli4_hba.nvmet_cqset[idx] = qdesc;
10610 cpu = lpfc_find_cpu_handle(phba, 0, LPFC_FIND_BY_EQ);
10612 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10613 phba->sli4_hba.cq_esize,
10614 phba->sli4_hba.cq_ecount, cpu);
10616 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10621 phba->sli4_hba.mbx_cq = qdesc;
10624 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10625 phba->sli4_hba.cq_esize,
10626 phba->sli4_hba.cq_ecount, cpu);
10628 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10634 phba->sli4_hba.els_cq = qdesc;
10643 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10644 phba->sli4_hba.mq_esize,
10645 phba->sli4_hba.mq_ecount, cpu);
10647 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10652 phba->sli4_hba.mbx_wq = qdesc;
10659 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10660 phba->sli4_hba.wq_esize,
10661 phba->sli4_hba.wq_ecount, cpu);
10663 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10668 phba->sli4_hba.els_wq = qdesc;
10669 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10671 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10673 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10674 phba->sli4_hba.cq_esize,
10675 phba->sli4_hba.cq_ecount, cpu);
10677 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10683 phba->sli4_hba.nvmels_cq = qdesc;
10686 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10687 phba->sli4_hba.wq_esize,
10688 phba->sli4_hba.wq_ecount, cpu);
10690 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10695 phba->sli4_hba.nvmels_wq = qdesc;
10696 list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
10704 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10705 phba->sli4_hba.rq_esize,
10706 phba->sli4_hba.rq_ecount, cpu);
10708 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10712 phba->sli4_hba.hdr_rq = qdesc;
10715 qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
10716 phba->sli4_hba.rq_esize,
10717 phba->sli4_hba.rq_ecount, cpu);
10719 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10723 phba->sli4_hba.dat_rq = qdesc;
10725 if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
10726 phba->nvmet_support) {
10727 for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
10728 cpu = lpfc_find_cpu_handle(phba, idx,
10731 qdesc = lpfc_sli4_queue_alloc(phba,
10733 phba->sli4_hba.rq_esize,
10737 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10743 phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
10750 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10760 qdesc = lpfc_sli4_queue_alloc(phba,
10762 phba->sli4_hba.rq_esize,
10766 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10772 phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
10777 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
10778 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10779 memset(&phba->sli4_hba.hdwq[idx].nvme_cstat, 0,
10780 sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
10785 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
10786 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10787 memset(&phba->sli4_hba.hdwq[idx].scsi_cstat, 0,
10788 sizeof(phba->sli4_hba.hdwq[idx].scsi_cstat));
10795 lpfc_sli4_queue_destroy(phba);
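
/*
 * Illustrative sketch (not driver code): the create/destroy symmetry used
 * above -- lpfc_sli4_queue_create() does not unwind piecemeal; on any
 * allocation failure it reaches a single error path that calls
 * lpfc_sli4_queue_destroy(), which must tolerate partially built state.
 */
#include <stdlib.h>

struct qset { void *eq, *cq, *wq; };

static void qset_destroy(struct qset *s)
{
	free(s->wq);  s->wq = NULL;	/* free(NULL) is a no-op, so a */
	free(s->cq);  s->cq = NULL;	/* half-built set is handled   */
	free(s->eq);  s->eq = NULL;
}

static int qset_create(struct qset *s)
{
	s->eq = s->cq = s->wq = NULL;
	if (!(s->eq = malloc(64)) ||
	    !(s->cq = malloc(64)) ||
	    !(s->wq = malloc(64))) {
		qset_destroy(s);	/* one teardown covers every failure point */
		return -1;
	}
	return 0;
}
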
10824 lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
10830 hdwq = phba->sli4_hba.hdwq;
10833 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
10840 if (phba->cfg_xpsgl && !phba->nvmet_support)
10841 lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]);
10842 lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]);
10845 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
10847 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
10849 phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
10855 * @phba: pointer to lpfc hba data structure.
10866 lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
10873 spin_lock_irq(&phba->hbalock);
10874 phba->sli.sli_flag |= LPFC_QUEUE_FREE_INIT;
10875 while (phba->sli.sli_flag & LPFC_QUEUE_FREE_WAIT) {
10876 spin_unlock_irq(&phba->hbalock);
10878 spin_lock_irq(&phba->hbalock);
10880 spin_unlock_irq(&phba->hbalock);
10882 lpfc_sli4_cleanup_poll_list(phba);
10885 if (phba->sli4_hba.hdwq)
10886 lpfc_sli4_release_hdwq(phba);
10888 if (phba->nvmet_support) {
10889 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
10890 phba->cfg_nvmet_mrq);
10892 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
10893 phba->cfg_nvmet_mrq);
10894 lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
10895 phba->cfg_nvmet_mrq);
10899 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
10902 __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
10905 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
10908 __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
10909 __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
10912 __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
10915 __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
10918 __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
10921 INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
10924 spin_lock_irq(&phba->hbalock);
10925 phba->sli.sli_flag &= ~LPFC_QUEUE_FREE_INIT;
10926 spin_unlock_irq(&phba->hbalock);
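
/*
 * Illustrative sketch (not driver code): the teardown handshake implied by
 * LPFC_QUEUE_FREE_INIT/LPFC_QUEUE_FREE_WAIT above. The destroyer announces
 * itself under the lock, then drops the lock and retries while any user
 * still holds the WAIT bit. pthreads and sched_yield() stand in for
 * hbalock and msleep().
 */
#include <pthread.h>
#include <sched.h>

#define Q_FREE_INIT 0x1	/* a teardown is in progress */
#define Q_FREE_WAIT 0x2	/* a user is mid-access */

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int qflags;

static void queue_destroy_enter(void)
{
	pthread_mutex_lock(&qlock);
	qflags |= Q_FREE_INIT;
	while (qflags & Q_FREE_WAIT) {	/* let the in-flight user finish */
		pthread_mutex_unlock(&qlock);
		sched_yield();		/* the driver sleeps here instead */
		pthread_mutex_lock(&qlock);
	}
	pthread_mutex_unlock(&qlock);
}

static void queue_destroy_exit(void)
{
	pthread_mutex_lock(&qlock);
	qflags &= ~Q_FREE_INIT;		/* teardown done; users may proceed */
	pthread_mutex_unlock(&qlock);
}
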
10930 lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
10942 (rqbp->rqb_free_buffer)(phba, rqb_buffer);
10949 lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
10957 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10964 rc = lpfc_cq_create(phba, cq, eq,
10967 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10978 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
10983 rc = lpfc_wq_create(phba, wq, cq, qtype);
10985 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10997 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11001 rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
11003 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11010 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11012 phba->sli4_hba.mbx_wq->queue_id,
11013 phba->sli4_hba.mbx_cq->queue_id);
11021 * @phba: pointer to lpfc hba data structure.
11027 lpfc_setup_cq_lookup(struct lpfc_hba *phba)
11032 memset(phba->sli4_hba.cq_lookup, 0,
11033 (sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
11035 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11037 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11042 if (childq->queue_id > phba->sli4_hba.cq_max)
11045 phba->sli4_hba.cq_lookup[childq->queue_id] =
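
/*
 * Illustrative sketch (not driver code): the cq_lookup idea above -- an
 * array indexed directly by queue_id so the interrupt path can resolve a
 * CQ pointer in O(1) instead of walking each EQ's child list.
 */
#include <stdlib.h>

struct cq { unsigned int queue_id; };

static struct cq **build_cq_lookup(struct cq **cqs, int ncq, unsigned int max_id)
{
	struct cq **tbl = calloc(max_id + 1, sizeof(*tbl));
	int i;

	if (!tbl)
		return NULL;
	for (i = 0; i < ncq; i++)
		if (cqs[i] && cqs[i]->queue_id <= max_id)
			tbl[cqs[i]->queue_id] = cqs[i];	/* out-of-range ids are skipped */
	return tbl;
}
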
11053 * @phba: pointer to lpfc hba data structure.
11064 lpfc_sli4_queue_setup(struct lpfc_hba *phba)
11076 mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
11078 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11085 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11089 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11096 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11100 mempool_free(mboxq, phba->mbox_mem_pool);
11105 phba->sli4_hba.fw_func_mode =
11107 phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
11108 phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
11109 phba->sli4_hba.physical_port =
11111 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11113 "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
11114 phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
11116 mempool_free(mboxq, phba->mbox_mem_pool);
11121 qp = phba->sli4_hba.hdwq;
11125 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11132 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11135 cpup = &phba->sli4_hba.cpu_map[cpu];
11146 rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
11147 phba->cfg_fcp_imax);
11149 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11157 phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
11160 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11168 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11169 cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
11170 cpup = &phba->sli4_hba.cpu_map[cpu];
11173 rc = lpfc_create_wq_cq(phba,
11174 phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
11177 &phba->sli4_hba.hdwq[qidx].io_cq_map,
11181 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11195 if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
11196 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11198 phba->sli4_hba.mbx_cq ?
11204 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11205 phba->sli4_hba.mbx_cq,
11206 phba->sli4_hba.mbx_wq,
11209 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11214 if (phba->nvmet_support) {
11215 if (!phba->sli4_hba.nvmet_cqset) {
11216 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11222 if (phba->cfg_nvmet_mrq > 1) {
11223 rc = lpfc_cq_create_set(phba,
11224 phba->sli4_hba.nvmet_cqset,
11228 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11236 rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
11240 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11245 phba->sli4_hba.nvmet_cqset[0]->chann = 0;
11247 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11250 phba->sli4_hba.nvmet_cqset[0]->queue_id,
11256 if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
11257 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11259 phba->sli4_hba.els_cq ? "WQ" : "CQ");
11263 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11264 phba->sli4_hba.els_cq,
11265 phba->sli4_hba.els_wq,
11268 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11273 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11275 phba->sli4_hba.els_wq->queue_id,
11276 phba->sli4_hba.els_cq->queue_id);
11278 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11280 if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
11281 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11283 phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
11287 rc = lpfc_create_wq_cq(phba, qp[0].hba_eq,
11288 phba->sli4_hba.nvmels_cq,
11289 phba->sli4_hba.nvmels_wq,
11292 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11298 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11301 phba->sli4_hba.nvmels_wq->queue_id,
11302 phba->sli4_hba.nvmels_cq->queue_id);
11308 if (phba->nvmet_support) {
11309 if ((!phba->sli4_hba.nvmet_cqset) ||
11310 (!phba->sli4_hba.nvmet_mrq_hdr) ||
11311 (!phba->sli4_hba.nvmet_mrq_data)) {
11312 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11318 if (phba->cfg_nvmet_mrq > 1) {
11319 rc = lpfc_mrq_create(phba,
11320 phba->sli4_hba.nvmet_mrq_hdr,
11321 phba->sli4_hba.nvmet_mrq_data,
11322 phba->sli4_hba.nvmet_cqset,
11325 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11333 rc = lpfc_rq_create(phba,
11334 phba->sli4_hba.nvmet_mrq_hdr[0],
11335 phba->sli4_hba.nvmet_mrq_data[0],
11336 phba->sli4_hba.nvmet_cqset[0],
11339 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11347 phba, KERN_INFO, LOG_INIT,
11350 phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
11351 phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
11352 phba->sli4_hba.nvmet_cqset[0]->queue_id);
11357 if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
11358 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11364 rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
11365 phba->sli4_hba.els_cq, LPFC_USOL);
11367 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11373 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
11376 phba->sli4_hba.hdr_rq->queue_id,
11377 phba->sli4_hba.dat_rq->queue_id,
11378 phba->sli4_hba.els_cq->queue_id);
11380 if (phba->cfg_fcp_imax)
11381 usdelay = LPFC_SEC_TO_USEC / phba->cfg_fcp_imax;
11385 for (qidx = 0; qidx < phba->cfg_irq_chann;
11387 lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
11390 if (phba->sli4_hba.cq_max) {
11391 kfree(phba->sli4_hba.cq_lookup);
11392 phba->sli4_hba.cq_lookup = kcalloc((phba->sli4_hba.cq_max + 1),
11394 if (!phba->sli4_hba.cq_lookup) {
11395 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11397 "size 0x%x\n", phba->sli4_hba.cq_max);
11401 lpfc_setup_cq_lookup(phba);
11406 lpfc_sli4_queue_unset(phba);
11413 * @phba: pointer to lpfc hba data structure.
11424 lpfc_sli4_queue_unset(struct lpfc_hba *phba)
11431 if (phba->sli4_hba.mbx_wq)
11432 lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
11435 if (phba->sli4_hba.nvmels_wq)
11436 lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
11439 if (phba->sli4_hba.els_wq)
11440 lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
11443 if (phba->sli4_hba.hdr_rq)
11444 lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
11445 phba->sli4_hba.dat_rq);
11448 if (phba->sli4_hba.mbx_cq)
11449 lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
11452 if (phba->sli4_hba.els_cq)
11453 lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
11456 if (phba->sli4_hba.nvmels_cq)
11457 lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
11459 if (phba->nvmet_support) {
11461 if (phba->sli4_hba.nvmet_mrq_hdr) {
11462 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11464 phba,
11465 phba->sli4_hba.nvmet_mrq_hdr[qidx],
11466 phba->sli4_hba.nvmet_mrq_data[qidx]);
11470 if (phba->sli4_hba.nvmet_cqset) {
11471 for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
11473 phba, phba->sli4_hba.nvmet_cqset[qidx]);
11478 if (phba->sli4_hba.hdwq) {
11480 for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
11482 qp = &phba->sli4_hba.hdwq[qidx];
11483 lpfc_wq_destroy(phba, qp->io_wq);
11484 lpfc_cq_destroy(phba, qp->io_cq);
11487 for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
11489 eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
11490 lpfc_eq_destroy(phba, eq);
11494 kfree(phba->sli4_hba.cq_lookup);
11495 phba->sli4_hba.cq_lookup = NULL;
11496 phba->sli4_hba.cq_max = 0;
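
/*
 * Illustrative sketch (not driver code): the NULL-guarded unset style used
 * above -- every queue is destroyed only if its pointer is set, so the
 * routine is safe to call after a partially completed setup.
 */
#include <stdlib.h>

static void put_queue(void **q)
{
	if (*q) {
		free(*q);	/* stand-in for lpfc_wq/cq/eq_destroy() */
		*q = NULL;	/* a second unset call becomes harmless */
	}
}
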
11501 * @phba: pointer to lpfc hba data structure.
11516 lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
11521 for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
11526 &phba->sli4_hba.sp_cqe_event_pool);
11531 lpfc_sli4_cq_event_pool_destroy(phba);
11537 * @phba: pointer to lpfc hba data structure.
11546 lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
11551 &phba->sli4_hba.sp_cqe_event_pool, list) {
11559 * @phba: pointer to lpfc hba data structure.
11568 __lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11572 list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
11579 * @phba: pointer to lpfc hba data structure.
11588 lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
11593 spin_lock_irqsave(&phba->hbalock, iflags);
11594 cq_event = __lpfc_sli4_cq_event_alloc(phba);
11595 spin_unlock_irqrestore(&phba->hbalock, iflags);
11601 * @phba: pointer to lpfc hba data structure.
11608 __lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11611 list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
11616 * @phba: pointer to lpfc hba data structure.
11623 lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
11627 spin_lock_irqsave(&phba->hbalock, iflags);
11628 __lpfc_sli4_cq_event_release(phba, cq_event);
11629 spin_unlock_irqrestore(&phba->hbalock, iflags);
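
/*
 * Illustrative sketch (not driver code): the __helper/wrapper split used
 * for the CQ event pool above. The double-underscore variant assumes the
 * caller already holds the lock; the plain variant takes and releases it.
 * A pthread mutex stands in for spin_lock_irqsave(&phba->hbalock, ...).
 */
#include <pthread.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static int pool_count;

static int __pool_get(void)		/* caller must hold pool_lock */
{
	return pool_count > 0 ? pool_count-- : 0;
}

static int pool_get(void)		/* locking variant for general callers */
{
	int got;

	pthread_mutex_lock(&pool_lock);
	got = __pool_get();
	pthread_mutex_unlock(&pool_lock);
	return got;
}
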
11634 * @phba: pointer to lpfc hba data structure.
11640 lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
11649 spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11650 list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
11652 spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
11655 spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
11656 list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
11658 spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
11663 lpfc_sli4_cq_event_release(phba, cq_event);
11669 * @phba: pointer to lpfc hba data structure.
11680 lpfc_pci_function_reset(struct lpfc_hba *phba)
11691 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11694 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool,
11697 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11705 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
11708 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
11714 mempool_free(mboxq, phba->mbox_mem_pool);
11716 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11733 if (lpfc_readl(phba->sli4_hba.u.if_type2.
11744 phba->work_status[0] = readl(
11745 phba->sli4_hba.u.if_type2.ERR1regaddr);
11746 phba->work_status[1] = readl(
11747 phba->sli4_hba.u.if_type2.ERR2regaddr);
11748 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11752 phba->work_status[0],
11753 phba->work_status[1]);
11770 writel(reg_data.word0, phba->sli4_hba.u.if_type2.
11773 pci_read_config_word(phba->pcidev,
11793 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11804 * @phba: pointer to lpfc hba data structure.
11814 lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
11816 struct pci_dev *pdev = phba->pcidev;
11836 &phba->sli4_hba.sli_intf.word0)) {
11841 if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
11843 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11846 phba->sli4_hba.sli_intf.word0);
11850 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
11858 phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
11865 phba->sli4_hba.conf_regs_memmap_p =
11866 ioremap(phba->pci_bar0_map, bar0map_len);
11867 if (!phba->sli4_hba.conf_regs_memmap_p) {
11873 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
11875 lpfc_sli4_bar0_register_memmap(phba, if_type);
11877 phba->pci_bar0_map = pci_resource_start(pdev, 1);
11884 phba->sli4_hba.conf_regs_memmap_p =
11885 ioremap(phba->pci_bar0_map, bar0map_len);
11886 if (!phba->sli4_hba.conf_regs_memmap_p) {
11892 lpfc_sli4_bar0_register_memmap(phba, if_type);
11901 phba->pci_bar1_map = pci_resource_start(pdev,
11904 phba->sli4_hba.ctrl_regs_memmap_p =
11905 ioremap(phba->pci_bar1_map,
11907 if (!phba->sli4_hba.ctrl_regs_memmap_p) {
11914 phba->pci_bar2_memmap_p =
11915 phba->sli4_hba.ctrl_regs_memmap_p;
11916 lpfc_sli4_bar1_register_memmap(phba, if_type);
11929 phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
11931 phba->sli4_hba.drbl_regs_memmap_p =
11932 ioremap(phba->pci_bar1_map, bar1map_len);
11933 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11939 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
11940 lpfc_sli4_bar1_register_memmap(phba, if_type);
11949 phba->pci_bar2_map = pci_resource_start(pdev,
11952 phba->sli4_hba.drbl_regs_memmap_p =
11953 ioremap(phba->pci_bar2_map,
11955 if (!phba->sli4_hba.drbl_regs_memmap_p) {
11962 phba->pci_bar4_memmap_p =
11963 phba->sli4_hba.drbl_regs_memmap_p;
11964 error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
11979 phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
11981 phba->sli4_hba.dpp_regs_memmap_p =
11982 ioremap(phba->pci_bar2_map, bar2map_len);
11983 if (!phba->sli4_hba.dpp_regs_memmap_p) {
11989 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
11996 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_eq_clr_intr;
11997 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_write_eq_db;
11998 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_write_cq_db;
12001 phba->sli4_hba.sli4_eq_clr_intr = lpfc_sli4_if6_eq_clr_intr;
12002 phba->sli4_hba.sli4_write_eq_db = lpfc_sli4_if6_write_eq_db;
12003 phba->sli4_hba.sli4_write_cq_db = lpfc_sli4_if6_write_cq_db;
12012 if (phba->sli4_hba.drbl_regs_memmap_p)
12013 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12015 if (phba->sli4_hba.ctrl_regs_memmap_p)
12016 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12018 iounmap(phba->sli4_hba.conf_regs_memmap_p);
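
/*
 * Illustrative sketch (not driver code): the reverse-order goto unwind that
 * closes lpfc_sli4_pci_mem_setup above -- each mapping that succeeded
 * before the failure is released, newest first. malloc() stands in for
 * ioremap() and free() for iounmap().
 */
#include <stdlib.h>

static int map_bars(void **bar0, void **bar1, void **bar2)
{
	*bar0 = malloc(64);
	if (!*bar0)
		goto out;
	*bar1 = malloc(64);
	if (!*bar1)
		goto out_bar0;
	*bar2 = malloc(64);
	if (!*bar2)
		goto out_bar1;
	return 0;

out_bar1:
	free(*bar1);	/* unwind in reverse creation order */
out_bar0:
	free(*bar0);
out:
	return -1;
}
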
12025 * @phba: pointer to lpfc hba data structure.
12031 lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
12034 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
12038 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12039 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
12040 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12043 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12046 iounmap(phba->sli4_hba.drbl_regs_memmap_p);
12047 iounmap(phba->sli4_hba.conf_regs_memmap_p);
12048 if (phba->sli4_hba.dpp_regs_memmap_p)
12049 iounmap(phba->sli4_hba.dpp_regs_memmap_p);
12054 dev_printk(KERN_ERR, &phba->pcidev->dev,
12063 * @phba: pointer to lpfc hba data structure.
12073 lpfc_sli_enable_msix(struct lpfc_hba *phba)
12079 rc = pci_alloc_irq_vectors(phba->pcidev,
12082 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12092 rc = request_irq(pci_irq_vector(phba->pcidev, 0),
12094 LPFC_SP_DRIVER_HANDLER_NAME, phba);
12096 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12103 rc = request_irq(pci_irq_vector(phba->pcidev, 1),
12105 LPFC_FP_DRIVER_HANDLER_NAME, phba);
12108 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12117 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
12121 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12126 rc = lpfc_config_msi(phba, pmb);
12129 rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
12131 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
12139 mempool_free(pmb, phba->mbox_mem_pool);
12144 mempool_free(pmb, phba->mbox_mem_pool);
12148 free_irq(pci_irq_vector(phba->pcidev, 1), phba);
12152 free_irq(pci_irq_vector(phba->pcidev, 0), phba);
12156 pci_free_irq_vectors(phba->pcidev);
12164 * @phba: pointer to lpfc hba data structure.
12177 lpfc_sli_enable_msi(struct lpfc_hba *phba)
12181 rc = pci_enable_msi(phba->pcidev);
12183 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12186 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12191 rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12192 0, LPFC_DRIVER_NAME, phba);
12194 pci_disable_msi(phba->pcidev);
12195 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
12203 * @phba: pointer to lpfc hba data structure.
12219 lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
12225 retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
12228 clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
12232 retval = lpfc_sli_enable_msix(phba);
12235 phba->intr_type = MSIX;
12241 if (cfg_mode >= 1 && phba->intr_type == NONE) {
12242 retval = lpfc_sli_enable_msi(phba);
12245 phba->intr_type = MSI;
12251 if (phba->intr_type == NONE) {
12252 retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
12253 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
12256 phba->intr_type = INTx;
12265 * @phba: pointer to lpfc hba data structure.
12273 lpfc_sli_disable_intr(struct lpfc_hba *phba)
12277 if (phba->intr_type == MSIX)
12283 free_irq(pci_irq_vector(phba->pcidev, i), phba);
12284 pci_free_irq_vectors(phba->pcidev);
12287 phba->intr_type = NONE;
12288 phba->sli.slistat.sli_intr = 0;
12293 * @phba: pointer to lpfc hba data structure.
12300 lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
12307 cpup = &phba->sli4_hba.cpu_map[cpu];
12328 * @phba: pointer to lpfc hba data structure.
12334 lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
12341 cpup = &phba->sli4_hba.cpu_map[idx];
12354 * @phba: pointer to lpfc hba data structure.
12362 lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag,
12365 struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu];
12371 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12378 * @phba: pointer to lpfc hba data structure.
12383 lpfc_cpu_map_array_init(struct lpfc_hba *phba)
12390 cpup = &phba->sli4_hba.cpu_map[cpu];
12396 eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu);
12404 * @phba: pointer to lpfc hba data structure.
12409 lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba)
12414 for (i = 0; i < phba->cfg_irq_chann; i++) {
12417 eqhdl->phba = phba;
12423 * @phba: pointer to lpfc hba data structure.
12429 * and the phba->sli4_hba.cpu_map array will reflect this.
12432 lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
12450 cpup = &phba->sli4_hba.cpu_map[cpu];
12454 if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
12462 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12486 cpup = &phba->sli4_hba.cpu_map[cpu];
12499 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12500 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12519 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12532 cpup = &phba->sli4_hba.cpu_map[cpu];
12545 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12546 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12553 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
12568 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12581 cpup = &phba->sli4_hba.cpu_map[cpu];
12590 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12608 cpup = &phba->sli4_hba.cpu_map[cpu];
12618 if (next_idx < phba->cfg_hdw_queue) {
12630 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12631 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12644 for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
12645 new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
12653 cpup->hdwq = idx % phba->cfg_hdw_queue;
12661 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
12674 cpup = &phba->sli4_hba.cpu_map[cpu];
12676 c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
12682 cpup->hdwq = idx++ % phba->cfg_hdw_queue;
12686 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
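
/*
 * Illustrative sketch (not driver code): the round-robin fallback the
 * affinity pass above ends with (cpup->hdwq = idx % phba->cfg_hdw_queue) --
 * any CPU that did not inherit a hardware queue via IRQ affinity still gets
 * one, spread evenly across the available queues.
 */
static void assign_leftover_cpus(int *cpu_hdwq, int ncpu, int nhdwq)
{
	int cpu, idx = 0;

	for (cpu = 0; cpu < ncpu; cpu++)
		if (cpu_hdwq[cpu] < 0)			/* -1: no hdwq assigned yet */
			cpu_hdwq[cpu] = idx++ % nhdwq;	/* wrap across all queues */
}
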
12701 * @phba: pointer to lpfc hba data structure.
12706 lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu,
12718 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12719 maskp = pci_irq_get_affinity(phba->pcidev, idx);
12744 eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
12751 static void __lpfc_cpuhp_remove(struct lpfc_hba *phba)
12753 if (phba->sli_rev != LPFC_SLI_REV4)
12757 &phba->cpuhp);
12763 del_timer_sync(&phba->cpuhp_poll_timer);
12766 static void lpfc_cpuhp_remove(struct lpfc_hba *phba)
12768 if (phba->pport &&
12769 test_bit(FC_OFFLINE_MODE, &phba->pport->fc_flag))
12772 __lpfc_cpuhp_remove(phba);
12775 static void lpfc_cpuhp_add(struct lpfc_hba *phba)
12777 if (phba->sli_rev != LPFC_SLI_REV4)
12782 if (!list_empty(&phba->poll_list))
12783 mod_timer(&phba->cpuhp_poll_timer,
12789 &phba->cpuhp);
12792 static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval)
12794 if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
12799 if (phba->sli_rev != LPFC_SLI_REV4) {
12837 * @phba: pointer to HBA context object.
12842 * online cpu on the phba's original_mask and migrate all offlining IRQ
12852 lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline)
12859 if (phba->irq_chann_mode == NORMAL_MODE)
12862 orig_mask = &phba->sli4_hba.irq_aff_mask;
12867 cpup = &phba->sli4_hba.cpu_map[cpu];
12882 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
12892 for (idx = 0; idx < phba->cfg_irq_chann; idx++)
12903 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12908 if (!phba) {
12909 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12913 if (__lpfc_cpuhp_checks(phba, &retval))
12916 lpfc_irq_rebalance(phba, cpu, true);
12918 retval = lpfc_cpuhp_get_eq(phba, cpu, &eqlist);
12933 struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp);
12938 if (!phba) {
12939 WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id());
12943 if (__lpfc_cpuhp_checks(phba, &retval))
12946 lpfc_irq_rebalance(phba, cpu, false);
12948 list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) {
12949 n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ);
12959 * @phba: pointer to lpfc hba data structure.
12986 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
12998 vectors = phba->cfg_irq_chann;
13000 if (phba->irq_chann_mode != NORMAL_MODE)
13001 aff_mask = &phba->sli4_hba.irq_aff_mask;
13005 vectors = min(phba->cfg_irq_chann, cpu_cnt);
13016 rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags);
13018 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13033 rc = pci_irq_vector(phba->pcidev, index);
13035 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13047 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13059 lpfc_assign_eq_map_info(phba, index,
13070 lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ,
13073 maskp = pci_irq_get_affinity(phba->pcidev, index);
13077 cpup = &phba->sli4_hba.cpu_map[cpu];
13093 lpfc_assign_eq_map_info(phba, index,
13101 if (vectors != phba->cfg_irq_chann) {
13102 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13105 phba->cfg_irq_chann, vectors);
13106 if (phba->cfg_irq_chann > vectors)
13107 phba->cfg_irq_chann = vectors;
13121 pci_free_irq_vectors(phba->pcidev);
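
/*
 * Illustrative sketch (not driver code): tolerating a short MSI-X grant as
 * the code above does -- request up to the configured channel count, then
 * trim the configuration down to whatever the platform actually granted.
 */
static int alloc_irq_channels(int granted, int *cfg_irq_chann)
{
	if (granted <= 0)
		return -1;		/* no vectors at all: caller falls back to MSI/INTx */
	if (*cfg_irq_chann > granted)
		*cfg_irq_chann = granted;	/* run with fewer channels */
	return 0;
}
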
13129 * @phba: pointer to lpfc hba data structure.
13142 lpfc_sli4_enable_msi(struct lpfc_hba *phba)
13148 rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1,
13151 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13154 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13159 rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13160 0, LPFC_DRIVER_NAME, phba);
13162 pci_free_irq_vectors(phba->pcidev);
13163 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13169 rc = pci_irq_vector(phba->pcidev, 0);
13171 pci_free_irq_vectors(phba->pcidev);
13172 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13179 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu);
13181 for (index = 0; index < phba->cfg_irq_chann; index++) {
13191 * @phba: pointer to lpfc hba data structure.
13207 lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
13217 retval = lpfc_sli4_enable_msix(phba);
13220 phba->intr_type = MSIX;
13227 if (cfg_mode >= 1 && phba->intr_type == NONE) {
13228 retval = lpfc_sli4_enable_msi(phba);
13231 phba->intr_type = MSI;
13237 if (phba->intr_type == NONE) {
13238 retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
13239 IRQF_SHARED, LPFC_DRIVER_NAME, phba);
13245 phba->intr_type = INTx;
13249 retval = pci_irq_vector(phba->pcidev, 0);
13251 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
13259 lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ,
13261 for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
13272 * @phba: pointer to lpfc hba data structure.
13280 lpfc_sli4_disable_intr(struct lpfc_hba *phba)
13283 if (phba->intr_type == MSIX) {
13288 for (index = 0; index < phba->cfg_irq_chann; index++) {
13294 free_irq(phba->pcidev->irq, phba);
13297 pci_free_irq_vectors(phba->pcidev);
13300 phba->intr_type = NONE;
13301 phba->sli.slistat.sli_intr = 0;
13306 * @phba: pointer to lpfc hba data structure.
13312 lpfc_unset_hba(struct lpfc_hba *phba)
13314 set_bit(FC_UNLOADING, &phba->pport->load_flag);
13316 kfree(phba->vpi_bmask);
13317 kfree(phba->vpi_ids);
13319 lpfc_stop_hba_timers(phba);
13321 phba->pport->work_port_events = 0;
13323 lpfc_sli_hba_down(phba);
13325 lpfc_sli_brdrestart(phba);
13327 lpfc_sli_disable_intr(phba);
13334 * @phba: Pointer to HBA context object.
13346 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
13353 int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13362 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13363 lpfc_nvme_wait_for_io_drain(phba);
13366 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13367 qp = &phba->sli4_hba.hdwq[idx];
13375 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13377 list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13383 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13388 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13393 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13405 for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
13406 qp = &phba->sli4_hba.hdwq[idx];
13415 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13417 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
13420 list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
13427 * @phba: Pointer to HBA context object.
13436 lpfc_sli4_hba_unset(struct lpfc_hba *phba)
13440 struct pci_dev *pdev = phba->pcidev;
13442 lpfc_stop_hba_timers(phba);
13443 hrtimer_cancel(&phba->cmf_stats_timer);
13444 hrtimer_cancel(&phba->cmf_timer);
13446 if (phba->pport)
13447 phba->sli4_hba.intr_enable = 0;
13455 spin_lock_irq(&phba->hbalock);
13456 phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13457 spin_unlock_irq(&phba->hbalock);
13459 while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13465 if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
13466 spin_lock_irq(&phba->hbalock);
13467 mboxq = phba->sli.mbox_active;
13469 __lpfc_mbox_cmpl_put(phba, mboxq);
13470 phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
13471 phba->sli.mbox_active = NULL;
13472 spin_unlock_irq(&phba->hbalock);
13476 lpfc_sli_hba_iocb_abort(phba);
13478 if (!pci_channel_offline(phba->pcidev))
13480 lpfc_sli4_xri_exchange_busy_wait(phba);
13482 /* per-phba callback de-registration for hotplug event */
13483 if (phba->pport)
13484 lpfc_cpuhp_remove(phba);
13487 lpfc_sli4_disable_intr(phba);
13490 if (phba->cfg_sriov_nr_virtfn)
13494 kthread_stop(phba->worker_thread);
13497 lpfc_ras_stop_fwlog(phba);
13500 lpfc_pci_function_reset(phba);
13503 lpfc_sli4_queue_destroy(phba);
13506 if (phba->ras_fwlog.ras_enabled)
13507 lpfc_sli4_ras_dma_free(phba);
13510 if (phba->pport)
13511 phba->pport->work_port_events = 0;
13565 lpfc_init_congestion_buf(struct lpfc_hba *phba)
13571 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13572 "6235 INIT Congestion Buffer %p\n", phba->cgn_i);
13574 if (!phba->cgn_i)
13576 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13578 atomic_set(&phba->cgn_fabric_warn_cnt, 0);
13579 atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
13580 atomic_set(&phba->cgn_sync_alarm_cnt, 0);
13581 atomic_set(&phba->cgn_sync_warn_cnt, 0);
13583 atomic_set(&phba->cgn_driver_evt_cnt, 0);
13584 atomic_set(&phba->cgn_latency_evt_cnt, 0);
13585 atomic64_set(&phba->cgn_latency_evt, 0);
13586 phba->cgn_evt_minute = 0;
13593 cp->cgn_info_mode = phba->cgn_p.cgn_param_mode;
13594 cp->cgn_info_level0 = phba->cgn_p.cgn_param_level0;
13595 cp->cgn_info_level1 = phba->cgn_p.cgn_param_level1;
13596 cp->cgn_info_level2 = phba->cgn_p.cgn_param_level2;
13598 lpfc_cgn_update_tstamp(phba, &cp->base_time);
13601 if (phba->pport) {
13602 size = (uint16_t)(phba->pport->cfg_lun_queue_depth);
13613 phba->cgn_evt_timestamp = jiffies +
13618 lpfc_init_congestion_stat(struct lpfc_hba *phba)
13623 lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
13624 "6236 INIT Congestion Stat %p\n", phba->cgn_i);
13626 if (!phba->cgn_i)
13629 cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
13632 lpfc_cgn_update_tstamp(phba, &cp->stat_start);
13639 * @phba: Pointer to hba context object.
13643 __lpfc_reg_congestion_buf(struct lpfc_hba *phba, int reg)
13651 if (!phba->cgn_i)
13654 mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
13656 lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
13659 phba->pport->port_state, reg);
13665 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13676 putPaddrLow(phba->cgn_i->phys);
13678 putPaddrHigh(phba->cgn_i->phys);
13680 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13686 mempool_free(mboxq, phba->mbox_mem_pool);
13688 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13699 lpfc_unreg_congestion_buf(struct lpfc_hba *phba)
13701 lpfc_cmf_stop(phba);
13702 return __lpfc_reg_congestion_buf(phba, 0);
13706 lpfc_reg_congestion_buf(struct lpfc_hba *phba)
13708 return __lpfc_reg_congestion_buf(phba, 1);
13713 * @phba: Pointer to HBA context object.
13724 lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
13739 phba->sli4_hba.rpi_hdrs_in_use = 1;
13744 lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
13747 if (!phba->sli4_hba.intr_enable)
13748 rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
13750 mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
13751 rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
13755 sli4_params = &phba->sli4_hba.pc_sli4_params;
13765 phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
13767 phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
13786 phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
13787 phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
13791 phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters);
13802 if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
13803 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13806 phba->cfg_enable_fc4_type);
13812 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
13813 lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
13818 phba->cfg_enable_fc4_type);
13820 phba->nvmet_support = 0;
13821 phba->cfg_nvmet_mrq = 0;
13822 phba->cfg_nvme_seg_cnt = 0;
13825 if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
13827 phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
13834 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
13835 phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
13839 phba->cfg_enable_pbde = 1;
13841 phba->cfg_enable_pbde = 0;
13851 if (phba->cfg_suppress_rsp && bf_get(cfg_xib, mbx_sli4_parameters) &&
13853 phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
13855 phba->cfg_suppress_rsp = 0;
13858 phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
13864 rc = dma_set_max_seg_size(&phba->pcidev->dev, sli4_params->sge_supp_len);
13866 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
13877 phba->fcp_embed_io = 1;
13879 phba->fcp_embed_io = 0;
13881 lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
13884 phba->cfg_enable_pbde,
13885 phba->fcp_embed_io, sli4_params->nvme,
13886 phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);
13888 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
13890 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
13898 phba->enab_exp_wqcq_pages = 1;
13900 phba->enab_exp_wqcq_pages = 0;
13905 phba->mds_diags_support = 1;
13907 phba->mds_diags_support = 0;
13913 phba->nsler = 1;
13915 phba->nsler = 0;
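
/*
 * Illustrative sketch (not driver code): deriving driver features from
 * reported capability bits, as the SLI4 parameter parsing above does for
 * embedded FCP I/O and PBDE. The bit names below are invented for the
 * example and do not match the mailbox layout.
 */
#define CAP_EMBED_IO	0x1
#define CAP_PBDE	0x2

struct features { int fcp_embed_io; int enable_pbde; };

static void apply_caps(struct features *f, unsigned int caps)
{
	f->fcp_embed_io = (caps & CAP_EMBED_IO) ? 1 : 0;
	f->enable_pbde  = (caps & CAP_PBDE) ? 1 : 0;	/* forced off if unsupported */
}
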
13940 struct lpfc_hba *phba;
13947 phba = lpfc_hba_alloc(pdev);
13948 if (!phba)
13952 error = lpfc_enable_pci_dev(phba);
13957 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
13962 error = lpfc_sli_pci_mem_setup(phba);
13964 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13970 error = lpfc_sli_driver_resource_setup(phba);
13972 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13979 error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
13981 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13987 error = lpfc_setup_driver_resource_phase2(phba);
13989 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
13995 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
13998 error = lpfc_create_shost(phba);
14000 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14006 vport = phba->pport;
14009 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14016 cfg_mode = phba->cfg_use_msi;
14019 lpfc_stop_port(phba);
14021 intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
14023 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14029 if (lpfc_sli_hba_setup(phba)) {
14030 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14040 phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
14042 phba->intr_mode = intr_mode;
14043 lpfc_log_intr_mode(phba, intr_mode);
14046 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14051 lpfc_sli_disable_intr(phba);
14058 lpfc_post_init_setup(phba);
14061 lpfc_create_static_vport(phba);
14066 lpfc_unset_hba(phba);
14070 lpfc_destroy_shost(phba);
14072 lpfc_unset_driver_resource_phase2(phba);
14074 lpfc_free_iocb_list(phba);
14076 lpfc_sli_driver_resource_unset(phba);
14078 lpfc_sli_pci_mem_unset(phba);
14080 lpfc_disable_pci_dev(phba);
14084 lpfc_hba_free(phba);
14103 struct lpfc_hba *phba = vport->phba;
14111 vports = lpfc_create_vport_work_array(phba);
14113 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14118 lpfc_destroy_vport_work_array(phba, vports);
14134 lpfc_sli_hba_down(phba);
14136 kthread_stop(phba->worker_thread);
14138 lpfc_sli_brdrestart(phba);
14140 kfree(phba->vpi_bmask);
14141 kfree(phba->vpi_ids);
14143 lpfc_stop_hba_timers(phba);
14144 spin_lock_irq(&phba->port_list_lock);
14146 spin_unlock_irq(&phba->port_list_lock);
14151 if (phba->cfg_sriov_nr_virtfn)
14155 lpfc_sli_disable_intr(phba);
14163 lpfc_scsi_free(phba);
14164 lpfc_free_iocb_list(phba);
14166 lpfc_mem_free_all(phba);
14169 phba->hbqslimp.virt, phba->hbqslimp.phys);
14173 phba->slim2p.virt, phba->slim2p.phys);
14176 iounmap(phba->ctrl_regs_memmap_p);
14177 iounmap(phba->slim_memmap_p);
14179 lpfc_hba_free(phba);
14209 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14211 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14215 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14216 lpfc_offline(phba);
14217 kthread_stop(phba->worker_thread);
14220 lpfc_sli_disable_intr(phba);
14248 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14252 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
14256 phba->worker_thread = kthread_run(lpfc_do_work, phba,
14257 "lpfc_worker_%d", phba->brd_no);
14258 if (IS_ERR(phba->worker_thread)) {
14259 error = PTR_ERR(phba->worker_thread);
14260 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14267 lpfc_cpu_map_array_init(phba);
14269 lpfc_hba_eq_hdl_array_init(phba);
14271 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14273 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14277 phba->intr_mode = intr_mode;
14280 lpfc_sli_brdrestart(phba);
14281 lpfc_online(phba);
14284 lpfc_log_intr_mode(phba, phba->intr_mode);
14291 * @phba: pointer to lpfc hba data structure.
14297 lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
14299 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14306 lpfc_sli_abort_fcp_rings(phba);
14311 * @phba: pointer to lpfc hba data structure.
14318 lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
14320 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14324 lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
14327 lpfc_scsi_dev_block(phba);
14330 lpfc_sli_flush_io_rings(phba);
14333 lpfc_stop_hba_timers(phba);
14336 lpfc_sli_disable_intr(phba);
14337 pci_disable_device(phba->pcidev);
14342 * @phba: pointer to lpfc hba data structure.
14349 lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
14351 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14354 lpfc_scsi_dev_block(phba);
14355 lpfc_sli4_prep_dev_for_reset(phba);
14358 lpfc_stop_hba_timers(phba);
14361 lpfc_sli_flush_io_rings(phba);
14386 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14391 lpfc_sli_prep_dev_for_recover(phba);
14395 lpfc_sli_prep_dev_for_reset(phba);
14399 lpfc_sli_prep_dev_for_perm_failure(phba);
14403 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14405 lpfc_sli_prep_dev_for_reset(phba);
14432 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14433 struct lpfc_sli *psli = &phba->sli;
14454 spin_lock_irq(&phba->hbalock);
14456 spin_unlock_irq(&phba->hbalock);
14459 intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
14461 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14466 phba->intr_mode = intr_mode;
14469 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
14470 lpfc_offline(phba);
14471 lpfc_sli_brdrestart(phba);
14474 lpfc_log_intr_mode(phba, phba->intr_mode);
14493 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
14496 lpfc_online(phba);
14501 * @phba: pointer to lpfc hba data structure.
14506 lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
14508 int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
14510 if (phba->sli_rev == LPFC_SLI_REV4) {
14531 * @phba: pointer to lpfc hba data structure.
14536 lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
14538 int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
14540 if (phba->nvmet_support)
14547 lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset,
14554 sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf);
14568 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14572 phba->pcidev->device, magic_number, ftype, fid,
14576 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14581 phba->pcidev->device, magic_number, ftype, fid,
14585 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14589 offset, phba->pcidev->device, magic_number,
14605 struct lpfc_hba *phba = (struct lpfc_hba *)context;
14627 lpfc_decode_firmware_rev(phba, fwrev, 1);
14629 lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
14640 dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
14665 rc = lpfc_wr_object(phba, &dma_buffer_list,
14668 rc = lpfc_log_write_firmware_error(phba, offset,
14679 lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
14687 dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
14694 lpfc_log_msg(phba, KERN_ERR, LOG_INIT | LOG_SLI,
14697 lpfc_log_msg(phba, KERN_NOTICE, LOG_INIT | LOG_SLI,
14703 * @phba: pointer to lpfc hba data structure.
14710 lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
14717 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
14721 scnprintf(file_name, sizeof(file_name), "%s.grp", phba->ModelName);
14725 file_name, &phba->pcidev->dev,
14726 GFP_KERNEL, (void *)phba,
14729 ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
14731 lpfc_write_firmware(fw, (void *)phba);
14760 struct lpfc_hba *phba;
14767 phba = lpfc_hba_alloc(pdev);
14768 if (!phba)
14771 INIT_LIST_HEAD(&phba->poll_list);
14774 error = lpfc_enable_pci_dev(phba);
14779 error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
14784 error = lpfc_sli4_pci_mem_setup(phba);
14786 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14792 error = lpfc_sli4_driver_resource_setup(phba);
14794 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14799 spin_lock_init(&phba->rrq_list_lock);
14800 INIT_LIST_HEAD(&phba->active_rrq_list);
14801 INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);
14804 error = lpfc_setup_driver_resource_phase2(phba);
14806 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14812 lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
14815 cfg_mode = phba->cfg_use_msi;
14818 phba->pport = NULL;
14819 lpfc_stop_port(phba);
14822 lpfc_cpu_map_array_init(phba);
14825 lpfc_hba_eq_hdl_array_init(phba);
14828 intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
14830 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14836 if (phba->intr_type != MSIX) {
14837 phba->cfg_irq_chann = 1;
14838 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14839 if (phba->nvmet_support)
14840 phba->cfg_nvmet_mrq = 1;
14843 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
14846 error = lpfc_create_shost(phba);
14848 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14852 vport = phba->pport;
14858 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
14864 if (lpfc_sli4_hba_setup(phba)) {
14865 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14872 phba->intr_mode = intr_mode;
14873 lpfc_log_intr_mode(phba, intr_mode);
14876 lpfc_post_init_setup(phba);
14881 if (phba->nvmet_support == 0) {
14882 if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14890 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14899 if (phba->cfg_request_firmware_upgrade)
14900 lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
14903 lpfc_create_static_vport(phba);
14905 timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
14906 cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
14913 lpfc_destroy_shost(phba);
14915 lpfc_sli4_disable_intr(phba);
14917 lpfc_unset_driver_resource_phase2(phba);
14919 lpfc_sli4_driver_resource_unset(phba);
14921 lpfc_sli4_pci_mem_unset(phba);
14923 lpfc_disable_pci_dev(phba);
14927 lpfc_hba_free(phba);
14946 struct lpfc_hba *phba = vport->phba;
14951 if (phba->cgn_i)
14952 lpfc_unreg_congestion_buf(phba);
14957 vports = lpfc_create_vport_work_array(phba);
14959 for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
14964 lpfc_destroy_vport_work_array(phba, vports);
14974 lpfc_nvmet_destroy_targetport(phba);
14978 if (phba->cfg_xri_rebalancing)
14979 lpfc_destroy_multixri_pools(phba);
14988 lpfc_stop_hba_timers(phba);
14989 spin_lock_irq(&phba->port_list_lock);
14991 spin_unlock_irq(&phba->port_list_lock);
14996 lpfc_io_free(phba);
14997 lpfc_free_iocb_list(phba);
14998 lpfc_sli4_hba_unset(phba);
15000 lpfc_unset_driver_resource_phase2(phba);
15001 lpfc_sli4_driver_resource_unset(phba);
15004 lpfc_sli4_pci_mem_unset(phba);
15008 lpfc_disable_pci_dev(phba);
15011 lpfc_hba_free(phba);
15040 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15042 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15046 lpfc_offline_prep(phba, LPFC_MBX_WAIT);
15047 lpfc_offline(phba);
15048 kthread_stop(phba->worker_thread);
15051 lpfc_sli4_disable_intr(phba);
15052 lpfc_sli4_queue_destroy(phba);
15080 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15084 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
15088 phba->worker_thread = kthread_run(lpfc_do_work, phba,
15089 "lpfc_worker_%d", phba->brd_no);
15090 if (IS_ERR(phba->worker_thread)) {
15091 error = PTR_ERR(phba->worker_thread);
15092 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15099 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15101 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15105 phba->intr_mode = intr_mode;
15108 lpfc_sli_brdrestart(phba);
15109 lpfc_online(phba);
15112 lpfc_log_intr_mode(phba, phba->intr_mode);
15119 * @phba: pointer to lpfc hba data structure.
15125 lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
15127 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15133 lpfc_sli_abort_fcp_rings(phba);
15138 * @phba: pointer to lpfc hba data structure.
15145 lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
15147 int offline = pci_channel_offline(phba->pcidev);
15149 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15154 lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);
15158 lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
15160 lpfc_sli_flush_io_rings(phba);
15161 lpfc_offline(phba);
15164 lpfc_stop_hba_timers(phba);
15166 lpfc_sli4_queue_destroy(phba);
15168 lpfc_sli4_disable_intr(phba);
15169 pci_disable_device(phba->pcidev);
15174 * @phba: pointer to lpfc hba data structure.
15181 lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
15183 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15187 lpfc_scsi_dev_block(phba);
15190 lpfc_stop_hba_timers(phba);
15193 lpfc_sli_flush_io_rings(phba);
15216 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15222 lpfc_sli4_prep_dev_for_recover(phba);
15225 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15228 lpfc_sli4_prep_dev_for_reset(phba);
15230 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
15235 set_bit(HBA_PCI_ERR, &phba->bit_flags);
15237 lpfc_sli4_prep_dev_for_perm_failure(phba);
15240 hba_pci_err = test_and_set_bit(HBA_PCI_ERR, &phba->bit_flags);
15242 lpfc_sli4_prep_dev_for_reset(phba);
15244 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15246 lpfc_sli4_prep_dev_for_reset(phba);
15273 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15274 struct lpfc_sli *psli = &phba->sli;
15287 hba_pci_err = test_and_clear_bit(HBA_PCI_ERR, &phba->bit_flags);
15300 spin_lock_irq(&phba->hbalock);
15302 spin_unlock_irq(&phba->hbalock);
15305 lpfc_cpu_map_array_init(phba);
15307 intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
15309 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15314 phba->intr_mode = intr_mode;
15315 lpfc_cpu_affinity_check(phba, phba->cfg_irq_chann);
15318 lpfc_log_intr_mode(phba, phba->intr_mode);
15337 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15345 if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
15347 lpfc_sli_brdrestart(phba);
15349 lpfc_online(phba);
15403 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15405 switch (phba->pci_dev_grp) {
15413 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15415 phba->pci_dev_grp);
15438 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15441 switch (phba->pci_dev_grp) {
15449 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15451 phba->pci_dev_grp);
15474 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15477 switch (phba->pci_dev_grp) {
15485 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15487 phba->pci_dev_grp);
15512 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15515 if (phba->link_state == LPFC_HBA_ERROR &&
15516 test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
15519 switch (phba->pci_dev_grp) {
15527 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15529 phba->pci_dev_grp);
15553 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15556 switch (phba->pci_dev_grp) {
15564 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15566 phba->pci_dev_grp);
15586 struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
15588 switch (phba->pci_dev_grp) {
15596 lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15598 phba->pci_dev_grp);
15606 * @phba: pointer to lpfc hba data structure.
15615 lpfc_sli4_oas_verify(struct lpfc_hba *phba)
15618 if (!phba->cfg_EnableXLane)
15621 if (phba->sli4_hba.pc_sli4_params.oas_supported) {
15622 phba->cfg_fof = 1;
15624 phba->cfg_fof = 0;
15625 mempool_destroy(phba->device_data_mem_pool);
15626 phba->device_data_mem_pool = NULL;
15634 * @phba: pointer to lpfc hba data structure.
15640 lpfc_sli4_ras_init(struct lpfc_hba *phba)
15643 if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
15645 (bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf) ==
15647 phba->ras_fwlog.ras_hwsupport = true;
15648 if (phba->cfg_ras_fwlog_func == PCI_FUNC(phba->pcidev->devfn) &&
15649 phba->cfg_ras_fwlog_buffsize)
15650 phba->ras_fwlog.ras_enabled = true;
15652 phba->ras_fwlog.ras_enabled = false;
15654 phba->ras_fwlog.ras_hwsupport = false;
15761 void lpfc_dmp_dbg(struct lpfc_hba *phba)
15770 if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
15773 start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
15774 dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
15791 dev_info(&phba->pcidev->dev, "start %d end %d cnt %d\n",
15799 rem_nsec = do_div(phba->dbg_log[temp_idx].t_ns, NSEC_PER_SEC);
15800 dev_info(&phba->pcidev->dev, "%d: [%5lu.%06lu] %s",
15802 (unsigned long)phba->dbg_log[temp_idx].t_ns,
15804 phba->dbg_log[temp_idx].log);
15807 atomic_set(&phba->dbg_log_cnt, 0);
15808 atomic_set(&phba->dbg_log_dmping, 0);
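
/*
 * Illustrative sketch (not driver code): the dbg_log ring above -- a fixed
 * array indexed by an atomically incremented counter taken modulo the ring
 * size, so concurrent writers never collide on an index. C11 atomics stand
 * in for the kernel's atomic_t; DBG_SZ and the entry width are invented.
 */
#include <stdatomic.h>
#include <stdio.h>

#define DBG_SZ 256

static char dbg_ring[DBG_SZ][80];
static atomic_uint dbg_idx;

static void dbg_log(const char *msg)
{
	unsigned int idx = atomic_fetch_add(&dbg_idx, 1) % DBG_SZ;

	snprintf(dbg_ring[idx], sizeof(dbg_ring[idx]), "%s", msg);
}
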
15812 void lpfc_dbg_print(struct lpfc_hba *phba, const char *fmt, ...)
15816 int dbg_dmping = atomic_read(&phba->dbg_log_dmping);
15824 dev_info(&phba->pcidev->dev, "%pV", &vaf);
15828 idx = (unsigned int)atomic_fetch_add(1, &phba->dbg_log_idx) %
15831 atomic_inc(&phba->dbg_log_cnt);
15833 vscnprintf(phba->dbg_log[idx].log,
15834 sizeof(phba->dbg_log[idx].log), fmt, args);
15837 phba->dbg_log[idx].t_ns = local_clock();