Lines matching refs:ndlp in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/drivers/scsi/lpfc/

struct lpfc_nodelist (conventionally "ndlp") is the lpfc driver's per-remote-node discovery object; each entry below is a matching source line, prefixed with its line number in the file.

71 	struct lpfc_nodelist * ndlp;
75 ndlp = rdata->pnode;
77 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
85 phba = ndlp->phba;
87 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
89 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
91 if (ndlp->nlp_sid != NLP_NO_SID) {
92 lpfc_sli_abort_iocb(ndlp->vport,
94 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
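The guard at 77 is worth pausing on: rdata->pnode can go NULL (or point at a node being torn down) while the rport still exists, so both the pointer and the node's active bit are tested before any field is dereferenced. A minimal userspace sketch of that pattern, with made-up stand-ins for NLP_CHK_NODE_ACT and NLP_NO_SID:

    #include <stdbool.h>

    struct node {
        bool active;          /* stand-in for NLP_CHK_NODE_ACT(ndlp) */
        unsigned int sid;     /* stand-in for ndlp->nlp_sid */
    };

    struct rport_data {
        struct node *pnode;   /* stand-in for rdata->pnode */
    };

    #define NO_SID 0xffffffffu /* stand-in for NLP_NO_SID */

    static void terminate_rport_io(struct rport_data *rdata)
    {
        struct node *ndlp = rdata->pnode;

        if (!ndlp || !ndlp->active)
            return;                /* node already gone or inactive */
        if (ndlp->sid != NO_SID) {
            /* the driver aborts outstanding iocbs for this SCSI id here */
        }
    }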
105 struct lpfc_nodelist * ndlp;
113 ndlp = rdata->pnode;
114 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
117 vport = ndlp->vport;
122 ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag);
126 * appropriately; we just need to clean up the ndlp rport info here.
130 put_rport = ndlp->rport != NULL;
132 ndlp->rport = NULL;
134 lpfc_nlp_put(ndlp);
140 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE)
143 evtp = &ndlp->dev_loss_evt;
152 evtp->evt_arg1 = lpfc_nlp_get(ndlp);
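Lines 140-152 show the producer half of a reference handoff: dev-loss runs in a context that should not block, so the node is queued as a work event and pinned with lpfc_nlp_get() so it cannot be freed before the worker runs. A hedged userspace sketch of that half (node_get and disc_evt are illustrative stand-ins, not driver names):

    #include <stdatomic.h>

    struct refnode { atomic_int ref; };

    struct disc_evt { void *evt_arg1; };  /* mirrors evtp->evt_arg1 */

    static struct refnode *node_get(struct refnode *n) /* ~ lpfc_nlp_get */
    {
        atomic_fetch_add(&n->ref, 1);
        return n;
    }

    static void queue_dev_loss(struct refnode *ndlp, struct disc_evt *evtp)
    {
        /* the reference travels with the event, exactly as at 152 */
        evtp->evt_arg1 = node_get(ndlp);
        /* ... link evtp onto the work list and wake the worker ... */
    }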
168 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
179 rport = ndlp->rport;
185 name = (uint8_t *) &ndlp->nlp_portname;
186 vport = ndlp->vport;
191 ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
195 * appropriately; we just need to clean up the ndlp rport info here.
198 if (ndlp->nlp_sid != NLP_NO_SID) {
202 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
205 put_rport = ndlp->rport != NULL;
207 ndlp->rport = NULL;
209 lpfc_nlp_put(ndlp);
215 if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
222 ndlp->nlp_DID);
226 if (ndlp->nlp_type & NLP_FABRIC) {
229 put_rport = ndlp->rport != NULL;
231 ndlp->rport = NULL;
233 lpfc_nlp_put(ndlp);
239 if (ndlp->nlp_sid != NLP_NO_SID) {
243 ndlp->nlp_sid, 0, LPFC_CTX_TGT);
253 ndlp->nlp_DID, ndlp->nlp_flag,
254 ndlp->nlp_state, ndlp->nlp_rpi);
262 ndlp->nlp_DID, ndlp->nlp_flag,
263 ndlp->nlp_state, ndlp->nlp_rpi);
267 put_rport = ndlp->rport != NULL;
269 ndlp->rport = NULL;
271 lpfc_nlp_put(ndlp);
276 !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
277 !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
278 (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
279 (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
280 (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
281 lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
409 struct lpfc_nodelist *ndlp;
420 ndlp = (struct lpfc_nodelist *) (evtp->evt_arg1);
421 lpfc_els_retry_delay_handler(ndlp);
422 free_evt = 0; /* evt is part of ndlp */
426 lpfc_nlp_put(ndlp);
429 ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
430 lpfc_dev_loss_tmo_handler(ndlp);
435 lpfc_nlp_put(ndlp);
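Lines 420-435 are the consumer half of that handoff: the worker casts evt_arg1 back to a node, runs the handler, then drops the reference taken at queue time. Continuing the sketch above:

    static void node_put(struct refnode *n)          /* ~ lpfc_nlp_put */
    {
        if (atomic_fetch_sub(&n->ref, 1) == 1) {
            /* last reference: the node would be released here */
        }
    }

    static void handle_dev_loss(struct refnode *ndlp) { (void)ndlp; }

    static void work_list_evt(struct disc_evt *evtp)
    {
        struct refnode *ndlp = evtp->evt_arg1;

        handle_dev_loss(ndlp);
        node_put(ndlp);  /* balances node_get() from queue_dev_loss() */
    }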
687 struct lpfc_nodelist *ndlp, *next_ndlp;
690 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
691 if (!NLP_CHK_NODE_ACT(ndlp))
693 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
697 (ndlp->nlp_DID == NameServer_DID)))
698 lpfc_unreg_rpi(vport, ndlp);
702 (!remove && ndlp->nlp_type & NLP_FABRIC))
704 rc = lpfc_disc_state_machine(vport, ndlp, NULL,
819 struct lpfc_nodelist *ndlp;
821 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
822 if (!NLP_CHK_NODE_ACT(ndlp))
824 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
826 if (ndlp->nlp_type & NLP_FABRIC) {
827 /* On Linkup it's safe to clean up the ndlp
830 if (ndlp->nlp_DID != Fabric_DID)
831 lpfc_unreg_rpi(vport, ndlp);
832 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
833 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
837 lpfc_unreg_rpi(vport, ndlp);
2261 struct lpfc_nodelist *ndlp;
2279 ndlp = lpfc_findnode_did(vport, Fabric_DID);
2280 if (!ndlp)
2286 lpfc_register_new_vport(phba, vport, ndlp);
2850 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
2855 if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND)
2856 ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND;
2858 if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL ||
2859 ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) {
2869 ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
2877 lpfc_disc_state_machine(vport, ndlp, pmb,
2886 lpfc_nlp_put(ndlp);
3147 struct lpfc_nodelist *ndlp;
3149 ndlp = (struct lpfc_nodelist *) pmb->context2;
3166 /* Decrement the reference count to ndlp after the
3167 * references to the ndlp are done.
3169 lpfc_nlp_put(ndlp);
3174 /* Decrement the reference count to ndlp after the references
3175 * to the ndlp are done.
3177 lpfc_nlp_put(ndlp);
3181 ndlp->nlp_rpi = mb->un.varWords[0];
3182 ndlp->nlp_flag |= NLP_RPI_VALID;
3183 ndlp->nlp_type |= NLP_FABRIC;
3184 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3201 * all the current references to the ndlp have been done.
3203 lpfc_nlp_put(ndlp);
3218 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3229 lpfc_nlp_put(ndlp);
3234 /* If no other thread is using the ndlp, free it */
3235 lpfc_nlp_not_used(ndlp);
3254 ndlp->nlp_rpi = mb->un.varWords[0];
3255 ndlp->nlp_flag |= NLP_RPI_VALID;
3256 ndlp->nlp_type |= NLP_FABRIC;
3257 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3281 lpfc_nlp_put(ndlp);
3290 lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3299 rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
3300 rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
3301 rport_ids.port_id = ndlp->nlp_DID;
3311 if (ndlp->rport && ndlp->rport->dd_data &&
3312 ((struct lpfc_rport_data *) ndlp->rport->dd_data)->pnode == ndlp)
3313 lpfc_nlp_put(ndlp);
3317 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
3319 ndlp->rport = rport = fc_remote_port_add(shost, 0, &rport_ids);
3327 rport->maxframe_size = ndlp->nlp_maxframe;
3328 rport->supported_classes = ndlp->nlp_class_sup;
3330 rdata->pnode = lpfc_nlp_get(ndlp);
3332 if (ndlp->nlp_type & NLP_FCP_TARGET)
3334 if (ndlp->nlp_type & NLP_FCP_INITIATOR)
3343 ndlp->nlp_sid = rport->scsi_target_id;
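Lines 3299-3343 bind a node to the FC transport: the node's WWNN/WWPN/DID seed the fc_rport identifiers, fc_remote_port_add() creates the rport, and the rport's private data takes its own counted reference back to the node (3330). A simplified kernel-style sketch of that sequence; error handling and the FCP role bits derived from nlp_type at 3332-3334 are elided:

    static void bind_remote_port(struct Scsi_Host *shost,
                                 struct lpfc_nodelist *ndlp)
    {
        struct fc_rport_identifiers rport_ids;
        struct fc_rport *rport;
        struct lpfc_rport_data *rdata;

        rport_ids.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
        rport_ids.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
        rport_ids.port_id   = ndlp->nlp_DID;
        rport_ids.roles     = FC_RPORT_ROLE_UNKNOWN;

        rport = fc_remote_port_add(shost, 0, &rport_ids);
        if (!rport)
            return;

        rport->maxframe_size     = ndlp->nlp_maxframe;
        rport->supported_classes = ndlp->nlp_class_sup;

        rdata = rport->dd_data;
        rdata->pnode = lpfc_nlp_get(ndlp); /* rport pins the node */
        ndlp->rport  = rport;
        /* for FCP targets the driver then records rport->scsi_target_id
         * into ndlp->nlp_sid, as at 3343 */
    }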
3349 lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp)
3351 struct fc_rport *rport = ndlp->rport;
3353 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_RPORT,
3355 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
3398 lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3404 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
3405 ndlp->nlp_type |= NLP_FC_NODE;
3408 ndlp->nlp_flag &= ~NLP_NODEV_REMOVE;
3410 ndlp->nlp_flag &= ~NLP_RCV_PLOGI;
3413 if (ndlp->rport && (old_state == NLP_STE_MAPPED_NODE ||
3416 lpfc_unregister_remote_port(ndlp);
3427 lpfc_register_remote_port(vport, ndlp);
3435 ndlp->lat_data = kcalloc(LPFC_MAX_BUCKET_COUNT,
3439 if (!ndlp->lat_data)
3443 "0x%x\n", ndlp->nlp_DID);
3452 (!ndlp->rport ||
3453 ndlp->rport->scsi_target_id == -1 ||
3454 ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) {
3456 ndlp->nlp_flag |= NLP_TGT_NO_SCSIID;
3458 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
3484 lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3488 int old_state = ndlp->nlp_state;
3493 ndlp->nlp_DID,
3499 ndlp->nlp_DID, old_state, state);
3503 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3505 ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID;
3506 ndlp->nlp_type &= ~NLP_FC_NODE;
3509 if (list_empty(&ndlp->nlp_listp)) {
3511 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
3516 ndlp->nlp_state = state;
3518 lpfc_nlp_state_cleanup(vport, ndlp, old_state, state);
3522 lpfc_enqueue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3526 if (list_empty(&ndlp->nlp_listp)) {
3528 list_add_tail(&ndlp->nlp_listp, &vport->fc_nodes);
3534 lpfc_dequeue_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3538 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3539 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
3540 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
3542 list_del_init(&ndlp->nlp_listp);
3544 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
3549 lpfc_disable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3551 lpfc_cancel_retry_delay_tmo(vport, ndlp);
3552 if (ndlp->nlp_state && !list_empty(&ndlp->nlp_listp))
3553 lpfc_nlp_counters(vport, ndlp->nlp_state, -1);
3554 lpfc_nlp_state_cleanup(vport, ndlp, ndlp->nlp_state,
3560 * @ndlp: Pointer to FC node object.
3565 * to phba from @ndlp can be obtained indirectly through its reference to
3566 * @vport, a direct reference to phba is taken here by @ndlp. This is
3567 * because the life-span of the @ndlp might go beyond the existence of @vport as
3568 * the final release of the ndlp is determined by its reference count. And the
3569 * operation on @ndlp needs the reference to phba.
3572 lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3575 INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
3576 INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
3577 init_timer(&ndlp->nlp_delayfunc);
3578 ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
3579 ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
3580 ndlp->nlp_DID = did;
3581 ndlp->vport = vport;
3582 ndlp->phba = vport->phba;
3583 ndlp->nlp_sid = NLP_NO_SID;
3584 kref_init(&ndlp->kref);
3585 NLP_INT_NODE_ACT(ndlp);
3586 atomic_set(&ndlp->cmd_pending, 0);
3587 ndlp->cmd_qdepth = vport->cfg_tgt_queue_depth;
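The kernel-doc above (3560-3569) explains why lpfc_initialize_node stores a direct phba pointer: the node's kref-governed lifetime can outlast its @vport, so phba must stay reachable without going through vport. Note also the 2.6-era timer API at 3577-3579: init_timer() plus explicit .function/.data assignment, with the node smuggled through the unsigned long data word. A sketch of that timer idiom as it would look in a 2.6.36 driver:

    static void els_retry_delay(unsigned long data)
    {
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *)data;
        /* ... re-drive the delayed ELS command for ndlp ... */
    }

    static void node_timer_init(struct lpfc_nodelist *ndlp)
    {
        init_timer(&ndlp->nlp_delayfunc);
        ndlp->nlp_delayfunc.function = els_retry_delay;
        ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
    }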
3591 lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3598 if (!ndlp)
3602 /* The ndlp should not be in memory free mode */
3603 if (NLP_CHK_FREE_REQ(ndlp)) {
3606 "0277 lpfc_enable_node: ndlp:x%p "
3608 (void *)ndlp, ndlp->nlp_usg_map,
3609 atomic_read(&ndlp->kref.refcount));
3612 /* The ndlp should not already be in active mode */
3613 if (NLP_CHK_NODE_ACT(ndlp)) {
3616 "0278 lpfc_enable_node: ndlp:x%p "
3618 (void *)ndlp, ndlp->nlp_usg_map,
3619 atomic_read(&ndlp->kref.refcount));
3624 did = ndlp->nlp_DID;
3626 /* re-initialize ndlp except for the ndlp linked list pointer */
3627 memset((((char *)ndlp) + sizeof (struct list_head)), 0,
3629 lpfc_initialize_node(vport, ndlp, did);
3634 lpfc_nlp_set_state(vport, ndlp, state);
3638 ndlp->nlp_DID, 0, 0);
3639 return ndlp;
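The memset at 3627 re-initializes a recycled node in place while leaving it linked on vport->fc_nodes: it wipes everything after the leading struct list_head, which is only safe if the list head is the first member of struct lpfc_nodelist. A compilable sketch of the idiom (field names are illustrative):

    #include <string.h>

    struct list_link { struct list_link *next, *prev; };

    struct disc_node {
        struct list_link listp; /* must stay first for the offset below */
        int state;              /* everything from here on is wiped */
        unsigned int flags;
    };

    static void reinit_keep_links(struct disc_node *n)
    {
        memset((char *)n + sizeof(struct list_link), 0,
               sizeof(*n) - sizeof(struct list_link));
    }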
3643 lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3648 * the ndlp from the vport. The ndlp marked as UNUSED on the list
3650 * that the ndlp is not already in the UNUSED state before we proceed.
3652 if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
3654 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNUSED_NODE);
3655 lpfc_nlp_put(ndlp);
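lpfc_drop_node is two steps: park the node in UNUSED state (the check at 3652 makes the call idempotent), then drop the discovery list's reference so the kref machinery can free the node once every other holder lets go. Sketched with the stand-ins from the earlier snippets:

    enum { STATE_UNUSED = 0 };  /* stand-in for NLP_STE_UNUSED_NODE */

    struct disc_ref { struct refnode ref; int state; };

    static void drop_node(struct disc_ref *ndlp)
    {
        if (ndlp->state == STATE_UNUSED)  /* already dropped once */
            return;
        ndlp->state = STATE_UNUSED;  /* ~ lpfc_nlp_set_state(..., UNUSED) */
        node_put(&ndlp->ref);        /* release the list's reference */
    }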
3743 struct lpfc_nodelist *ndlp)
3747 struct lpfc_vport *vport = ndlp->vport;
3755 if (iocb->context_un.ndlp == ndlp)
3758 if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
3761 if (iocb->context1 == (uint8_t *) ndlp)
3768 if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
3769 (ndlp->nlp_flag & NLP_DELAY_TMO)) {
3772 if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi) {
3786 lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
3794 lpfc_fabric_abort_nport(ndlp);
3801 if (ndlp->nlp_flag & NLP_RPI_VALID) {
3814 ndlp))) {
3843 lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3849 if (ndlp->nlp_flag & NLP_RPI_VALID) {
3852 lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
3859 lpfc_no_rpi(phba, ndlp);
3861 ndlp->nlp_rpi = 0;
3862 ndlp->nlp_flag &= ~NLP_RPI_VALID;
3863 ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3880 struct lpfc_nodelist *ndlp;
3888 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
3889 if (ndlp->nlp_flag & NLP_RPI_VALID) {
3892 lpfc_unreg_rpi(vports[i], ndlp);
3955 lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
3966 ndlp->nlp_DID, ndlp->nlp_flag,
3967 ndlp->nlp_state, ndlp->nlp_rpi);
3968 if (NLP_CHK_FREE_REQ(ndlp)) {
3970 "0280 lpfc_cleanup_node: ndlp:x%p "
3972 (void *)ndlp, ndlp->nlp_usg_map,
3973 atomic_read(&ndlp->kref.refcount));
3974 lpfc_dequeue_node(vport, ndlp);
3977 "0281 lpfc_cleanup_node: ndlp:x%p "
3979 (void *)ndlp, ndlp->nlp_usg_map,
3980 atomic_read(&ndlp->kref.refcount));
3981 lpfc_disable_node(vport, ndlp);
3984 /* clean up any ndlp on the mbox q waiting for reglogin cmpl */
3987 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
3996 (ndlp == (struct lpfc_nodelist *) mb->context2)) {
4008 * the ndlp reference count as we are in the process
4015 lpfc_els_abort(phba, ndlp);
4018 ndlp->nlp_flag &= ~NLP_DELAY_TMO;
4021 ndlp->nlp_last_elscmd = 0;
4022 del_timer_sync(&ndlp->nlp_delayfunc);
4024 list_del_init(&ndlp->els_retry_evt.evt_listp);
4025 list_del_init(&ndlp->dev_loss_evt.evt_listp);
4027 lpfc_unreg_rpi(vport, ndlp);
4038 lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
4045 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4046 if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
4047 !(ndlp->nlp_flag & NLP_REG_LOGIN_SEND) &&
4048 !(ndlp->nlp_flag & NLP_RPI_VALID)) {
4054 rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
4071 lpfc_cleanup_node(vport, ndlp);
4074 * We can get here with a non-NULL ndlp->rport because when we
4078 if (ndlp->rport) {
4079 rdata = ndlp->rport->dd_data;
4081 ndlp->rport = NULL;
4086 lpfc_matchdid(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4095 if (ndlp->nlp_DID == did)
4105 ndlpdid.un.word = ndlp->nlp_DID;
4117 matchdid.un.word = ndlp->nlp_DID;
4134 struct lpfc_nodelist *ndlp;
4137 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4138 if (lpfc_matchdid(vport, ndlp, did)) {
4139 data1 = (((uint32_t) ndlp->nlp_state << 24) |
4140 ((uint32_t) ndlp->nlp_xri << 16) |
4141 ((uint32_t) ndlp->nlp_type << 8) |
4142 ((uint32_t) ndlp->nlp_rpi & 0xff));
4146 ndlp, ndlp->nlp_DID,
4147 ndlp->nlp_flag, data1);
4148 return ndlp;
4162 struct lpfc_nodelist *ndlp;
4165 ndlp = __lpfc_findnode_did(vport, did);
4167 return ndlp;
4174 struct lpfc_nodelist *ndlp;
4176 ndlp = lpfc_findnode_did(vport, did);
4177 if (!ndlp) {
4181 ndlp = (struct lpfc_nodelist *)
4183 if (!ndlp)
4185 lpfc_nlp_init(vport, ndlp, did);
4186 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4188 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4190 return ndlp;
4191 } else if (!NLP_CHK_NODE_ACT(ndlp)) {
4192 ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE);
4193 if (!ndlp)
4196 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4198 return ndlp;
4207 if (ndlp->nlp_flag & NLP_RCV_PLOGI)
4213 lpfc_cancel_retry_delay_tmo(vport, ndlp);
4215 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4218 ndlp = NULL;
4224 if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE ||
4225 ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
4226 ndlp->nlp_flag & NLP_RCV_PLOGI)
4228 lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
4230 ndlp->nlp_flag |= NLP_NPR_2B_DISC;
4233 return ndlp;
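Lines 4176-4190 are the miss path of the discovery-node setup: no node exists for the DID, so one is allocated, initialized, moved to NPR state, and flagged for discovery. A hedged reconstruction of that shape (the mempool call and GFP flag are assumptions; the elided lines may differ):

    ndlp = lpfc_findnode_did(vport, did);
    if (!ndlp) {
        ndlp = (struct lpfc_nodelist *)
            mempool_alloc(vport->phba->nlp_mem_pool, GFP_KERNEL);
        if (!ndlp)
            return NULL;
        lpfc_nlp_init(vport, ndlp, did);
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
        ndlp->nlp_flag |= NLP_NPR_2B_DISC; /* node still to be discovered */
        return ndlp;
    }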
4440 lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
4456 if (iocb->context1 != ndlp) {
4470 if (iocb->context1 != ndlp) {
4489 struct lpfc_nodelist *ndlp, *next_ndlp;
4493 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
4495 if (!NLP_CHK_NODE_ACT(ndlp))
4497 if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE ||
4498 ndlp->nlp_state == NLP_STE_ADISC_ISSUE) {
4499 lpfc_free_tx(phba, ndlp);
4556 struct lpfc_nodelist *ndlp, *next_ndlp;
4581 list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes,
4583 if (!NLP_CHK_NODE_ACT(ndlp))
4585 if (ndlp->nlp_state != NLP_STE_NPR_NODE)
4587 if (ndlp->nlp_type & NLP_FABRIC) {
4588 /* Clean up the ndlp on Fabric connections */
4589 lpfc_drop_node(vport, ndlp);
4591 } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) {
4595 lpfc_unreg_rpi(vport, ndlp);
4629 /* Next look for NameServer ndlp */
4630 ndlp = lpfc_findnode_did(vport, NameServer_DID);
4631 if (ndlp && NLP_CHK_NODE_ACT(ndlp))
4632 lpfc_els_abort(phba, ndlp);
4783 struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
4788 ndlp->nlp_rpi = mb->un.varWords[0];
4789 ndlp->nlp_flag |= NLP_RPI_VALID;
4790 ndlp->nlp_type |= NLP_FABRIC;
4791 lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
4800 lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA);
4807 lpfc_nlp_put(ndlp);
4816 lpfc_filter_by_rpi(struct lpfc_nodelist *ndlp, void *param)
4820 return ndlp->nlp_rpi == *rpi;
4824 lpfc_filter_by_wwpn(struct lpfc_nodelist *ndlp, void *param)
4826 return memcmp(&ndlp->nlp_portname, param,
4827 sizeof(ndlp->nlp_portname)) == 0;
4833 struct lpfc_nodelist *ndlp;
4835 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4836 if (filter(ndlp, param))
4837 return ndlp;
4843 * This routine looks up the ndlp lists for the given RPI. If the rpi is found it
4853 * This routine looks up the ndlp lists for the given WWPN. If the WWPN is found it
4860 struct lpfc_nodelist *ndlp;
4863 ndlp = __lpfc_find_node(vport, lpfc_filter_by_wwpn, wwpn);
4865 return ndlp;
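The RPI and WWPN lookups above share one generic walk: __lpfc_find_node takes a predicate and an opaque parameter, and lpfc_filter_by_rpi / lpfc_filter_by_wwpn (4816-4827) are the two predicates. The callback shape, restated compactly:

    typedef int (*node_filter)(struct lpfc_nodelist *ndlp, void *param);

    static struct lpfc_nodelist *
    find_node(struct lpfc_vport *vport, node_filter filter, void *param)
    {
        struct lpfc_nodelist *ndlp;

        list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
            if (filter(ndlp, param))  /* first match wins */
                return ndlp;
        return NULL;
    }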
4869 lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
4872 memset(ndlp, 0, sizeof (struct lpfc_nodelist));
4874 lpfc_initialize_node(vport, ndlp, did);
4875 INIT_LIST_HEAD(&ndlp->nlp_listp);
4879 ndlp->nlp_DID, 0, 0);
4884 /* This routine releases all resources associated with a specific NPort's ndlp
4892 struct lpfc_nodelist *ndlp = container_of(kref, struct lpfc_nodelist,
4895 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
4897 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type);
4899 lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
4900 "0279 lpfc_nlp_release: ndlp:x%p "
4902 (void *)ndlp, ndlp->nlp_usg_map,
4903 atomic_read(&ndlp->kref.refcount));
4905 /* remove ndlp from action. */
4906 lpfc_nlp_remove(ndlp->vport, ndlp);
4908 /* clear the ndlp active flag for all release cases */
4909 phba = ndlp->phba;
4911 NLP_CLR_NODE_ACT(ndlp);
4914 /* free ndlp memory for final ndlp release */
4915 if (NLP_CHK_FREE_REQ(ndlp)) {
4916 kfree(ndlp->lat_data);
4917 mempool_free(ndlp, ndlp->phba->nlp_mem_pool);
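lpfc_nlp_release is a textbook kref release callback: the kref is embedded in the node, so container_of (4892) recovers the enclosing structure, which is then returned to the driver's mempool (4917). A self-contained userspace sketch of the recovery step:

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kref_ctr { int refcount; };

    struct fc_node {
        unsigned int did;
        struct kref_ctr kref;  /* embedded, as in struct lpfc_nodelist */
    };

    static void fc_node_release(struct kref_ctr *kref)
    {
        struct fc_node *ndlp = container_of(kref, struct fc_node, kref);
        /* the driver frees ndlp->lat_data and mempool_free()s ndlp here */
        (void)ndlp;
    }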
4921 /* This routine bumps the reference count for an ndlp structure to ensure
4922 * that one discovery thread won't free an ndlp while another discovery thread
4926 lpfc_nlp_get(struct lpfc_nodelist *ndlp)
4931 if (ndlp) {
4932 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
4934 ndlp->nlp_DID, ndlp->nlp_flag,
4935 atomic_read(&ndlp->kref.refcount));
4936 /* The check of ndlp usage to prevent incrementing the
4937 * ndlp reference count that is in the process of being
4940 phba = ndlp->phba;
4942 if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) {
4944 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
4945 "0276 lpfc_nlp_get: ndlp:x%p "
4947 (void *)ndlp, ndlp->nlp_usg_map,
4948 atomic_read(&ndlp->kref.refcount));
4951 kref_get(&ndlp->kref);
4954 return ndlp;
4957 /* This routine decrements the reference count for an ndlp structure. If the
4959 * freed. Returning 1 indicates the ndlp resource has been released; on the
4960 * other hand, returning 0 indicates the ndlp resource has not been released
4964 lpfc_nlp_put(struct lpfc_nodelist *ndlp)
4969 if (!ndlp)
4972 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
4974 ndlp->nlp_DID, ndlp->nlp_flag,
4975 atomic_read(&ndlp->kref.refcount));
4976 phba = ndlp->phba;
4978 /* Check the ndlp memory free acknowledge flag to avoid the
4980 * after previous one has done ndlp memory free.
4982 if (NLP_CHK_FREE_ACK(ndlp)) {
4984 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
4985 "0274 lpfc_nlp_put: ndlp:x%p "
4987 (void *)ndlp, ndlp->nlp_usg_map,
4988 atomic_read(&ndlp->kref.refcount));
4991 /* Check the ndlp inactivate log flag to avoid the possible
4992 * race condition that kref_put got invoked again after ndlp
4995 if (NLP_CHK_IACT_REQ(ndlp)) {
4997 lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE,
4998 "0275 lpfc_nlp_put: ndlp:x%p "
5000 (void *)ndlp, ndlp->nlp_usg_map,
5001 atomic_read(&ndlp->kref.refcount));
5004 /* For last put, mark the ndlp usage flags to make sure no
5005 * other kref_get and kref_put on the same ndlp shall get
5007 * invoked on this ndlp.
5009 if (atomic_read(&ndlp->kref.refcount) == 1) {
5010 /* Indicate ndlp is put to inactive state. */
5011 NLP_SET_IACT_REQ(ndlp);
5012 /* Acknowledge ndlp memory free has been seen. */
5013 if (NLP_CHK_FREE_REQ(ndlp))
5014 NLP_SET_FREE_ACK(ndlp);
5023 return kref_put(&ndlp->kref, lpfc_nlp_release);
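lpfc_nlp_put returns whatever kref_put returns: 1 when the final reference dropped and lpfc_nlp_release ran, 0 otherwise, which is what the comment at 4959-4960 documents and what lpfc_nlp_not_used (below) relies on. The refcount == 1 probe at 5009 marks the usage flags just before the final drop; the driver does this under a lock (elided from the matches), which the sketch omits. Continuing the userspace sketch:

    static int kref_ctr_put(struct kref_ctr *k,
                            void (*release)(struct kref_ctr *))
    {
        if (--k->refcount == 0) {
            release(k);
            return 1;  /* object released */
        }
        return 0;      /* other holders remain */
    }

    static int fc_node_put(struct fc_node *ndlp)
    {
        if (ndlp->kref.refcount == 1) {
            /* last put: mark inactive / free-acknowledge flags here */
        }
        return kref_ctr_put(&ndlp->kref, fc_node_release);
    }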
5028 * ndlp has been freed. A return value of 0 indicates the ndlp is
5032 lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
5034 lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE,
5036 ndlp->nlp_DID, ndlp->nlp_flag,
5037 atomic_read(&ndlp->kref.refcount));
5038 if (atomic_read(&ndlp->kref.refcount) == 1)
5039 if (lpfc_nlp_put(ndlp))
5059 struct lpfc_nodelist *ndlp;
5067 list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
5068 if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
5069 (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
5077 ndlp->nlp_rpi, ndlp->nlp_DID,
5078 ndlp->nlp_flag);
5079 if (ndlp->nlp_flag & NLP_RPI_VALID)
5151 struct lpfc_nodelist *ndlp;
5167 ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
5168 if (ndlp)
5169 lpfc_cancel_retry_delay_tmo(vports[i], ndlp);