Lines Matching refs:lport (evidently the Linux NVMe-FC host transport, drivers/nvme/host/fc.c; the leading numbers are source line numbers)

134 struct list_head endp_list; /* for lport->endp_list */
140 struct nvme_fc_lport *lport;
157 struct nvme_fc_lport *lport;
243 struct nvme_fc_lport *lport =
247 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
248 WARN_ON(!list_empty(&lport->endp_list));
252 list_del(&lport->port_list);
255 ida_free(&nvme_fc_local_port_cnt, lport->localport.port_num);
256 ida_destroy(&lport->endp_cnt);
258 put_device(lport->dev);
260 kfree(lport);
264 nvme_fc_lport_put(struct nvme_fc_lport *lport)
266 kref_put(&lport->ref, nvme_fc_free_lport);
270 nvme_fc_lport_get(struct nvme_fc_lport *lport)
272 return kref_get_unless_zero(&lport->ref);
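The pair at lines 264-272 is the standard kref lifetime pattern: lookups take a reference with kref_get_unless_zero() so a concurrently dying lport is never revived, and the final put releases the object through a single free callback (lines 243-260). A minimal self-contained sketch of the idiom; my_port and my_port_free are illustrative names, not from the driver:

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct my_port {
            struct kref ref;
            /* ... payload ... */
    };

    static void my_port_free(struct kref *ref)
    {
            /* Runs exactly once, when the last reference drops. */
            struct my_port *p = container_of(ref, struct my_port, ref);

            kfree(p);
    }

    /* Returns 0 if the refcount already hit zero (object is dying). */
    static int my_port_get(struct my_port *p)
    {
            return kref_get_unless_zero(&p->ref);
    }

    static void my_port_put(struct my_port *p)
    {
            kref_put(&p->ref, my_port_free);
    }
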
281 struct nvme_fc_lport *lport;
286 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
287 if (lport->localport.node_name != pinfo->node_name ||
288 lport->localport.port_name != pinfo->port_name)
291 if (lport->dev != dev) {
292 lport = ERR_PTR(-EXDEV);
296 if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
297 lport = ERR_PTR(-EEXIST);
301 if (!nvme_fc_lport_get(lport)) {
304 * act as if lport already deleted
306 lport = NULL;
310 /* resume the lport */
312 lport->ops = ops;
313 lport->localport.port_role = pinfo->port_role;
314 lport->localport.port_id = pinfo->port_id;
315 lport->localport.port_state = FC_OBJSTATE_ONLINE;
319 return lport;
322 lport = NULL;
327 return lport;
375 /* found an lport, but something about its state is bad */
380 /* found existing lport, which was resumed */
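Lines 281-327 walk the global lport list under the driver's lock and distinguish four outcomes: matching WWNs on a different device (-EXDEV), a still-live duplicate registration (-EEXIST), a port whose refcount already hit zero (treated as if already deleted), and a DELETED port that is resumed in place. A hedged sketch of that decision ladder, extending my_port from the sketch above with assumed node_name, port_name, dev, state and port_list fields:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    enum my_port_state { MY_PORT_ONLINE, MY_PORT_DELETED };

    struct my_port {
            struct kref             ref;
            u64                     node_name;
            u64                     port_name;
            struct device           *dev;
            enum my_port_state      state;
            struct list_head        port_list;
    };

    static LIST_HEAD(my_port_list);
    static DEFINE_SPINLOCK(my_port_lock);

    static struct my_port *
    my_port_attach(u64 node_name, u64 port_name, struct device *dev)
    {
            struct my_port *p;
            unsigned long flags;

            spin_lock_irqsave(&my_port_lock, flags);
            list_for_each_entry(p, &my_port_list, port_list) {
                    if (p->node_name != node_name ||
                        p->port_name != port_name)
                            continue;
                    if (p->dev != dev) {
                            /* same WWNs owned by another device */
                            p = ERR_PTR(-EXDEV);
                            goto out;
                    }
                    if (p->state != MY_PORT_DELETED) {
                            /* live duplicate registration */
                            p = ERR_PTR(-EEXIST);
                            goto out;
                    }
                    if (!my_port_get(p)) {
                            /* teardown already started:
                             * act as if already deleted */
                            p = NULL;
                            goto out;
                    }
                    p->state = MY_PORT_ONLINE;      /* resume the port */
                    goto out;
            }
            p = NULL;                               /* no match */
    out:
            spin_unlock_irqrestore(&my_port_lock, flags);
            return p;
    }
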
458 struct nvme_fc_lport *lport = localport_to_lport(portptr);
474 if (atomic_read(&lport->act_rport_cnt) == 0)
475 lport->ops->localport_delete(&lport->localport);
477 nvme_fc_lport_put(lport);
494 nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
506 lport->localport.node_name, lport->localport.port_name);
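nvme_fc_signal_discovery_scan() (lines 494-506, used again at 695/743/907/3794) notifies userspace that a discovery connect should be attempted; as far as I can tell it raises a udev change event carrying the local WWNs so nvme-cli's autoconnect machinery can react. A hedged sketch of emitting such an event; the device pointer and environment strings are illustrative:

    #include <linux/device.h>
    #include <linux/kobject.h>

    /* fc_udev_dev stands in for the transport's dedicated event device. */
    static void signal_discovery_scan(struct device *fc_udev_dev,
                                      u64 node_name, u64 port_name)
    {
            char wwnn[32], wwpn[32];
            char *envp[4] = { "FC_EVENT=nvmediscovery", wwnn, wwpn, NULL };

            snprintf(wwnn, sizeof(wwnn), "NVMEFC_WWNN=0x%016llx", node_name);
            snprintf(wwpn, sizeof(wwpn), "NVMEFC_WWPN=0x%016llx", port_name);
            kobject_uevent_env(&fc_udev_dev->kobj, KOBJ_CHANGE, envp);
    }
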
518 struct nvme_fc_lport *lport =
525 /* remove from lport list */
531 ida_free(&lport->endp_cnt, rport->remoteport.port_num);
535 nvme_fc_lport_put(lport);
582 nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
591 list_for_each_entry(rport, &lport->endp_list, endp_list) {
669 struct nvme_fc_lport *lport = localport_to_lport(localport);
674 if (!nvme_fc_lport_get(lport)) {
684 newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);
693 nvme_fc_lport_put(lport);
695 nvme_fc_signal_discovery_scan(lport, newrec);
702 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
709 idx = ida_alloc(&lport->endp_cnt, GFP_KERNEL);
722 newrec->remoteport.localport = &lport->localport;
724 newrec->dev = lport->dev;
725 newrec->lport = lport;
726 if (lport->ops->remote_priv_sz)
740 list_add_tail(&newrec->endp_list, &lport->endp_list);
743 nvme_fc_signal_discovery_scan(lport, newrec);
751 nvme_fc_lport_put(lport);
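The registration path at lines 669-751 first tries to resume a suspended rport (the endp_list walk at lines 582-591) and only then allocates a fresh one. Two idioms stand out: the single kmalloc at line 702 appends the LLDD's private area directly behind the transport structure (the same trailing-private trick reappears with lsrqst_priv_sz at lines 1182/1298/1428 and fcprqst_priv_sz at 2171-2172), and a per-lport IDA hands out endpoint numbers (line 709). A sketch of both, with illustrative names:

    #include <linux/err.h>
    #include <linux/idr.h>
    #include <linux/slab.h>

    struct my_rport {
            u32     port_num;
            void    *private;       /* points at the trailing LLDD area */
            /* ... */
    };

    static struct my_rport *
    my_rport_alloc(struct ida *endp_ida, size_t remote_priv_sz)
    {
            struct my_rport *r;
            int idx;

            /* one allocation covers transport struct + LLDD private */
            r = kzalloc(sizeof(*r) + remote_priv_sz, GFP_KERNEL);
            if (!r)
                    return ERR_PTR(-ENOMEM);

            idx = ida_alloc(endp_ida, GFP_KERNEL);
            if (idx < 0) {
                    kfree(r);
                    return ERR_PTR(idx);
            }

            r->port_num = idx;
            r->private = remote_priv_sz ? &r[1] : NULL;
            return r;
    }
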
771 rport->lport->ops->ls_abort(&rport->lport->localport,
881 rport->lport->ops->remoteport_delete(portptr);
907 nvme_fc_signal_discovery_scan(rport->lport, rport);
1095 ret = rport->lport->ops->ls_req(&rport->lport->localport,
1182 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1194 if (ctrl->lport->ops->lsrqst_priv_sz)
1298 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1310 if (ctrl->lport->ops->lsrqst_priv_sz)
1428 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL);
1440 if (ctrl->lport->ops->lsrqst_priv_sz)
1459 struct nvme_fc_lport *lport = rport->lport;
1466 fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma,
1468 fc_dma_unmap_single(lport->dev, lsop->rspdma,
1482 struct nvme_fc_lport *lport = rport->lport;
1486 fc_dma_sync_single_for_device(lport->dev, lsop->rspdma,
1489 ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport,
1492 dev_warn(lport->dev,
1533 dev_info(rport->lport->dev,
1575 dev_info(rport->lport->dev,
1698 void nvme_fc_rcv_ls_req_err_msg(struct nvme_fc_lport *lport,
1701 dev_info(lport->dev, "RCV %s LS failed: No memory\n",
1731 struct nvme_fc_lport *lport = rport->lport;
1740 if (!lport->ops->xmt_ls_rsp) {
1741 dev_info(lport->dev,
1750 dev_info(lport->dev,
1760 nvme_fc_rcv_ls_req_err_msg(lport, w0);
1768 nvme_fc_rcv_ls_req_err_msg(lport, w0);
1773 lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf,
1776 if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) {
1777 dev_info(lport->dev,
1805 fc_dma_unmap_single(lport->dev, lsop->rspdma,
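The fc_dma_* calls at lines 1459-1492 and 1773-1805 follow the usual streaming-DMA lifecycle for the LS response buffer: map once, check for a mapping failure, sync toward the device before the LLDD transmits, sync back to the CPU and unmap on completion. (The fc_dma_* helpers appear to be thin wrappers over the generic dma_* API that tolerate a NULL device.) A condensed sketch of that lifecycle:

    #include <linux/dma-mapping.h>

    static int xmit_ls_rsp(struct device *dev, void *rspbuf, size_t len)
    {
            dma_addr_t rspdma;

            rspdma = dma_map_single(dev, rspbuf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, rspdma))
                    return -EFAULT;

            /* make CPU writes to rspbuf visible to the device */
            dma_sync_single_for_device(dev, rspdma, len, DMA_TO_DEVICE);

            /* ... hand (rspdma, len) to the LLDD; then on completion: */

            dma_sync_single_for_cpu(dev, rspdma, len, DMA_TO_DEVICE);
            dma_unmap_single(dev, rspdma, len, DMA_TO_DEVICE);
            return 0;
    }
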
1824 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
1826 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
1860 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
1980 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
2117 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
2119 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
2126 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
2129 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
2171 if (ctrl->lport->ops->fcprqst_priv_sz) {
2172 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
2297 if (ctrl->lport->ops->delete_queue)
2298 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
2319 if (ctrl->lport->ops->create_queue)
2320 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
2616 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
2639 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
2740 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
2749 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
2876 if (ctrl->lport->ops->map_queues)
2877 ctrl->lport->ops->map_queues(&ctrl->lport->localport,
2902 ctrl->lport->ops->max_hw_queues);
2919 ctrl->lport->ops->fcprqst_priv_sz));
2956 ctrl->lport->ops->max_hw_queues);
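Lines 2876-2956 size the blk-mq side from LLDD capabilities: the hardware queue count is clamped to ops->max_hw_queues, and cmd_size reserves ops->fcprqst_priv_sz bytes of per-request LLDD private space behind the transport's per-command struct (the struct_size-over-flexible-array idiom). A schematic tag-set setup under those assumptions; struct and parameter names are illustrative, and the caller must supply a real blk_mq_ops with at least .queue_rq:

    #include <linux/blk-mq.h>
    #include <linux/numa.h>
    #include <linux/overflow.h>
    #include <linux/string.h>

    struct my_fcp_op {
            dma_addr_t      cmddma;         /* transport per-command state */
            dma_addr_t      rspdma;
            u8              priv[];         /* LLDD private area follows */
    };

    static int setup_tag_set(struct blk_mq_tag_set *set,
                             const struct blk_mq_ops *ops, void *drv_data,
                             unsigned int want_queues,
                             unsigned int max_hw_queues,
                             size_t fcprqst_priv_sz)
    {
            memset(set, 0, sizeof(*set));
            set->ops = ops;
            set->nr_hw_queues = min_t(unsigned int, want_queues,
                                      max_hw_queues);
            set->queue_depth = 128;         /* example depth */
            set->numa_node = NUMA_NO_NODE;
            set->cmd_size = struct_size((struct my_fcp_op *)NULL, priv,
                                        fcprqst_priv_sz);
            set->driver_data = drv_data;
            return blk_mq_alloc_tag_set(set);
    }
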
3003 struct nvme_fc_lport *lport = rport->lport;
3005 atomic_inc(&lport->act_rport_cnt);
3011 struct nvme_fc_lport *lport = rport->lport;
3014 cnt = atomic_dec_return(&lport->act_rport_cnt);
3015 if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
3016 lport->ops->localport_delete(&lport->localport);
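Lines 474-477 and 3003-3016 are two halves of one rule: the LLDD's localport_delete() upcall fires from whichever side finishes last, either unregistration (when no rports are active) or the last active rport going away (when the lport is already marked deleted). A sketch of the counter, assuming my_port gains an atomic_t act_rport_cnt; the upcall stub stands in for lport->ops->localport_delete():

    #include <linux/atomic.h>

    static void localport_delete_upcall(struct my_port *lp)
    {
            /* stand-in for lport->ops->localport_delete() */
    }

    static void my_rport_active(struct my_port *lp)
    {
            atomic_inc(&lp->act_rport_cnt);
    }

    static void my_rport_inactive(struct my_port *lp)
    {
            /* fire the final upcall only when no active rports remain
             * AND the port was already unregistered; the unregister
             * path handles the opposite ordering */
            if (atomic_dec_return(&lp->act_rport_cnt) == 0 &&
                lp->state == MY_PORT_DELETED)
                    localport_delete_upcall(lp);
    }
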
3039 struct nvme_fc_lport *lport = rport->lport;
3047 lport->ops->remoteport_delete(&rport->remoteport);
3078 ctrl->cnum, ctrl->lport->localport.port_name,
3116 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
3451 struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
3497 ctrl->lport = lport;
3499 ctrl->dev = lport->dev;
3515 lport->ops->max_hw_queues);
3540 if (lport->dev)
3541 ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3548 ctrl->lport->ops->fcprqst_priv_sz));
3684 struct nvme_fc_lport *lport;
3702 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3703 if (lport->localport.node_name != laddr.nn ||
3704 lport->localport.port_name != laddr.pn ||
3705 lport->localport.port_state != FC_OBJSTATE_ONLINE)
3708 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3720 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
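Controller creation (lines 3684-3720) resolves the connect request's local and remote transport addresses to an (lport, rport) pair: the outer loop requires an exact WWNN/WWPN match on an ONLINE lport, the inner loop does the same over lport->endp_list, and the pair is handed to nvme_fc_init_ctrl(). A schematic of that resolution, assuming my_port gains an endp_list head and my_rport gains node_name/port_name/endp_list fields (reference counting omitted for brevity):

    static struct my_rport *
    resolve_traddr(u64 lnn, u64 lpn, u64 rnn, u64 rpn)
    {
            struct my_port *lp;
            struct my_rport *rp;

            list_for_each_entry(lp, &my_port_list, port_list) {
                    if (lp->node_name != lnn || lp->port_name != lpn ||
                        lp->state != MY_PORT_ONLINE)
                            continue;
                    list_for_each_entry(rp, &lp->endp_list, endp_list) {
                            if (rp->node_name == rnn &&
                                rp->port_name == rpn)
                                    return rp;  /* caller takes references */
                    }
            }
            return NULL;
    }
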
3750 struct nvme_fc_lport *lport;
3756 list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
3757 list_for_each_entry(rport, &lport->endp_list, endp_list) {
3758 if (!nvme_fc_lport_get(lport))
3765 * Revert the lport put and retry. Anything
3770 nvme_fc_lport_put(lport);
3792 lport = rport->lport;
3794 nvme_fc_signal_discovery_scan(lport, rport);
3796 nvme_fc_lport_put(lport);
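The walk at lines 3750-3796 signals a discovery scan for every registered rport. The uevent cannot be raised under the list lock, so the code takes references while scanning and does the signalling afterwards; the truncated comment at line 3765 ("Revert the lport put and retry ...") covers the race where a reference cannot be taken and the walk must be redone. A simplified two-pass sketch of the pattern, assuming my_rport gains lport and disc_list fields plus kref-style my_rport_get/put helpers (the real code also restarts the walk on such races):

    static void signal_all_rports(void)
    {
            LIST_HEAD(local_list);
            struct my_rport *rp, *tmp;
            struct my_port *lp;
            unsigned long flags;

            /* pass 1: collect referenced rports under the lock */
            spin_lock_irqsave(&my_port_lock, flags);
            list_for_each_entry(lp, &my_port_list, port_list) {
                    list_for_each_entry(rp, &lp->endp_list, endp_list) {
                            if (!my_port_get(lp))
                                    continue;       /* lport dying */
                            if (!my_rport_get(rp)) {
                                    my_port_put(lp);
                                    continue;       /* rport dying */
                            }
                            list_add_tail(&rp->disc_list, &local_list);
                    }
            }
            spin_unlock_irqrestore(&my_port_lock, flags);

            /* pass 2: signal outside the lock, dropping references */
            list_for_each_entry_safe(rp, tmp, &local_list, disc_list) {
                    list_del_init(&rp->disc_list);
                    lp = rp->lport;
                    signal_discovery_scan(lp->dev, rp->node_name,
                                          rp->port_name);
                    my_rport_put(rp);
                    my_port_put(lp);
            }
    }
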
3954 struct nvme_fc_lport *lport;
3959 list_for_each_entry(lport, &nvme_fc_lport_list, port_list)
3960 list_for_each_entry(rport, &lport->endp_list, endp_list)