1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.     *
6 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
7 * EMULEX and SLI are trademarks of Emulex.                        *
8 * www.broadcom.com                                                *
9 *                                                                 *
10 * This program is free software; you can redistribute it and/or   *
11 * modify it under the terms of version 2 of the GNU General       *
12 * Public License as published by the Free Software Foundation.    *
13 * This program is distributed in the hope that it will be useful. *
14 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
15 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
16 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
17 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
18 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
19 * more details, a copy of which can be found in the file COPYING  *
20 * included with this package.                                     *
21 *******************************************************************/
22
23/*
24 * Fibre Channel SCSI LAN Device Driver CT support: FC Generic Services FC-GS
25 */
26
27#include <linux/blkdev.h>
28#include <linux/pci.h>
29#include <linux/interrupt.h>
30#include <linux/slab.h>
31#include <linux/utsname.h>
32
33#include <scsi/scsi.h>
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_host.h>
36#include <scsi/scsi_transport_fc.h>
37#include <scsi/fc/fc_fs.h>
38
39#include "lpfc_hw4.h"
40#include "lpfc_hw.h"
41#include "lpfc_sli.h"
42#include "lpfc_sli4.h"
43#include "lpfc_nl.h"
44#include "lpfc_disc.h"
45#include "lpfc.h"
46#include "lpfc_scsi.h"
47#include "lpfc_logmsg.h"
48#include "lpfc_crtn.h"
49#include "lpfc_version.h"
50#include "lpfc_vport.h"
51#include "lpfc_debugfs.h"
52
53/* FDMI Port Speed definitions - FC-GS-7 */
54#define HBA_PORTSPEED_1GFC		0x00000001	/* 1G FC */
55#define HBA_PORTSPEED_2GFC		0x00000002	/* 2G FC */
56#define HBA_PORTSPEED_4GFC		0x00000008	/* 4G FC */
57#define HBA_PORTSPEED_10GFC		0x00000004	/* 10G FC */
58#define HBA_PORTSPEED_8GFC		0x00000010	/* 8G FC */
59#define HBA_PORTSPEED_16GFC		0x00000020	/* 16G FC */
60#define HBA_PORTSPEED_32GFC		0x00000040	/* 32G FC */
61#define HBA_PORTSPEED_20GFC		0x00000080	/* 20G FC */
62#define HBA_PORTSPEED_40GFC		0x00000100	/* 40G FC */
63#define HBA_PORTSPEED_128GFC		0x00000200	/* 128G FC */
64#define HBA_PORTSPEED_64GFC		0x00000400	/* 64G FC */
65#define HBA_PORTSPEED_256GFC		0x00000800	/* 256G FC */
66#define HBA_PORTSPEED_UNKNOWN		0x00008000	/* Unknown */
67#define HBA_PORTSPEED_10GE		0x00010000	/* 10G E */
68#define HBA_PORTSPEED_40GE		0x00020000	/* 40G E */
69#define HBA_PORTSPEED_100GE		0x00040000	/* 100G E */
70#define HBA_PORTSPEED_25GE		0x00080000	/* 25G E */
71#define HBA_PORTSPEED_50GE		0x00100000	/* 50G E */
72#define HBA_PORTSPEED_400GE		0x00200000	/* 400G E */
73
74#define FOURBYTES	4
75
76
77static char *lpfc_release_version = LPFC_DRIVER_VERSION;
78static void
79lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
80		      struct lpfc_iocbq *rspiocb);
81
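/*
 * Log an unsolicited CT buffer that will not be processed any further; the
 * caller remains responsible for freeing the buffer.
 */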
82static void
83lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
84			  struct lpfc_dmabuf *mp, uint32_t size)
85{
86	if (!mp) {
87		lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
88				"0146 Ignoring unsolicited CT No HBQ "
89				"status = x%x\n",
90				get_job_ulpstatus(phba, piocbq));
91	}
92	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
93			"0145 Ignoring unsolicited CT HBQ Size:%d "
94			"status = x%x\n",
95			size, get_job_ulpstatus(phba, piocbq));
96}
97
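/*
 * Buffers attached to an unsolicited CT iocb are only logged here; they are
 * freed by the caller.
 */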
98static void
99lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
100		     struct lpfc_dmabuf *mp, uint32_t size)
101{
102	lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size);
103}
104
105/**
 * lpfc_ct_unsol_cmpl - Completion callback function for unsol ct commands
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to lpfc command iocb data structure.
 * @rspiocb: pointer to lpfc response iocb data structure.
110 *
 * This routine is the completion callback for the unsolicited CT reject
 * command. The memory allocated in the reject command path is freed up here.
113 **/
114static void
115lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
116		   struct lpfc_iocbq *rspiocb)
117{
118	struct lpfc_nodelist *ndlp;
119	struct lpfc_dmabuf *mp, *bmp;
120
121	ndlp = cmdiocb->ndlp;
122	if (ndlp)
123		lpfc_nlp_put(ndlp);
124
125	mp = cmdiocb->rsp_dmabuf;
126	bmp = cmdiocb->bpl_dmabuf;
127	if (mp) {
128		lpfc_mbuf_free(phba, mp->virt, mp->phys);
129		kfree(mp);
130		cmdiocb->rsp_dmabuf = NULL;
131	}
132
133	if (bmp) {
134		lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
135		kfree(bmp);
136		cmdiocb->bpl_dmabuf = NULL;
137	}
138
139	lpfc_sli_release_iocbq(phba, cmdiocb);
140}
141
142/**
143 * lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands
144 * @ndlp: pointer to a node-list data structure.
145 * @ct_req: pointer to the CT request data structure.
146 * @ulp_context: context of received UNSOL CT command
147 * @ox_id: ox_id of the UNSOL CT command
148 *
 * This routine is invoked by the lpfc_ct_handle_mibreq routine to send a
 * reject response for an unhandled CT command.
151 **/
152static void
153lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
154		     struct lpfc_sli_ct_request *ct_req,
155		     u16 ulp_context, u16 ox_id)
156{
157	struct lpfc_vport *vport = ndlp->vport;
158	struct lpfc_hba *phba = vport->phba;
159	struct lpfc_sli_ct_request *ct_rsp;
160	struct lpfc_iocbq *cmdiocbq = NULL;
161	struct lpfc_dmabuf *bmp = NULL;
162	struct lpfc_dmabuf *mp = NULL;
163	struct ulp_bde64 *bpl;
164	u8 rc = 0;
165	u32 tmo;
166
167	/* fill in BDEs for command */
168	mp = kmalloc(sizeof(*mp), GFP_KERNEL);
169	if (!mp) {
170		rc = 1;
171		goto ct_exit;
172	}
173
174	mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys);
175	if (!mp->virt) {
176		rc = 2;
177		goto ct_free_mp;
178	}
179
180	/* Allocate buffer for Buffer ptr list */
181	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
182	if (!bmp) {
183		rc = 3;
184		goto ct_free_mpvirt;
185	}
186
187	bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &bmp->phys);
188	if (!bmp->virt) {
189		rc = 4;
190		goto ct_free_bmp;
191	}
192
193	INIT_LIST_HEAD(&mp->list);
194	INIT_LIST_HEAD(&bmp->list);
195
196	bpl = (struct ulp_bde64 *)bmp->virt;
197	memset(bpl, 0, sizeof(struct ulp_bde64));
198	bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
199	bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
200	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
201	bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4);
202	bpl->tus.w = le32_to_cpu(bpl->tus.w);
203
204	ct_rsp = (struct lpfc_sli_ct_request *)mp->virt;
205	memset(ct_rsp, 0, sizeof(struct lpfc_sli_ct_request));
206
207	ct_rsp->RevisionId.bits.Revision = SLI_CT_REVISION;
208	ct_rsp->RevisionId.bits.InId = 0;
209	ct_rsp->FsType = ct_req->FsType;
210	ct_rsp->FsSubType = ct_req->FsSubType;
211	ct_rsp->CommandResponse.bits.Size = 0;
212	ct_rsp->CommandResponse.bits.CmdRsp =
213		cpu_to_be16(SLI_CT_RESPONSE_FS_RJT);
214	ct_rsp->ReasonCode = SLI_CT_REQ_NOT_SUPPORTED;
215	ct_rsp->Explanation = SLI_CT_NO_ADDITIONAL_EXPL;
216
217	cmdiocbq = lpfc_sli_get_iocbq(phba);
218	if (!cmdiocbq) {
219		rc = 5;
220		goto ct_free_bmpvirt;
221	}
222
223	if (phba->sli_rev == LPFC_SLI_REV4) {
224		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp,
225					 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
226					 ox_id, 1, FC_RCTL_DD_SOL_CTL, 1,
227					 CMD_XMIT_SEQUENCE64_WQE);
228	} else {
229		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp, 0, ulp_context, 1,
230					 FC_RCTL_DD_SOL_CTL, 1,
231					 CMD_XMIT_SEQUENCE64_CX);
232	}
233
234	/* Save for completion so we can release these resources */
235	cmdiocbq->rsp_dmabuf = mp;
236	cmdiocbq->bpl_dmabuf = bmp;
237	cmdiocbq->cmd_cmpl = lpfc_ct_unsol_cmpl;
238	tmo = (3 * phba->fc_ratov);
239
240	cmdiocbq->retry = 0;
241	cmdiocbq->vport = vport;
242	cmdiocbq->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
243
244	cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
245	if (!cmdiocbq->ndlp)
246		goto ct_no_ndlp;
247
248	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
249	if (rc) {
250		lpfc_nlp_put(ndlp);
251		goto ct_no_ndlp;
252	}
253	return;
254
255ct_no_ndlp:
256	rc = 6;
257	lpfc_sli_release_iocbq(phba, cmdiocbq);
258ct_free_bmpvirt:
259	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
260ct_free_bmp:
261	kfree(bmp);
262ct_free_mpvirt:
263	lpfc_mbuf_free(phba, mp->virt, mp->phys);
264ct_free_mp:
265	kfree(mp);
266ct_exit:
267	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
268			 "6440 Unsol CT: Rsp err %d Data: x%lx\n",
269			 rc, vport->fc_flag);
270}
271
272/**
273 * lpfc_ct_handle_mibreq - Process an unsolicited CT MIB request data buffer
274 * @phba: pointer to lpfc hba data structure.
275 * @ctiocbq: pointer to lpfc CT command iocb data structure.
276 *
 * This routine processes the IOCB associated with an unsolicited CT MIB
 * request. It first determines whether there is an existing ndlp that
 * matches the DID from the unsolicited IOCB; if not, it simply returns.
 * Otherwise the unsupported MIB command is rejected with an FS_RJT.
280 **/
281static void
282lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
283{
284	struct lpfc_sli_ct_request *ct_req;
285	struct lpfc_nodelist *ndlp = NULL;
286	struct lpfc_vport *vport = ctiocbq->vport;
287	u32 ulp_status = get_job_ulpstatus(phba, ctiocbq);
288	u32 ulp_word4 = get_job_word4(phba, ctiocbq);
289	u32 did;
290	u16 mi_cmd;
291
292	did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp);
293	if (ulp_status) {
294		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
295				 "6438 Unsol CT: status:x%x/x%x did : x%x\n",
296				 ulp_status, ulp_word4, did);
297		return;
298	}
299
300	/* Ignore traffic received during vport shutdown */
301	if (test_bit(FC_UNLOADING, &vport->load_flag))
302		return;
303
304	ndlp = lpfc_findnode_did(vport, did);
305	if (!ndlp) {
306		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
307				 "6439 Unsol CT: NDLP Not Found for DID : x%x",
308				 did);
309		return;
310	}
311
312	ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;
313
314	mi_cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp);
315	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
316			 "6442 : MI Cmd : x%x Not Supported\n", mi_cmd);
317	lpfc_ct_reject_event(ndlp, ct_req,
318			     bf_get(wqe_ctxt_tag,
319				    &ctiocbq->wqe.xmit_els_rsp.wqe_com),
320			     bf_get(wqe_rcvoxid,
321				    &ctiocbq->wqe.xmit_els_rsp.wqe_com));
322}
323
324/**
325 * lpfc_ct_unsol_event - Process an unsolicited event from a ct sli ring
326 * @phba: pointer to lpfc hba data structure.
327 * @pring: pointer to a SLI ring.
328 * @ctiocbq: pointer to lpfc ct iocb data structure.
329 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the appropriate
 * routine after properly setting up the iocb buffer from the SLI ring on
 * which the unsolicited event was received.
335 **/
336void
337lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
338		    struct lpfc_iocbq *ctiocbq)
339{
340	struct lpfc_dmabuf *mp = NULL;
341	IOCB_t *icmd = &ctiocbq->iocb;
342	int i;
343	struct lpfc_iocbq *iocbq;
344	struct lpfc_iocbq *iocb;
345	dma_addr_t dma_addr;
346	uint32_t size;
347	struct list_head head;
348	struct lpfc_sli_ct_request *ct_req;
349	struct lpfc_dmabuf *bdeBuf1 = ctiocbq->cmd_dmabuf;
350	struct lpfc_dmabuf *bdeBuf2 = ctiocbq->bpl_dmabuf;
351	u32 status, parameter, bde_count = 0;
352	struct lpfc_wcqe_complete *wcqe_cmpl = NULL;
353
354	ctiocbq->cmd_dmabuf = NULL;
355	ctiocbq->rsp_dmabuf = NULL;
356	ctiocbq->bpl_dmabuf = NULL;
357
358	wcqe_cmpl = &ctiocbq->wcqe_cmpl;
359	status = get_job_ulpstatus(phba, ctiocbq);
360	parameter = get_job_word4(phba, ctiocbq);
361	if (phba->sli_rev == LPFC_SLI_REV4)
362		bde_count = wcqe_cmpl->word3;
363	else
364		bde_count = icmd->ulpBdeCount;
365
366	if (unlikely(status == IOSTAT_NEED_BUFFER)) {
367		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
368	} else if ((status == IOSTAT_LOCAL_REJECT) &&
369		   ((parameter & IOERR_PARAM_MASK) ==
370		   IOERR_RCV_BUFFER_WAITING)) {
371		/* Not enough posted buffers; Try posting more buffers */
372		phba->fc_stat.NoRcvBuf++;
373		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
374			lpfc_sli3_post_buffer(phba, pring, 2);
375		return;
376	}
377
378	/* If there are no BDEs associated
379	 * with this IOCB, there is nothing to do.
380	 */
381	if (bde_count == 0)
382		return;
383
384	ctiocbq->cmd_dmabuf = bdeBuf1;
385	if (bde_count == 2)
386		ctiocbq->bpl_dmabuf = bdeBuf2;
387
388	ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;
389
390	if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE &&
391	    ct_req->FsSubType == SLI_CT_MIB_Subtypes) {
392		lpfc_ct_handle_mibreq(phba, ctiocbq);
393	} else {
394		if (!lpfc_bsg_ct_unsol_event(phba, pring, ctiocbq))
395			return;
396	}
397
398	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
399		INIT_LIST_HEAD(&head);
400		list_add_tail(&head, &ctiocbq->list);
401		list_for_each_entry(iocb, &head, list) {
402			if (phba->sli_rev == LPFC_SLI_REV4)
403				bde_count = iocb->wcqe_cmpl.word3;
404			else
405				bde_count = iocb->iocb.ulpBdeCount;
406
407			if (!bde_count)
408				continue;
409			bdeBuf1 = iocb->cmd_dmabuf;
410			iocb->cmd_dmabuf = NULL;
411			if (phba->sli_rev == LPFC_SLI_REV4)
412				size = iocb->wqe.gen_req.bde.tus.f.bdeSize;
413			else
414				size  = iocb->iocb.un.cont64[0].tus.f.bdeSize;
415			lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size);
416			lpfc_in_buf_free(phba, bdeBuf1);
417			if (bde_count == 2) {
418				bdeBuf2 = iocb->bpl_dmabuf;
419				iocb->bpl_dmabuf = NULL;
420				if (phba->sli_rev == LPFC_SLI_REV4)
421					size = iocb->unsol_rcv_len;
422				else
423					size = iocb->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize;
424				lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf2,
425						     size);
426				lpfc_in_buf_free(phba, bdeBuf2);
427			}
428		}
429		list_del(&head);
430	} else {
431		INIT_LIST_HEAD(&head);
432		list_add_tail(&head, &ctiocbq->list);
433		list_for_each_entry(iocbq, &head, list) {
434			icmd = &iocbq->iocb;
435			if (icmd->ulpBdeCount == 0)
436				lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
437			for (i = 0; i < icmd->ulpBdeCount; i++) {
438				dma_addr = getPaddr(icmd->un.cont64[i].addrHigh,
439						    icmd->un.cont64[i].addrLow);
440				mp = lpfc_sli_ringpostbuf_get(phba, pring,
441							      dma_addr);
442				size = icmd->un.cont64[i].tus.f.bdeSize;
443				lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
444				lpfc_in_buf_free(phba, mp);
445			}
446			lpfc_sli3_post_buffer(phba, pring, i);
447		}
448		list_del(&head);
449	}
450}
451
452/**
453 * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler
454 * @phba: Pointer to HBA context object.
455 * @dmabuf: pointer to a dmabuf that describes the FC sequence
456 *
457 * This function serves as the upper level protocol abort handler for CT
458 * protocol.
459 *
460 * Return 1 if abort has been handled, 0 otherwise.
461 **/
462int
463lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
464{
465	int handled;
466
467	/* CT upper level goes through BSG */
468	handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);
469
470	return handled;
471}
472
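/*
 * Free every mbuf in a CT response chain built by lpfc_alloc_ct_rsp,
 * including the list head itself.
 */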
473static void
474lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
475{
476	struct lpfc_dmabuf *mlast, *next_mlast;
477
478	list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
479		list_del(&mlast->list);
480		lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
481		kfree(mlast);
482	}
483	lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
484	kfree(mlist);
485	return;
486}
487
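/*
 * Allocate a chain of mbufs big enough to receive a CT response of @size
 * bytes and describe each chunk with a BDE in @bpl.  The number of BDEs
 * written is returned through @entries; NULL is returned on allocation
 * failure.
 */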
488static struct lpfc_dmabuf *
489lpfc_alloc_ct_rsp(struct lpfc_hba *phba, __be16 cmdcode, struct ulp_bde64 *bpl,
490		  uint32_t size, int *entries)
491{
492	struct lpfc_dmabuf *mlist = NULL;
493	struct lpfc_dmabuf *mp;
494	int cnt, i = 0;
495
496	/* We get chunks of FCELSSIZE */
	cnt = size > FCELSSIZE ? FCELSSIZE : size;
498
499	while (size) {
500		/* Allocate buffer for rsp payload */
501		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
502		if (!mp) {
503			if (mlist)
504				lpfc_free_ct_rsp(phba, mlist);
505			return NULL;
506		}
507
508		INIT_LIST_HEAD(&mp->list);
509
510		if (be16_to_cpu(cmdcode) == SLI_CTNS_GID_FT ||
511		    be16_to_cpu(cmdcode) == SLI_CTNS_GFF_ID)
512			mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
513		else
514			mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));
515
516		if (!mp->virt) {
517			kfree(mp);
518			if (mlist)
519				lpfc_free_ct_rsp(phba, mlist);
520			return NULL;
521		}
522
523		/* Queue it to a linked list */
524		if (!mlist)
525			mlist = mp;
526		else
527			list_add_tail(&mp->list, &mlist->list);
528
529		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
530		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
533		bpl->tus.f.bdeSize = (uint16_t) cnt;
534		bpl->tus.w = le32_to_cpu(bpl->tus.w);
535		bpl++;
536
537		i++;
538		size -= cnt;
539	}
540
541	*entries = i;
542	return mlist;
543}
544
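/*
 * Release the command, response and BPL DMA buffers attached to a CT iocb,
 * then return the iocb itself to the pool.
 */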
545int
546lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
547{
548	struct lpfc_dmabuf *buf_ptr;
549
550	/* IOCBQ job structure gets cleaned during release.  Just release
551	 * the dma buffers here.
552	 */
553	if (ctiocb->cmd_dmabuf) {
554		buf_ptr = ctiocb->cmd_dmabuf;
555		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
556		kfree(buf_ptr);
557		ctiocb->cmd_dmabuf = NULL;
558	}
559	if (ctiocb->rsp_dmabuf) {
560		lpfc_free_ct_rsp(phba, ctiocb->rsp_dmabuf);
561		ctiocb->rsp_dmabuf = NULL;
562	}
563
564	if (ctiocb->bpl_dmabuf) {
565		buf_ptr = ctiocb->bpl_dmabuf;
566		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
567		kfree(buf_ptr);
568		ctiocb->bpl_dmabuf = NULL;
569	}
570	lpfc_sli_release_iocbq(phba, ctiocb);
571	return 0;
572}
573
574/*
575 * lpfc_gen_req - Build and issue a GEN_REQUEST command  to the SLI Layer
576 * @vport: pointer to a host virtual N_Port data structure.
577 * @bmp: Pointer to BPL for SLI command
 * @inp: Pointer to data buffer that holds the CT command.
 * @outp: Pointer to data buffer(s) that receive the CT response.
580 * @cmpl: completion routine to call when command completes
581 * @ndlp: Destination NPort nodelist entry
582 *
 * This function is the final part of issuing a CT command.
584 */
585static int
586lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
587	     struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
588	     void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
589			  struct lpfc_iocbq *),
590	     struct lpfc_nodelist *ndlp, uint32_t event_tag, uint32_t num_entry,
591	     uint32_t tmo, uint8_t retry)
592{
593	struct lpfc_hba  *phba = vport->phba;
594	struct lpfc_iocbq *geniocb;
595	int rc;
596	u16 ulp_context;
597
598	/* Allocate buffer for  command iocb */
599	geniocb = lpfc_sli_get_iocbq(phba);
600
601	if (geniocb == NULL)
602		return 1;
603
604	/* Update the num_entry bde count */
605	geniocb->num_bdes = num_entry;
606
607	geniocb->bpl_dmabuf = bmp;
608
609	/* Save for completion so we can release these resources */
610	geniocb->cmd_dmabuf = inp;
611	geniocb->rsp_dmabuf = outp;
612
613	geniocb->event_tag = event_tag;
614
615	if (!tmo) {
616		 /* FC spec states we need 3 * ratov for CT requests */
617		tmo = (3 * phba->fc_ratov);
618	}
619
620	if (phba->sli_rev == LPFC_SLI_REV4)
621		ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
622	else
623		ulp_context = ndlp->nlp_rpi;
624
625	lpfc_sli_prep_gen_req(phba, geniocb, bmp, ulp_context, num_entry, tmo);
626
627	/* Issue GEN REQ IOCB for NPORT <did> */
628	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
629			 "0119 Issue GEN REQ IOCB to NPORT x%x "
630			 "Data: x%x x%x\n",
631			 ndlp->nlp_DID, geniocb->iotag,
632			 vport->port_state);
633	geniocb->cmd_cmpl = cmpl;
634	geniocb->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
635	geniocb->vport = vport;
636	geniocb->retry = retry;
637	geniocb->ndlp = lpfc_nlp_get(ndlp);
638	if (!geniocb->ndlp)
639		goto out;
640
641	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
642	if (rc == IOCB_ERROR) {
643		lpfc_nlp_put(ndlp);
644		goto out;
645	}
646
647	return 0;
648out:
649	lpfc_sli_release_iocbq(phba, geniocb);
650	return 1;
651}
652
653/*
654 * lpfc_ct_cmd - Build and issue a CT command
655 * @vport: pointer to a host virtual N_Port data structure.
 * @inmp: Pointer to data buffer that holds the CT command.
657 * @bmp: Pointer to BPL for SLI command
658 * @ndlp: Destination NPort nodelist entry
659 * @cmpl: completion routine to call when command completes
660 *
661 * This function is called for issuing a CT command.
662 */
663static int
664lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp,
665	    struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp,
666	    void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
667			  struct lpfc_iocbq *),
668	    uint32_t rsp_size, uint8_t retry)
669{
670	struct lpfc_hba  *phba = vport->phba;
671	struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt;
672	struct lpfc_dmabuf *outmp;
673	int cnt = 0, status;
674	__be16 cmdcode = ((struct lpfc_sli_ct_request *)inmp->virt)->
675		CommandResponse.bits.CmdRsp;
676
677	bpl++;			/* Skip past ct request */
678
679	/* Put buffer(s) for ct rsp in bpl */
680	outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt);
681	if (!outmp)
682		return -ENOMEM;
683	/*
684	 * Form the CT IOCB.  The total number of BDEs in this IOCB
685	 * is the single command plus response count from
686	 * lpfc_alloc_ct_rsp.
687	 */
688	cnt += 1;
689	status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp,
690			phba->fc_eventTag, cnt, 0, retry);
691	if (status) {
692		lpfc_free_ct_rsp(phba, outmp);
693		return -ENOMEM;
694	}
695	return 0;
696}
697
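/*
 * Walk the adapter's vport list under the port_list_lock and return the
 * vport whose DID matches @did, or NULL if there is no match.
 */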
698struct lpfc_vport *
lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did)
{
700	struct lpfc_vport *vport_curr;
701	unsigned long flags;
702
703	spin_lock_irqsave(&phba->port_list_lock, flags);
704	list_for_each_entry(vport_curr, &phba->port_list, listentry) {
705		if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) {
706			spin_unlock_irqrestore(&phba->port_list_lock, flags);
707			return vport_curr;
708		}
709	}
710	spin_unlock_irqrestore(&phba->port_list_lock, flags);
711	return NULL;
712}
713
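/*
 * Set up discovery for an N_Port ID reported in a NameServer response.  For
 * an NPIV port with restricted login enabled, a GFF_ID query may be issued
 * first (unless the port is already a known FCP/NVMe target) to learn the
 * remote port's FC4 features before committing to discovery.
 */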
714static void
715lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
716{
717	struct lpfc_nodelist *ndlp;
718
719	if ((vport->port_type != LPFC_NPIV_PORT) ||
720	    !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) {
721
722		ndlp = lpfc_setup_disc_node(vport, Did);
723
724		if (ndlp) {
725			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
726				"Parse GID_FTrsp: did:x%x flg:x%lx x%x",
727				Did, ndlp->nlp_flag, vport->fc_flag);
728
729			/* By default, the driver expects to support FCP FC4 */
730			if (fc4_type == FC_TYPE_FCP)
731				ndlp->nlp_fc4_type |= NLP_FC4_FCP;
732
733			if (fc4_type == FC_TYPE_NVME)
734				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
735
736			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
737					 "0238 Process x%06x NameServer Rsp "
738					 "Data: x%x x%x x%x x%lx x%x\n", Did,
739					 ndlp->nlp_flag, ndlp->nlp_fc4_type,
740					 ndlp->nlp_state, vport->fc_flag,
741					 vport->fc_rscn_id_cnt);
742
743			/* if ndlp needs to be discovered and prior
744			 * state of ndlp hit devloss, change state to
745			 * allow rediscovery.
746			 */
747			if (ndlp->nlp_flag & NLP_NPR_2B_DISC &&
748			    ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
749				lpfc_nlp_set_state(vport, ndlp,
750						   NLP_STE_NPR_NODE);
751			}
752		} else {
753			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
754				"Skip1 GID_FTrsp: did:x%x flg:x%lx cnt:%d",
755				Did, vport->fc_flag, vport->fc_rscn_id_cnt);
756
757			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
758					 "0239 Skip x%06x NameServer Rsp "
759					 "Data: x%lx x%x x%px\n",
760					 Did, vport->fc_flag,
761					 vport->fc_rscn_id_cnt, ndlp);
762		}
763	} else {
764		if (!test_bit(FC_RSCN_MODE, &vport->fc_flag) ||
765		    lpfc_rscn_payload_check(vport, Did)) {
766			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
767				"Query GID_FTrsp: did:x%x flg:x%lx cnt:%d",
768				Did, vport->fc_flag, vport->fc_rscn_id_cnt);
769
770			/*
771			 * This NPortID was previously a FCP/NVMe target,
772			 * Don't even bother to send GFF_ID.
773			 */
774			ndlp = lpfc_findnode_did(vport, Did);
775			if (ndlp &&
776			    (ndlp->nlp_type &
777			    (NLP_FCP_TARGET | NLP_NVME_TARGET))) {
778				if (fc4_type == FC_TYPE_FCP)
779					ndlp->nlp_fc4_type |= NLP_FC4_FCP;
780				if (fc4_type == FC_TYPE_NVME)
781					ndlp->nlp_fc4_type |= NLP_FC4_NVME;
782				lpfc_setup_disc_node(vport, Did);
783			} else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
784				   0, Did) == 0)
785				vport->num_disc_nodes++;
786			else
787				lpfc_setup_disc_node(vport, Did);
788		} else {
789			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
790				"Skip2 GID_FTrsp: did:x%x flg:x%lx cnt:%d",
791				Did, vport->fc_flag, vport->fc_rscn_id_cnt);
792
793			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
794					 "0245 Skip x%06x NameServer Rsp "
795					 "Data: x%lx x%x\n", Did,
796					 vport->fc_flag,
797					 vport->fc_rscn_id_cnt);
798		}
799	}
800}
801
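/*
 * Examine one DID from a GID_FT/GID_PT response.  DIDs that belong to other
 * vports on the same HBA are filtered out to conserve RPIs.  In NVMET mode,
 * NVME initiator nodes are instead marked for (or cleared from) recovery.
 */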
802static void
803lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
804{
805	struct lpfc_hba *phba = vport->phba;
806	struct lpfc_nodelist *ndlp = NULL;
807	char *str;
808
809	if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT)
810		str = "GID_FT";
811	else
812		str = "GID_PT";
813	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
814			 "6430 Process %s rsp for %08x type %x %s %s\n",
815			 str, Did, fc4_type,
816			 (fc4_type == FC_TYPE_FCP) ?  "FCP" : " ",
817			 (fc4_type == FC_TYPE_NVME) ?  "NVME" : " ");
818	/*
819	 * To conserve rpi's, filter out addresses for other
820	 * vports on the same physical HBAs.
821	 */
822	if (Did != vport->fc_myDID &&
823	    (!lpfc_find_vport_by_did(phba, Did) ||
824	     vport->cfg_peer_port_login)) {
825		if (!phba->nvmet_support) {
826			/* FCPI/NVMEI path. Process Did */
827			lpfc_prep_node_fc4type(vport, Did, fc4_type);
828			return;
829		}
830		/* NVMET path.  NVMET only cares about NVMEI nodes. */
831		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
832			if (ndlp->nlp_type != NLP_NVME_INITIATOR ||
833			    ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
834				continue;
835			spin_lock_irq(&ndlp->lock);
836			if (ndlp->nlp_DID == Did)
837				ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
838			else
839				ndlp->nlp_flag |= NLP_NVMET_RECOV;
840			spin_unlock_irq(&ndlp->lock);
841		}
842	}
843}
844
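/*
 * Walk a NameServer GID_xx accept payload spread across the response mbuf
 * chain.  Each entry is a 4-byte port ID; the list ends with an entry that
 * has the SLI_CT_LAST_ENTRY bit set.  In NVMET mode, nodes still marked
 * NLP_NVMET_RECOV afterwards are put into device recovery.
 */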
845static int
846lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type,
847	    uint32_t Size)
848{
849	struct lpfc_sli_ct_request *Response =
850		(struct lpfc_sli_ct_request *) mp->virt;
851	struct lpfc_dmabuf *mlast, *next_mp;
852	uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType;
853	uint32_t Did, CTentry;
854	int Cnt;
855	struct list_head head;
856	struct lpfc_nodelist *ndlp = NULL;
857
858	lpfc_set_disctmo(vport);
859	vport->num_disc_nodes = 0;
860	vport->fc_ns_retry = 0;
861
862
863	list_add_tail(&head, &mp->list);
864	list_for_each_entry_safe(mp, next_mp, &head, list) {
865		mlast = mp;
866
		Cnt = Size > FCELSSIZE ? FCELSSIZE : Size;
868
869		Size -= Cnt;
870
871		if (!ctptr) {
872			ctptr = (uint32_t *) mlast->virt;
873		} else
874			Cnt -= 16;	/* subtract length of CT header */
875
876		/* Loop through entire NameServer list of DIDs */
877		while (Cnt >= sizeof(uint32_t)) {
878			/* Get next DID from NameServer List */
879			CTentry = *ctptr++;
880			Did = ((be32_to_cpu(CTentry)) & Mask_DID);
881			lpfc_ns_rsp_audit_did(vport, Did, fc4_type);
882			if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY)))
883				goto nsout1;
884
885			Cnt -= sizeof(uint32_t);
886		}
887		ctptr = NULL;
888
889	}
890
	/* All GID_FT entries processed.  If the driver is running in
	 * target mode, put impacted nodes into recovery and drop
893	 * the RPI to flush outstanding IO.
894	 */
895	if (vport->phba->nvmet_support) {
896		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
897			if (!(ndlp->nlp_flag & NLP_NVMET_RECOV))
898				continue;
899			lpfc_disc_state_machine(vport, ndlp, NULL,
900						NLP_EVT_DEVICE_RECOVERY);
901			spin_lock_irq(&ndlp->lock);
902			ndlp->nlp_flag &= ~NLP_NVMET_RECOV;
903			spin_unlock_irq(&ndlp->lock);
904		}
905	}
906
907nsout1:
908	list_del(&head);
909	return 0;
910}
911
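/*
 * Completion handler for a GID_FT NameServer query.  Link flips, vport
 * unload, deferred RSCNs and retryable errors are handled first; a good
 * response is parsed by lpfc_ns_rsp, and discovery is (re)started once all
 * outstanding GID_FT/GFF_ID work has completed.
 */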
912static void
913lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
914			struct lpfc_iocbq *rspiocb)
915{
916	struct lpfc_vport *vport = cmdiocb->vport;
917	struct lpfc_dmabuf *outp;
918	struct lpfc_dmabuf *inp;
919	struct lpfc_sli_ct_request *CTrsp;
920	struct lpfc_sli_ct_request *CTreq;
921	struct lpfc_nodelist *ndlp;
922	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
923	u32 ulp_word4 = get_job_word4(phba, rspiocb);
924	int rc, type;
925
926	/* First save ndlp, before we overwrite it */
927	ndlp = cmdiocb->ndlp;
928
929	/* we pass cmdiocb to state machine which needs rspiocb as well */
930	cmdiocb->rsp_iocb = rspiocb;
931	inp = cmdiocb->cmd_dmabuf;
932	outp = cmdiocb->rsp_dmabuf;
933
934	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
935		 "GID_FT cmpl:     status:x%x/x%x rtry:%d",
936		ulp_status, ulp_word4, vport->fc_ns_retry);
937
938	/* Ignore response if link flipped after this request was made */
939	if (cmdiocb->event_tag != phba->fc_eventTag) {
940		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
941				 "9043 Event tag mismatch. Ignoring NS rsp\n");
942		goto out;
943	}
944
945	/* Skip processing response on pport if unloading */
946	if (vport == phba->pport && test_bit(FC_UNLOADING, &vport->load_flag)) {
947		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
948			lpfc_els_flush_rscn(vport);
949		goto out;
950	}
951
952	if (lpfc_els_chk_latt(vport)) {
953		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
954				 "0216 Link event during NS query\n");
955		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
956			lpfc_els_flush_rscn(vport);
957		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
958		goto out;
959	}
960	if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
961		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
962				 "0226 NS query failed due to link event: "
963				 "ulp_status x%x ulp_word4 x%x fc_flag x%lx "
964				 "port_state x%x gidft_inp x%x\n",
965				 ulp_status, ulp_word4, vport->fc_flag,
966				 vport->port_state, vport->gidft_inp);
967		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
968			lpfc_els_flush_rscn(vport);
969		if (vport->gidft_inp)
970			vport->gidft_inp--;
971		goto out;
972	}
973
974	if (test_and_clear_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) {
975		/* This is a GID_FT completing so the gidft_inp counter was
976		 * incremented before the GID_FT was issued to the wire.
977		 */
978		if (vport->gidft_inp)
979			vport->gidft_inp--;
980
981		/*
982		 * Skip processing the NS response
983		 * Re-issue the NS cmd
984		 */
985		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
986				 "0151 Process Deferred RSCN Data: x%lx x%x\n",
987				 vport->fc_flag, vport->fc_rscn_id_cnt);
988		lpfc_els_handle_rscn(vport);
989
990		goto out;
991	}
992
993	if (ulp_status) {
994		/* Check for retry */
995		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
996			if (ulp_status != IOSTAT_LOCAL_REJECT ||
997			    (ulp_word4 & IOERR_PARAM_MASK) !=
998			    IOERR_NO_RESOURCES)
999				vport->fc_ns_retry++;
1000
1001			type = lpfc_get_gidft_type(vport, cmdiocb);
1002			if (type == 0)
1003				goto out;
1004
1005			/* CT command is being retried */
1006			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT,
1007					 vport->fc_ns_retry, type);
1008			if (rc == 0)
1009				goto out;
1010			else { /* Unable to send NS cmd */
1011				if (vport->gidft_inp)
1012					vport->gidft_inp--;
1013			}
1014		}
1015		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
1016			lpfc_els_flush_rscn(vport);
1017		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1018		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1019				 "0257 GID_FT Query error: 0x%x 0x%x\n",
1020				 ulp_status, vport->fc_ns_retry);
1021	} else {
1022		/* Good status, continue checking */
1023		CTreq = (struct lpfc_sli_ct_request *) inp->virt;
1024		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
1025		if (CTrsp->CommandResponse.bits.CmdRsp ==
1026		    cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) {
1027			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1028					 "0208 NameServer Rsp Data: x%lx x%x "
1029					 "x%x x%x sz x%x\n",
1030					 vport->fc_flag,
1031					 CTreq->un.gid.Fc4Type,
1032					 vport->num_disc_nodes,
1033					 vport->gidft_inp,
1034					 get_job_data_placed(phba, rspiocb));
1035
1036			lpfc_ns_rsp(vport,
1037				    outp,
1038				    CTreq->un.gid.Fc4Type,
1039				    get_job_data_placed(phba, rspiocb));
1040		} else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1041			   SLI_CT_RESPONSE_FS_RJT) {
1042			/* NameServer Rsp Error */
1043			if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ)
1044			    && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) {
1045				lpfc_printf_vlog(vport, KERN_INFO,
1046					LOG_DISCOVERY,
1047					"0269 No NameServer Entries "
1048					"Data: x%x x%x x%x x%lx\n",
1049					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1050					(uint32_t) CTrsp->ReasonCode,
1051					(uint32_t) CTrsp->Explanation,
1052					vport->fc_flag);
1053
1054				lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1055				"GID_FT no entry  cmd:x%x rsn:x%x exp:x%x",
1056				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1057				(uint32_t) CTrsp->ReasonCode,
1058				(uint32_t) CTrsp->Explanation);
1059			} else {
1060				lpfc_printf_vlog(vport, KERN_INFO,
1061					LOG_DISCOVERY,
1062					"0240 NameServer Rsp Error "
1063					"Data: x%x x%x x%x x%lx\n",
1064					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1065					(uint32_t) CTrsp->ReasonCode,
1066					(uint32_t) CTrsp->Explanation,
1067					vport->fc_flag);
1068
1069				lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1070				"GID_FT rsp err1  cmd:x%x rsn:x%x exp:x%x",
1071				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1072				(uint32_t) CTrsp->ReasonCode,
1073				(uint32_t) CTrsp->Explanation);
1074			}
1075
1076
1077		} else {
1078			/* NameServer Rsp Error */
1079			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1080					"0241 NameServer Rsp Error "
1081					"Data: x%x x%x x%x x%lx\n",
1082					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1083					(uint32_t) CTrsp->ReasonCode,
1084					(uint32_t) CTrsp->Explanation,
1085					vport->fc_flag);
1086
1087			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1088				"GID_FT rsp err2  cmd:x%x rsn:x%x exp:x%x",
1089				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1090				(uint32_t) CTrsp->ReasonCode,
1091				(uint32_t) CTrsp->Explanation);
1092		}
1093		if (vport->gidft_inp)
1094			vport->gidft_inp--;
1095	}
1096
1097	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1098			 "4216 GID_FT cmpl inp %d disc %d\n",
1099			 vport->gidft_inp, vport->num_disc_nodes);
1100
1101	/* Link up / RSCN discovery */
1102	if ((vport->num_disc_nodes == 0) &&
1103	    (vport->gidft_inp == 0)) {
1104		/*
1105		 * The driver has cycled through all Nports in the RSCN payload.
1106		 * Complete the handling by cleaning up and marking the
1107		 * current driver state.
1108		 */
1109		if (vport->port_state >= LPFC_DISC_AUTH) {
1110			if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
1111				lpfc_els_flush_rscn(vport);
1112				/* RSCN still */
1113				set_bit(FC_RSCN_MODE, &vport->fc_flag);
1114			} else {
1115				lpfc_els_flush_rscn(vport);
1116			}
1117		}
1118
1119		lpfc_disc_start(vport);
1120	}
1121out:
1122	lpfc_ct_free_iocb(phba, cmdiocb);
1123	lpfc_nlp_put(ndlp);
1124	return;
1125}
1126
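/*
 * Completion handler for a GID_PT NameServer query; mirrors the GID_FT
 * handling above but retries with SLI_CTNS_GID_PT.
 */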
1127static void
1128lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1129			struct lpfc_iocbq *rspiocb)
1130{
1131	struct lpfc_vport *vport = cmdiocb->vport;
1132	struct lpfc_dmabuf *outp;
1133	struct lpfc_dmabuf *inp;
1134	struct lpfc_sli_ct_request *CTrsp;
1135	struct lpfc_sli_ct_request *CTreq;
1136	struct lpfc_nodelist *ndlp;
1137	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1138	u32 ulp_word4 = get_job_word4(phba, rspiocb);
1139	int rc;
1140
1141	/* First save ndlp, before we overwrite it */
1142	ndlp = cmdiocb->ndlp;
1143
1144	/* we pass cmdiocb to state machine which needs rspiocb as well */
1145	cmdiocb->rsp_iocb = rspiocb;
1146	inp = cmdiocb->cmd_dmabuf;
1147	outp = cmdiocb->rsp_dmabuf;
1148
1149	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1150			      "GID_PT cmpl:     status:x%x/x%x rtry:%d",
1151			      ulp_status, ulp_word4,
1152			      vport->fc_ns_retry);
1153
1154	/* Ignore response if link flipped after this request was made */
1155	if (cmdiocb->event_tag != phba->fc_eventTag) {
1156		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1157				 "9044 Event tag mismatch. Ignoring NS rsp\n");
1158		goto out;
1159	}
1160
1161	/* Skip processing response on pport if unloading */
1162	if (vport == phba->pport && test_bit(FC_UNLOADING, &vport->load_flag)) {
1163		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
1164			lpfc_els_flush_rscn(vport);
1165		goto out;
1166	}
1167
1168	if (lpfc_els_chk_latt(vport)) {
1169		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1170				 "4108 Link event during NS query\n");
1171		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
1172			lpfc_els_flush_rscn(vport);
1173		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1174		goto out;
1175	}
1176	if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) {
1177		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1178				 "4166 NS query failed due to link event: "
1179				 "ulp_status x%x ulp_word4 x%x fc_flag x%lx "
1180				 "port_state x%x gidft_inp x%x\n",
1181				 ulp_status, ulp_word4, vport->fc_flag,
1182				 vport->port_state, vport->gidft_inp);
1183		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
1184			lpfc_els_flush_rscn(vport);
1185		if (vport->gidft_inp)
1186			vport->gidft_inp--;
1187		goto out;
1188	}
1189
1190	if (test_and_clear_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) {
1191		/* This is a GID_PT completing so the gidft_inp counter was
1192		 * incremented before the GID_PT was issued to the wire.
1193		 */
1194		if (vport->gidft_inp)
1195			vport->gidft_inp--;
1196
1197		/*
1198		 * Skip processing the NS response
1199		 * Re-issue the NS cmd
1200		 */
1201		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1202				 "4167 Process Deferred RSCN Data: x%lx x%x\n",
1203				 vport->fc_flag, vport->fc_rscn_id_cnt);
1204		lpfc_els_handle_rscn(vport);
1205
1206		goto out;
1207	}
1208
1209	if (ulp_status) {
1210		/* Check for retry */
1211		if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
1212			if (ulp_status != IOSTAT_LOCAL_REJECT ||
1213			    (ulp_word4 & IOERR_PARAM_MASK) !=
1214			    IOERR_NO_RESOURCES)
1215				vport->fc_ns_retry++;
1216
1217			/* CT command is being retried */
1218			rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT,
1219					 vport->fc_ns_retry, GID_PT_N_PORT);
1220			if (rc == 0)
1221				goto out;
1222			else { /* Unable to send NS cmd */
1223				if (vport->gidft_inp)
1224					vport->gidft_inp--;
1225			}
1226		}
1227		if (test_bit(FC_RSCN_MODE, &vport->fc_flag))
1228			lpfc_els_flush_rscn(vport);
1229		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
1230		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1231				 "4103 GID_FT Query error: 0x%x 0x%x\n",
1232				 ulp_status, vport->fc_ns_retry);
1233	} else {
1234		/* Good status, continue checking */
1235		CTreq = (struct lpfc_sli_ct_request *)inp->virt;
1236		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
1237		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1238		    SLI_CT_RESPONSE_FS_ACC) {
1239			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1240					 "4105 NameServer Rsp Data: x%lx x%x "
1241					 "x%x x%x sz x%x\n",
1242					 vport->fc_flag,
1243					 CTreq->un.gid.Fc4Type,
1244					 vport->num_disc_nodes,
1245					 vport->gidft_inp,
1246					 get_job_data_placed(phba, rspiocb));
1247
1248			lpfc_ns_rsp(vport,
1249				    outp,
1250				    CTreq->un.gid.Fc4Type,
1251				    get_job_data_placed(phba, rspiocb));
1252		} else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1253			   SLI_CT_RESPONSE_FS_RJT) {
1254			/* NameServer Rsp Error */
1255			if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ)
1256			    && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) {
1257				lpfc_printf_vlog(
1258					vport, KERN_INFO, LOG_DISCOVERY,
1259					"4106 No NameServer Entries "
1260					"Data: x%x x%x x%x x%lx\n",
1261					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1262					(uint32_t)CTrsp->ReasonCode,
1263					(uint32_t)CTrsp->Explanation,
1264					vport->fc_flag);
1265
1266				lpfc_debugfs_disc_trc(
1267				vport, LPFC_DISC_TRC_CT,
1268				"GID_PT no entry  cmd:x%x rsn:x%x exp:x%x",
1269				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1270				(uint32_t)CTrsp->ReasonCode,
1271				(uint32_t)CTrsp->Explanation);
1272			} else {
1273				lpfc_printf_vlog(
1274					vport, KERN_INFO, LOG_DISCOVERY,
1275					"4107 NameServer Rsp Error "
1276					"Data: x%x x%x x%x x%lx\n",
1277					be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1278					(uint32_t)CTrsp->ReasonCode,
1279					(uint32_t)CTrsp->Explanation,
1280					vport->fc_flag);
1281
1282				lpfc_debugfs_disc_trc(
1283				vport, LPFC_DISC_TRC_CT,
1284				"GID_PT rsp err1  cmd:x%x rsn:x%x exp:x%x",
1285				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1286				(uint32_t)CTrsp->ReasonCode,
1287				(uint32_t)CTrsp->Explanation);
1288			}
1289		} else {
1290			/* NameServer Rsp Error */
1291			lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1292					 "4109 NameServer Rsp Error "
1293					 "Data: x%x x%x x%x x%lx\n",
1294					 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1295					 (uint32_t)CTrsp->ReasonCode,
1296					 (uint32_t)CTrsp->Explanation,
1297					 vport->fc_flag);
1298
1299			lpfc_debugfs_disc_trc(
1300				vport, LPFC_DISC_TRC_CT,
1301				"GID_PT rsp err2  cmd:x%x rsn:x%x exp:x%x",
1302				be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1303				(uint32_t)CTrsp->ReasonCode,
1304				(uint32_t)CTrsp->Explanation);
1305		}
1306		if (vport->gidft_inp)
1307			vport->gidft_inp--;
1308	}
1309
1310	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1311			 "6450 GID_PT cmpl inp %d disc %d\n",
1312			 vport->gidft_inp, vport->num_disc_nodes);
1313
1314	/* Link up / RSCN discovery */
1315	if ((vport->num_disc_nodes == 0) &&
1316	    (vport->gidft_inp == 0)) {
1317		/*
1318		 * The driver has cycled through all Nports in the RSCN payload.
1319		 * Complete the handling by cleaning up and marking the
1320		 * current driver state.
1321		 */
1322		if (vport->port_state >= LPFC_DISC_AUTH) {
1323			if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
1324				lpfc_els_flush_rscn(vport);
1325				/* RSCN still */
1326				set_bit(FC_RSCN_MODE, &vport->fc_flag);
1327			} else {
1328				lpfc_els_flush_rscn(vport);
1329			}
1330		}
1331
1332		lpfc_disc_start(vport);
1333	}
1334out:
1335	lpfc_ct_free_iocb(phba, cmdiocb);
1336	lpfc_nlp_put(ndlp);
1337}
1338
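/*
 * Completion handler for a GFF_ID query.  A responder whose FCP feature bits
 * show it is an initiator only is skipped; otherwise the node is set up for
 * discovery.  Retries are driven from the iocb's retry count.
 */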
1339static void
1340lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1341			struct lpfc_iocbq *rspiocb)
1342{
1343	struct lpfc_vport *vport = cmdiocb->vport;
1344	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
1345	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
1346	struct lpfc_sli_ct_request *CTrsp;
1347	int did, rc, retry;
1348	uint8_t fbits;
1349	struct lpfc_nodelist *ndlp = NULL, *free_ndlp = NULL;
1350	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1351	u32 ulp_word4 = get_job_word4(phba, rspiocb);
1352
1353	did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId;
1354	did = be32_to_cpu(did);
1355
1356	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1357		"GFF_ID cmpl:     status:x%x/x%x did:x%x",
1358		ulp_status, ulp_word4, did);
1359
1360	/* Ignore response if link flipped after this request was made */
1361	if (cmdiocb->event_tag != phba->fc_eventTag) {
1362		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1363				 "9045 Event tag mismatch. Ignoring NS rsp\n");
1364		goto iocb_free;
1365	}
1366
1367	if (ulp_status == IOSTAT_SUCCESS) {
1368		/* Good status, continue checking */
1369		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
1370		fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET];
1371
1372		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1373				 "6431 Process GFF_ID rsp for %08x "
1374				 "fbits %02x %s %s\n",
1375				 did, fbits,
1376				 (fbits & FC4_FEATURE_INIT) ? "Initiator" : " ",
1377				 (fbits & FC4_FEATURE_TARGET) ? "Target" : " ");
1378
1379		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1380		    SLI_CT_RESPONSE_FS_ACC) {
1381			if ((fbits & FC4_FEATURE_INIT) &&
1382			    !(fbits & FC4_FEATURE_TARGET)) {
1383				lpfc_printf_vlog(vport, KERN_INFO,
1384						 LOG_DISCOVERY,
1385						 "0270 Skip x%x GFF "
1386						 "NameServer Rsp Data: (init) "
1387						 "x%x x%x\n", did, fbits,
1388						 vport->fc_rscn_id_cnt);
1389				goto out;
1390			}
1391		}
	} else {
1394		/* Check for retry */
1395		if (cmdiocb->retry < LPFC_MAX_NS_RETRY) {
1396			retry = 1;
1397			if (ulp_status == IOSTAT_LOCAL_REJECT) {
1398				switch ((ulp_word4 &
1399					IOERR_PARAM_MASK)) {
1400
1401				case IOERR_NO_RESOURCES:
1402					/* We don't increment the retry
1403					 * count for this case.
1404					 */
1405					break;
1406				case IOERR_LINK_DOWN:
1407				case IOERR_SLI_ABORTED:
1408				case IOERR_SLI_DOWN:
1409					retry = 0;
1410					break;
1411				default:
1412					cmdiocb->retry++;
1413				}
			} else {
				cmdiocb->retry++;
			}
1417
1418			if (retry) {
1419				/* CT command is being retried */
1420				rc = lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
1421					 cmdiocb->retry, did);
1422				if (rc == 0) {
1423					/* success */
1424					free_ndlp = cmdiocb->ndlp;
1425					lpfc_ct_free_iocb(phba, cmdiocb);
1426					lpfc_nlp_put(free_ndlp);
1427					return;
1428				}
1429			}
1430		}
1431		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1432				 "0267 NameServer GFF Rsp "
1433				 "x%x Error (%d %d) Data: x%lx x%x\n",
1434				 did, ulp_status, ulp_word4,
1435				 vport->fc_flag, vport->fc_rscn_id_cnt);
1436	}
1437
1438	/* This is a target port, unregistered port, or the GFF_ID failed */
1439	ndlp = lpfc_setup_disc_node(vport, did);
1440	if (ndlp) {
1441		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1442				 "0242 Process x%x GFF "
1443				 "NameServer Rsp Data: x%x x%lx x%x\n",
1444				 did, ndlp->nlp_flag, vport->fc_flag,
1445				 vport->fc_rscn_id_cnt);
1446	} else {
1447		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1448				 "0243 Skip x%x GFF "
1449				 "NameServer Rsp Data: x%lx x%x\n", did,
1450				 vport->fc_flag, vport->fc_rscn_id_cnt);
1451	}
1452out:
1453	/* Link up / RSCN discovery */
1454	if (vport->num_disc_nodes)
1455		vport->num_disc_nodes--;
1456
1457	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1458			 "6451 GFF_ID cmpl inp %d disc %d\n",
1459			 vport->gidft_inp, vport->num_disc_nodes);
1460
1461	if (vport->num_disc_nodes == 0) {
1462		/*
1463		 * The driver has cycled through all Nports in the RSCN payload.
1464		 * Complete the handling by cleaning up and marking the
1465		 * current driver state.
1466		 */
1467		if (vport->port_state >= LPFC_DISC_AUTH) {
1468			if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) {
1469				lpfc_els_flush_rscn(vport);
1470				/* RSCN still */
1471				set_bit(FC_RSCN_MODE, &vport->fc_flag);
1472			} else {
1473				lpfc_els_flush_rscn(vport);
1474			}
1475		}
1476		lpfc_disc_start(vport);
1477	}
1478
1479iocb_free:
1480	free_ndlp = cmdiocb->ndlp;
1481	lpfc_ct_free_iocb(phba, cmdiocb);
1482	lpfc_nlp_put(free_ndlp);
1483	return;
1484}
1485
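/*
 * Completion handler for a GFT_ID query.  The returned FC4 type words update
 * the node's nlp_fc4_type and decide whether to move the node to PRLI issue
 * or, if no usable FC4 type was reported, to send a LOGO.
 */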
1486static void
1487lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1488			struct lpfc_iocbq *rspiocb)
1489{
1490	struct lpfc_vport *vport = cmdiocb->vport;
1491	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
1492	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
1493	struct lpfc_sli_ct_request *CTrsp;
1494	int did;
1495	struct lpfc_nodelist *ndlp = NULL;
1496	struct lpfc_nodelist *ns_ndlp = cmdiocb->ndlp;
1497	uint32_t fc4_data_0, fc4_data_1;
1498	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1499	u32 ulp_word4 = get_job_word4(phba, rspiocb);
1500
1501	did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId;
1502	did = be32_to_cpu(did);
1503
1504	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1505			      "GFT_ID cmpl: status:x%x/x%x did:x%x",
1506			      ulp_status, ulp_word4, did);
1507
1508	/* Ignore response if link flipped after this request was made */
1509	if ((uint32_t)cmdiocb->event_tag != phba->fc_eventTag) {
1510		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1511				 "9046 Event tag mismatch. Ignoring NS rsp\n");
1512		goto out;
1513	}
1514
1515	if (ulp_status == IOSTAT_SUCCESS) {
1516		/* Good status, continue checking */
1517		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
1518		fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]);
1519		fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]);
1520
1521		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1522				 "6432 Process GFT_ID rsp for %08x "
1523				 "Data %08x %08x %s %s\n",
1524				 did, fc4_data_0, fc4_data_1,
1525				 (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) ?
1526				  "FCP" : " ",
1527				 (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ?
1528				  "NVME" : " ");
1529
1530		/* Lookup the NPort_ID queried in the GFT_ID and find the
1531		 * driver's local node.  It's an error if the driver
1532		 * doesn't have one.
1533		 */
1534		ndlp = lpfc_findnode_did(vport, did);
1535		if (ndlp) {
1536			/* The bitmask value for FCP and NVME FCP types is
1537			 * the same because they are 32 bits distant from
			 * each other in word0 and word1.
1539			 */
1540			if (fc4_data_0 & LPFC_FC4_TYPE_BITMASK)
1541				ndlp->nlp_fc4_type |= NLP_FC4_FCP;
1542			if (fc4_data_1 &  LPFC_FC4_TYPE_BITMASK)
1543				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
1544			lpfc_printf_vlog(vport, KERN_INFO,
1545					 LOG_DISCOVERY | LOG_NODE,
1546					 "3064 Setting ndlp x%px, DID x%06x "
1547					 "with FC4 x%08x, Data: x%08x x%08x "
1548					 "%d\n",
1549					 ndlp, did, ndlp->nlp_fc4_type,
1550					 FC_TYPE_FCP, FC_TYPE_NVME,
1551					 ndlp->nlp_state);
1552
1553			if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
1554			    ndlp->nlp_fc4_type) {
1555				ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1556				/* This is a fabric topology so if discovery
1557				 * started with an unsolicited PLOGI, don't
1558				 * send a PRLI.  Targets don't issue PLOGI or
1559				 * PRLI when acting as a target. Likely this is
1560				 * an initiator function.
1561				 */
1562				if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) {
1563					lpfc_nlp_set_state(vport, ndlp,
1564							   NLP_STE_PRLI_ISSUE);
1565					lpfc_issue_els_prli(vport, ndlp, 0);
1566				}
1567			} else if (!ndlp->nlp_fc4_type) {
1568				/* If fc4 type is still unknown, then LOGO */
1569				lpfc_printf_vlog(vport, KERN_INFO,
1570						 LOG_DISCOVERY | LOG_NODE,
						 "6443 Sending LOGO ndlp x%px, "
1572						 "DID x%06x with fc4_type: "
1573						 "x%08x, state: %d\n",
1574						 ndlp, did, ndlp->nlp_fc4_type,
1575						 ndlp->nlp_state);
1576				lpfc_issue_els_logo(vport, ndlp, 0);
1577				ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
1578				lpfc_nlp_set_state(vport, ndlp,
1579						   NLP_STE_NPR_NODE);
1580			}
1581		}
1582	} else
1583		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1584				 "3065 GFT_ID failed x%08x\n", ulp_status);
1585
1586out:
1587	lpfc_ct_free_iocb(phba, cmdiocb);
1588	lpfc_nlp_put(ns_ndlp);
1589}
1590
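/*
 * Common completion handler for NameServer registration CT commands.  Failed
 * commands are retried up to LPFC_MAX_NS_RETRY times unless the error shows
 * the SLI layer is down or the request was aborted.
 */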
1591static void
1592lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1593	     struct lpfc_iocbq *rspiocb)
1594{
1595	struct lpfc_vport *vport = cmdiocb->vport;
1596	struct lpfc_dmabuf *inp;
1597	struct lpfc_dmabuf *outp;
1598	struct lpfc_sli_ct_request *CTrsp;
1599	struct lpfc_nodelist *ndlp;
1600	int cmdcode, rc;
1601	uint8_t retry;
1602	uint32_t latt;
1603	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1604	u32 ulp_word4 = get_job_word4(phba, rspiocb);
1605
1606	/* First save ndlp, before we overwrite it */
1607	ndlp = cmdiocb->ndlp;
1608
1609	/* we pass cmdiocb to state machine which needs rspiocb as well */
1610	cmdiocb->rsp_iocb = rspiocb;
1611
1612	inp = cmdiocb->cmd_dmabuf;
1613	outp = cmdiocb->rsp_dmabuf;
1614
1615	cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
1616					CommandResponse.bits.CmdRsp);
1617	CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
1618
1619	latt = lpfc_els_chk_latt(vport);
1620
	/* CT request completes status <ulp_status> CmdRsp <CmdRsp> */
1622	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1623			 "0209 CT Request completes, latt %d, "
1624			 "ulp_status x%x CmdRsp x%x, Context x%x, Tag x%x\n",
1625			 latt, ulp_status,
1626			 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
1627			 get_job_ulpcontext(phba, cmdiocb), cmdiocb->iotag);
1628
1629	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
1630		"CT cmd cmpl:     status:x%x/x%x cmd:x%x",
1631		ulp_status, ulp_word4, cmdcode);
1632
1633	if (ulp_status) {
1634		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
1635				 "0268 NS cmd x%x Error (x%x x%x)\n",
1636				 cmdcode, ulp_status, ulp_word4);
1637
1638		if (ulp_status == IOSTAT_LOCAL_REJECT &&
1639		    (((ulp_word4 & IOERR_PARAM_MASK) ==
1640		      IOERR_SLI_DOWN) ||
1641		     ((ulp_word4 & IOERR_PARAM_MASK) ==
1642		      IOERR_SLI_ABORTED)))
1643			goto out;
1644
1645		retry = cmdiocb->retry;
1646		if (retry >= LPFC_MAX_NS_RETRY)
1647			goto out;
1648
1649		retry++;
1650		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1651				 "0250 Retrying NS cmd %x\n", cmdcode);
1652		rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
1653		if (rc == 0)
1654			goto out;
1655	}
1656
1657out:
1658	lpfc_ct_free_iocb(phba, cmdiocb);
1659	lpfc_nlp_put(ndlp);
1660	return;
1661}
1662
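/*
 * The registration completion handlers below record a successful
 * registration in vport->ct_flags (DA_ID instead clears the flags) and then
 * defer to lpfc_cmpl_ct for the common cleanup and retry handling.
 */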
1663static void
1664lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1665			struct lpfc_iocbq *rspiocb)
1666{
1667	struct lpfc_vport *vport = cmdiocb->vport;
1668	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1669
1670	if (ulp_status == IOSTAT_SUCCESS) {
1671		struct lpfc_dmabuf *outp;
1672		struct lpfc_sli_ct_request *CTrsp;
1673
1674		outp = cmdiocb->rsp_dmabuf;
1675		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
1676		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1677		    SLI_CT_RESPONSE_FS_ACC)
1678			vport->ct_flags |= FC_CT_RFT_ID;
1679	}
1680	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
1681	return;
1682}
1683
1684static void
1685lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1686			struct lpfc_iocbq *rspiocb)
1687{
1688	struct lpfc_vport *vport = cmdiocb->vport;
1689	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1690
1691	if (ulp_status == IOSTAT_SUCCESS) {
1692		struct lpfc_dmabuf *outp;
1693		struct lpfc_sli_ct_request *CTrsp;
1694
1695		outp = cmdiocb->rsp_dmabuf;
1696		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
1697		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1698		    SLI_CT_RESPONSE_FS_ACC)
1699			vport->ct_flags |= FC_CT_RNN_ID;
1700	}
1701	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
1702	return;
1703}
1704
1705static void
1706lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1707			 struct lpfc_iocbq *rspiocb)
1708{
1709	struct lpfc_vport *vport = cmdiocb->vport;
1710	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1711
1712	if (ulp_status == IOSTAT_SUCCESS) {
1713		struct lpfc_dmabuf *outp;
1714		struct lpfc_sli_ct_request *CTrsp;
1715
1716		outp = cmdiocb->rsp_dmabuf;
1717		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
1718		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1719		    SLI_CT_RESPONSE_FS_ACC)
1720			vport->ct_flags |= FC_CT_RSPN_ID;
1721	}
1722	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
1723	return;
1724}
1725
1726static void
1727lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1728			 struct lpfc_iocbq *rspiocb)
1729{
1730	struct lpfc_vport *vport = cmdiocb->vport;
1731	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1732
1733	if (ulp_status == IOSTAT_SUCCESS) {
1734		struct lpfc_dmabuf *outp;
1735		struct lpfc_sli_ct_request *CTrsp;
1736
1737		outp = cmdiocb->rsp_dmabuf;
1738		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
1739		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1740		    SLI_CT_RESPONSE_FS_ACC)
1741			vport->ct_flags |= FC_CT_RSNN_NN;
1742	}
1743	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
1744	return;
1745}
1746
1747static void
lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		       struct lpfc_iocbq *rspiocb)
1750{
1751	struct lpfc_vport *vport = cmdiocb->vport;
1752
	/* Even if it fails, we will act as though it succeeded. */
1754	vport->ct_flags = 0;
1755	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
1756	return;
1757}
1758
1759static void
1760lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1761			struct lpfc_iocbq *rspiocb)
1762{
1763	struct lpfc_vport *vport = cmdiocb->vport;
1764	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
1765
1766	if (ulp_status == IOSTAT_SUCCESS) {
1767		struct lpfc_dmabuf *outp;
1768		struct lpfc_sli_ct_request *CTrsp;
1769
1770		outp = cmdiocb->rsp_dmabuf;
1771		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
1772		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
1773		    SLI_CT_RESPONSE_FS_ACC)
1774			vport->ct_flags |= FC_CT_RFF_ID;
1775	}
1776	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
1777	return;
1778}
1779
1780/*
1781 * Although the symbolic port name is thought to be an integer
1782 * as of January 18, 2016, leave it as a string until more of
1783 * the record state becomes defined.
1784 */
1785int
1786lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
1787	size_t size)
1788{
1789	int n;
1790
1791	/*
1792	 * Use the lpfc board number as the Symbolic Port
1793	 * Name object.  NPIV is not in play so this integer
1794	 * value is sufficient and unique per FC-ID.
1795	 */
1796	n = scnprintf(symbol, size, "%d", vport->phba->brd_no);
1797	return n;
1798}
1799
1800
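/*
 * Build the symbolic node name advertised to the fabric:
 * "Emulex <model> FV<firmware> DV<driver> HN:<hostname> OS:<os>".
 * Returns the resulting string length.
 */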
1801int
1802lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
1803	size_t size)
1804{
1805	char fwrev[FW_REV_STR_SIZE] = {0};
1806	char tmp[MAXHOSTNAMELEN] = {0};
1807
1808	memset(symbol, 0, size);
1809
1810	scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName);
1811	if (strlcat(symbol, tmp, size) >= size)
1812		goto buffer_done;
1813
1814	lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
1815	scnprintf(tmp, sizeof(tmp), " FV%s", fwrev);
1816	if (strlcat(symbol, tmp, size) >= size)
1817		goto buffer_done;
1818
1819	scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version);
1820	if (strlcat(symbol, tmp, size) >= size)
1821		goto buffer_done;
1822
1823	scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name);
1824	if (strlcat(symbol, tmp, size) >= size)
1825		goto buffer_done;
1826
	/* Note: OS name is "Linux" */
1828	scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname);
1829	strlcat(symbol, tmp, size);
1830
1831buffer_done:
1832	return strnlen(symbol, size);
1833
1834}
1835
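/*
 * Count the non-fabric nodes on this vport that are currently in the
 * MAPPED or UNMAPPED state.  Used to report the number of discovered
 * ports via FDMI.
 */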
1836static uint32_t
1837lpfc_find_map_node(struct lpfc_vport *vport)
1838{
1839	struct lpfc_nodelist *ndlp, *next_ndlp;
1840	unsigned long iflags;
1841	uint32_t cnt = 0;
1842
1843	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
1844	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
1845		if (ndlp->nlp_type & NLP_FABRIC)
1846			continue;
1847		if ((ndlp->nlp_state == NLP_STE_MAPPED_NODE) ||
1848		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE))
1849			cnt++;
1850	}
1851	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
1852	return cnt;
1853}
1854
1855/*
1856 * This routine will return the FC4 Type associated with the CT
1857 * GID_FT command.
1858 */
1859int
1860lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb)
1861{
1862	struct lpfc_sli_ct_request *CtReq;
1863	struct lpfc_dmabuf *mp;
1864	uint32_t type;
1865
1866	mp = cmdiocb->cmd_dmabuf;
1867	if (mp == NULL)
1868		return 0;
1869	CtReq = (struct lpfc_sli_ct_request *)mp->virt;
1870	type = (uint32_t)CtReq->un.gid.Fc4Type;
1871	if ((type != SLI_CTPT_FCP) && (type != SLI_CTPT_NVME))
1872		return 0;
1873	return type;
1874}
1875
/*
 * lpfc_ns_cmd
 * Description:
 *    Issue a CT request to the NameServer, e.g.
 *       SLI_CTNS_GID_FT
 *       SLI_CTNS_RFT_ID
 */
1883int
1884lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
1885	    uint8_t retry, uint32_t context)
1886{
1887	struct lpfc_nodelist * ndlp;
1888	struct lpfc_hba *phba = vport->phba;
1889	struct lpfc_dmabuf *mp, *bmp;
1890	struct lpfc_sli_ct_request *CtReq;
1891	struct ulp_bde64 *bpl;
1892	void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
1893		      struct lpfc_iocbq *) = NULL;
1894	uint32_t *ptr;
1895	uint32_t rsp_size = 1024;
1896	size_t   size;
1897	int rc = 0;
1898
1899	ndlp = lpfc_findnode_did(vport, NameServer_DID);
1900	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
		rc = 1;
1902		goto ns_cmd_exit;
1903	}
1904
1905	/* fill in BDEs for command */
1906	/* Allocate buffer for command payload */
1907	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1908	if (!mp) {
		rc = 2;
1910		goto ns_cmd_exit;
1911	}
1912
1913	INIT_LIST_HEAD(&mp->list);
1914	mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
1915	if (!mp->virt) {
		rc = 3;
1917		goto ns_cmd_free_mp;
1918	}
1919
1920	/* Allocate buffer for Buffer ptr list */
1921	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1922	if (!bmp) {
		rc = 4;
1924		goto ns_cmd_free_mpvirt;
1925	}
1926
1927	INIT_LIST_HEAD(&bmp->list);
1928	bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
1929	if (!bmp->virt) {
		rc = 5;
1931		goto ns_cmd_free_bmp;
1932	}
1933
1934	/* NameServer Req */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1936			 "0236 NameServer Req Data: x%x x%lx x%x x%x\n",
1937			 cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt,
1938			 context);
1939
1940	bpl = (struct ulp_bde64 *) bmp->virt;
1941	memset(bpl, 0, sizeof(struct ulp_bde64));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
1944	bpl->tus.f.bdeFlags = 0;
1945	if (cmdcode == SLI_CTNS_GID_FT)
1946		bpl->tus.f.bdeSize = GID_REQUEST_SZ;
1947	else if (cmdcode == SLI_CTNS_GID_PT)
1948		bpl->tus.f.bdeSize = GID_REQUEST_SZ;
1949	else if (cmdcode == SLI_CTNS_GFF_ID)
1950		bpl->tus.f.bdeSize = GFF_REQUEST_SZ;
1951	else if (cmdcode == SLI_CTNS_GFT_ID)
1952		bpl->tus.f.bdeSize = GFT_REQUEST_SZ;
1953	else if (cmdcode == SLI_CTNS_RFT_ID)
1954		bpl->tus.f.bdeSize = RFT_REQUEST_SZ;
1955	else if (cmdcode == SLI_CTNS_RNN_ID)
1956		bpl->tus.f.bdeSize = RNN_REQUEST_SZ;
1957	else if (cmdcode == SLI_CTNS_RSPN_ID)
1958		bpl->tus.f.bdeSize = RSPN_REQUEST_SZ;
1959	else if (cmdcode == SLI_CTNS_RSNN_NN)
1960		bpl->tus.f.bdeSize = RSNN_REQUEST_SZ;
1961	else if (cmdcode == SLI_CTNS_DA_ID)
1962		bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ;
1963	else if (cmdcode == SLI_CTNS_RFF_ID)
1964		bpl->tus.f.bdeSize = RFF_REQUEST_SZ;
1965	else
1966		bpl->tus.f.bdeSize = 0;
1967	bpl->tus.w = le32_to_cpu(bpl->tus.w);
1968
1969	CtReq = (struct lpfc_sli_ct_request *) mp->virt;
1970	memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request));
1971	CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
1972	CtReq->RevisionId.bits.InId = 0;
1973	CtReq->FsType = SLI_CT_DIRECTORY_SERVICE;
1974	CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER;
1975	CtReq->CommandResponse.bits.Size = 0;
1976	switch (cmdcode) {
1977	case SLI_CTNS_GID_FT:
1978		CtReq->CommandResponse.bits.CmdRsp =
1979		    cpu_to_be16(SLI_CTNS_GID_FT);
1980		CtReq->un.gid.Fc4Type = context;
1981
1982		if (vport->port_state < LPFC_NS_QRY)
1983			vport->port_state = LPFC_NS_QRY;
1984		lpfc_set_disctmo(vport);
1985		cmpl = lpfc_cmpl_ct_cmd_gid_ft;
1986		rsp_size = FC_MAX_NS_RSP;
1987		break;
1988
1989	case SLI_CTNS_GID_PT:
1990		CtReq->CommandResponse.bits.CmdRsp =
1991		    cpu_to_be16(SLI_CTNS_GID_PT);
1992		CtReq->un.gid.PortType = context;
1993
1994		if (vport->port_state < LPFC_NS_QRY)
1995			vport->port_state = LPFC_NS_QRY;
1996		lpfc_set_disctmo(vport);
1997		cmpl = lpfc_cmpl_ct_cmd_gid_pt;
1998		rsp_size = FC_MAX_NS_RSP;
1999		break;
2000
2001	case SLI_CTNS_GFF_ID:
2002		CtReq->CommandResponse.bits.CmdRsp =
2003			cpu_to_be16(SLI_CTNS_GFF_ID);
2004		CtReq->un.gff.PortId = cpu_to_be32(context);
2005		cmpl = lpfc_cmpl_ct_cmd_gff_id;
2006		break;
2007
2008	case SLI_CTNS_GFT_ID:
2009		CtReq->CommandResponse.bits.CmdRsp =
2010			cpu_to_be16(SLI_CTNS_GFT_ID);
2011		CtReq->un.gft.PortId = cpu_to_be32(context);
2012		cmpl = lpfc_cmpl_ct_cmd_gft_id;
2013		break;
2014
2015	case SLI_CTNS_RFT_ID:
2016		vport->ct_flags &= ~FC_CT_RFT_ID;
2017		CtReq->CommandResponse.bits.CmdRsp =
2018		    cpu_to_be16(SLI_CTNS_RFT_ID);
2019		CtReq->un.rft.port_id = cpu_to_be32(vport->fc_myDID);
2020
		/* Register Application Services type if VMID is enabled. */
2022		if (phba->cfg_vmid_app_header)
2023			CtReq->un.rft.app_serv_reg =
2024				cpu_to_be32(RFT_APP_SERV_REG);
2025
2026		/* Register FC4 FCP type if enabled.  */
2027		if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
2028		    vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)
2029			CtReq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG);
2030
2031		/* Register NVME type if enabled. */
2032		if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
2033		    vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)
2034			CtReq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG);
2035
2036		ptr = (uint32_t *)CtReq;
2037		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2038				 "6433 Issue RFT (%s %s %s): %08x %08x %08x "
2039				 "%08x %08x %08x %08x %08x\n",
2040				 CtReq->un.rft.fcp_reg ? "FCP" : " ",
2041				 CtReq->un.rft.nvme_reg ? "NVME" : " ",
2042				 CtReq->un.rft.app_serv_reg ? "APPS" : " ",
2043				 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
2044				 *(ptr + 4), *(ptr + 5),
2045				 *(ptr + 6), *(ptr + 7));
2046		cmpl = lpfc_cmpl_ct_cmd_rft_id;
2047		break;
2048
2049	case SLI_CTNS_RNN_ID:
2050		vport->ct_flags &= ~FC_CT_RNN_ID;
2051		CtReq->CommandResponse.bits.CmdRsp =
2052		    cpu_to_be16(SLI_CTNS_RNN_ID);
2053		CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID);
2054		memcpy(CtReq->un.rnn.wwnn,  &vport->fc_nodename,
2055		       sizeof(struct lpfc_name));
2056		cmpl = lpfc_cmpl_ct_cmd_rnn_id;
2057		break;
2058
2059	case SLI_CTNS_RSPN_ID:
2060		vport->ct_flags &= ~FC_CT_RSPN_ID;
2061		CtReq->CommandResponse.bits.CmdRsp =
2062		    cpu_to_be16(SLI_CTNS_RSPN_ID);
2063		CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID);
2064		size = sizeof(CtReq->un.rspn.symbname);
2065		CtReq->un.rspn.len =
2066			lpfc_vport_symbolic_port_name(vport,
2067			CtReq->un.rspn.symbname, size);
2068		cmpl = lpfc_cmpl_ct_cmd_rspn_id;
2069		break;
2070	case SLI_CTNS_RSNN_NN:
2071		vport->ct_flags &= ~FC_CT_RSNN_NN;
2072		CtReq->CommandResponse.bits.CmdRsp =
2073		    cpu_to_be16(SLI_CTNS_RSNN_NN);
2074		memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename,
2075		       sizeof(struct lpfc_name));
2076		size = sizeof(CtReq->un.rsnn.symbname);
2077		CtReq->un.rsnn.len =
2078			lpfc_vport_symbolic_node_name(vport,
2079			CtReq->un.rsnn.symbname, size);
2080		cmpl = lpfc_cmpl_ct_cmd_rsnn_nn;
2081		break;
2082	case SLI_CTNS_DA_ID:
2083		/* Implement DA_ID Nameserver request */
2084		CtReq->CommandResponse.bits.CmdRsp =
2085			cpu_to_be16(SLI_CTNS_DA_ID);
2086		CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID);
2087		cmpl = lpfc_cmpl_ct_cmd_da_id;
2088		break;
2089	case SLI_CTNS_RFF_ID:
2090		vport->ct_flags &= ~FC_CT_RFF_ID;
2091		CtReq->CommandResponse.bits.CmdRsp =
2092		    cpu_to_be16(SLI_CTNS_RFF_ID);
2093		CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID);
2094		CtReq->un.rff.fbits = FC4_FEATURE_INIT;
2095
		/* The driver always supports FC_TYPE_FCP.  However, the
		 * caller can also specify NVME (type 0x28).  Register only
		 * the FC4 types that are actually enabled.
		 */
2100		if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
2101		     (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
2102		    (context == FC_TYPE_NVME)) {
2103			if ((vport == phba->pport) && phba->nvmet_support) {
2104				CtReq->un.rff.fbits = (FC4_FEATURE_TARGET |
2105					FC4_FEATURE_NVME_DISC);
2106				lpfc_nvmet_update_targetport(phba);
2107			} else {
2108				lpfc_nvme_update_localport(vport);
2109			}
2110			CtReq->un.rff.type_code = context;
2111
2112		} else if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
2113			    (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) &&
2114			   (context == FC_TYPE_FCP))
2115			CtReq->un.rff.type_code = context;
2116
2117		else
2118			goto ns_cmd_free_bmpvirt;
2119
2120		ptr = (uint32_t *)CtReq;
2121		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2122				 "6434 Issue RFF (%s): %08x %08x %08x %08x "
2123				 "%08x %08x %08x %08x\n",
2124				 (context == FC_TYPE_NVME) ? "NVME" : "FCP",
2125				 *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3),
2126				 *(ptr + 4), *(ptr + 5),
2127				 *(ptr + 6), *(ptr + 7));
2128		cmpl = lpfc_cmpl_ct_cmd_rff_id;
2129		break;
2130	}
	/* The lpfc_ct_cmd/lpfc_get_req path increments the ndlp reference
	 * count to hold the ndlp for the corresponding callback function.
	 */
2134	if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
		/* On success, the cmpl function will free the buffers */
2136		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
2137			"Issue CT cmd:    cmd:x%x did:x%x",
2138			cmdcode, ndlp->nlp_DID, 0);
2139		return 0;
2140	}
	rc = 6;
2142
2143ns_cmd_free_bmpvirt:
2144	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
2145ns_cmd_free_bmp:
2146	kfree(bmp);
2147ns_cmd_free_mpvirt:
2148	lpfc_mbuf_free(phba, mp->virt, mp->phys);
2149ns_cmd_free_mp:
2150	kfree(mp);
2151ns_cmd_exit:
2152	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
2153			 "0266 Issue NameServer Req x%x err %d Data: x%lx "
2154			 "x%x\n",
2155			 cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt);
2156	return 1;
2157}
2158
2159/**
2160 * lpfc_fdmi_rprt_defer - Check for any deferred FDMI RPRT commands
2161 * @phba: Pointer to HBA context object.
2162 * @mask: Initial port attributes mask
2163 *
 * This function checks to see if any vports have deferred their FDMI RPRT.
 * A vport's RPRT may be deferred if it is issued before the primary port's
 * RHBA completes.
2167 */
2168static void
2169lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask)
2170{
2171	struct lpfc_vport **vports;
2172	struct lpfc_vport *vport;
2173	struct lpfc_nodelist *ndlp;
2174	int i;
2175
2176	phba->hba_flag |= HBA_RHBA_CMPL;
2177	vports = lpfc_create_vport_work_array(phba);
2178	if (vports) {
2179		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
2180			vport = vports[i];
2181			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
2182			if (!ndlp)
2183				continue;
2184			if (vport->ct_flags & FC_CT_RPRT_DEFER) {
2185				vport->ct_flags &= ~FC_CT_RPRT_DEFER;
2186				vport->fdmi_port_mask = mask;
2187				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
2188			}
2189		}
2190	}
2191	lpfc_destroy_vport_work_array(phba, vports);
2192}
2193
2194/**
2195 * lpfc_cmpl_ct_disc_fdmi - Handle a discovery FDMI completion
2196 * @phba: Pointer to HBA context object.
2197 * @cmdiocb: Pointer to the command IOCBQ.
2198 * @rspiocb: Pointer to the response IOCBQ.
2199 *
 * This function handles the completion of a driver-initiated FDMI
 * CT command issued during discovery.
2202 */
2203static void
2204lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2205		       struct lpfc_iocbq *rspiocb)
2206{
2207	struct lpfc_vport *vport = cmdiocb->vport;
2208	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
2209	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
2210	struct lpfc_sli_ct_request *CTcmd = inp->virt;
2211	struct lpfc_sli_ct_request *CTrsp = outp->virt;
2212	__be16 fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp;
2213	__be16 fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp;
2214	struct lpfc_nodelist *ndlp, *free_ndlp = NULL;
2215	uint32_t latt, cmd, err;
2216	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
2217	u32 ulp_word4 = get_job_word4(phba, rspiocb);
2218
2219	latt = lpfc_els_chk_latt(vport);
2220	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
2221		"FDMI cmpl:       status:x%x/x%x latt:%d",
2222		ulp_status, ulp_word4, latt);
2223
2224	if (latt || ulp_status) {
2225
2226		/* Look for a retryable error */
2227		if (ulp_status == IOSTAT_LOCAL_REJECT) {
2228			switch ((ulp_word4 & IOERR_PARAM_MASK)) {
2229			case IOERR_SLI_ABORTED:
2230			case IOERR_SLI_DOWN:
2231				/* Driver aborted this IO.  No retry as error
2232				 * is likely Offline->Online or some adapter
2233				 * error.  Recovery will try again.
2234				 */
2235				break;
2236			case IOERR_ABORT_IN_PROGRESS:
2237			case IOERR_SEQUENCE_TIMEOUT:
2238			case IOERR_ILLEGAL_FRAME:
2239			case IOERR_NO_RESOURCES:
2240			case IOERR_ILLEGAL_COMMAND:
2241				cmdiocb->retry++;
2242				if (cmdiocb->retry >= LPFC_FDMI_MAX_RETRY)
2243					break;
2244
2245				/* Retry the same FDMI command */
2246				err = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING,
2247							  cmdiocb, 0);
2248				if (err == IOCB_ERROR)
2249					break;
2250				return;
2251			default:
2252				break;
2253			}
2254		}
2255
2256		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
2257				 "0229 FDMI cmd %04x failed, latt = %d "
2258				 "ulp_status: x%x, rid x%x\n",
2259				 be16_to_cpu(fdmi_cmd), latt, ulp_status,
2260				 ulp_word4);
2261	}
2262
2263	free_ndlp = cmdiocb->ndlp;
2264	lpfc_ct_free_iocb(phba, cmdiocb);
2265	lpfc_nlp_put(free_ndlp);
2266
2267	ndlp = lpfc_findnode_did(vport, FDMI_DID);
2268	if (!ndlp)
2269		return;
2270
	/* Check for a CT FS_RJT response */
2272	cmd =  be16_to_cpu(fdmi_cmd);
2273	if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) {
2274		/* FDMI rsp failed */
2275		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS,
				 "0220 FDMI cmd failed FS_RJT Data: x%x\n", cmd);
2277
2278		/* Should we fallback to FDMI-2 / FDMI-1 ? */
2279		switch (cmd) {
2280		case SLI_MGMT_RHBA:
2281			if (vport->fdmi_hba_mask == LPFC_FDMI2_HBA_ATTR) {
2282				/* Fallback to FDMI-1 for HBA attributes */
2283				vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
2284
				/* If the HBA attributes are FDMI-1, the port
				 * attributes should be as well, for consistency.
				 */
2288				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
2289				/* Start over */
2290				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
2291			}
2292			return;
2293
2294		case SLI_MGMT_RPRT:
2295			if (vport->port_type != LPFC_PHYSICAL_PORT) {
2296				ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
2297				if (!ndlp)
2298					return;
2299			}
2300			if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
2301				/* Fallback to FDMI-1 */
2302				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
2303				/* Start over */
2304				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
2305				return;
2306			}
2307			if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
2308				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
2309				/* Retry the same command */
2310				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
2311			}
2312			return;
2313
2314		case SLI_MGMT_RPA:
			/* No retry on Vendor RPA; RPA is only done on the
			 * physical port.
			 */
2316			if (phba->link_flag & LS_CT_VEN_RPA) {
2317				phba->link_flag &= ~LS_CT_VEN_RPA;
2318				if (phba->cmf_active_mode == LPFC_CFG_OFF)
2319					return;
2320				lpfc_printf_log(phba, KERN_WARNING,
2321						LOG_DISCOVERY | LOG_ELS,
2322						"6460 VEN FDMI RPA RJT\n");
2323				return;
2324			}
2325			if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) {
2326				/* Fallback to FDMI-1 */
2327				vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR;
2328				vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR;
2329				/* Start over */
2330				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
2331				return;
2332			}
2333			if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) {
2334				vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
2335				/* Retry the same command */
2336				lpfc_fdmi_cmd(vport, ndlp, cmd, 0);
2337			}
2338			return;
2339		}
2340	}
2341
2342	/*
	 * On success, need to cycle through FDMI registration for discovery
2344	 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
2345	 * DPRT -> RPRT (vports)
2346	 */
2347	switch (cmd) {
2348	case SLI_MGMT_RHBA:
2349		/* Check for any RPRTs deferred till after RHBA completes */
2350		lpfc_fdmi_rprt_defer(phba, vport->fdmi_port_mask);
2351
2352		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, 0);
2353		break;
2354
2355	case SLI_MGMT_DHBA:
2356		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
2357		break;
2358
2359	case SLI_MGMT_DPRT:
2360		if (vport->port_type == LPFC_PHYSICAL_PORT) {
2361			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0);
2362		} else {
2363			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
2364			if (!ndlp)
2365				return;
2366
			/* Only issue an RPRT for the vport if the RHBA
2368			 * for the physical port completes successfully.
2369			 * We may have to defer the RPRT accordingly.
2370			 */
2371			if (phba->hba_flag & HBA_RHBA_CMPL) {
2372				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
2373			} else {
2374				lpfc_printf_vlog(vport, KERN_INFO,
2375						 LOG_DISCOVERY,
2376						 "6078 RPRT deferred\n");
2377				vport->ct_flags |= FC_CT_RPRT_DEFER;
2378			}
2379		}
2380		break;
2381	case SLI_MGMT_RPA:
2382		if (vport->port_type == LPFC_PHYSICAL_PORT &&
2383		    phba->sli4_hba.pc_sli4_params.mi_ver) {
			/* mi is only for the physical port, no vports */
2385			if (phba->link_flag & LS_CT_VEN_RPA) {
2386				lpfc_printf_vlog(vport, KERN_INFO,
2387						 LOG_DISCOVERY | LOG_ELS |
2388						 LOG_CGN_MGMT,
2389						 "6449 VEN RPA FDMI Success\n");
2390				phba->link_flag &= ~LS_CT_VEN_RPA;
2391				break;
2392			}
2393
2394			lpfc_printf_log(phba, KERN_INFO,
2395					LOG_DISCOVERY | LOG_CGN_MGMT,
2396					"6210 Issue Vendor MI FDMI %x\n",
2397					phba->sli4_hba.pc_sli4_params.mi_ver);
2398
2399			/* CGN is only for the physical port, no vports */
2400			if (lpfc_fdmi_cmd(vport, ndlp, cmd,
2401					  LPFC_FDMI_VENDOR_ATTR_mi) == 0)
2402				phba->link_flag |= LS_CT_VEN_RPA;
2403			lpfc_printf_log(phba, KERN_INFO,
2404					LOG_DISCOVERY | LOG_ELS,
2405					"6458 Send MI FDMI:%x Flag x%x\n",
2406					phba->sli4_hba.pc_sli4_params.mi_ver,
2407					phba->link_flag);
2408		} else {
2409			lpfc_printf_log(phba, KERN_INFO,
2410					LOG_DISCOVERY | LOG_ELS,
2411					"6459 No FDMI VEN MI support - "
2412					"RPA Success\n");
2413		}
2414		break;
2415	}
2416	return;
2417}
2418
2419
2420/**
2421 * lpfc_fdmi_change_check - Check for changed FDMI parameters
2422 * @vport: pointer to a host virtual N_Port data structure.
2423 *
 * Check how many mapped NPorts we are connected to and whether our
 * hostname has changed.  Called from the heartbeat timeout routine to
 * check if any FDMI parameters changed; if so, re-register those
 * attributes.
2428 */
2429void
2430lpfc_fdmi_change_check(struct lpfc_vport *vport)
2431{
2432	struct lpfc_hba *phba = vport->phba;
2433	struct lpfc_nodelist *ndlp;
2434	uint16_t cnt;
2435
2436	if (!lpfc_is_link_up(phba))
2437		return;
2438
2439	/* Must be connected to a Fabric */
2440	if (!test_bit(FC_FABRIC, &vport->fc_flag))
2441		return;
2442
2443	ndlp = lpfc_findnode_did(vport, FDMI_DID);
2444	if (!ndlp)
2445		return;
2446
2447	/* Check if system hostname changed */
2448	if (strcmp(phba->os_host_name, init_utsname()->nodename)) {
2449		memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
2450		scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
2451			  init_utsname()->nodename);
2452		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
2453
		/* Since this affects multiple HBA and PORT attributes, we need
		 * to de-register and go through the whole FDMI registration
		 * cycle.
		 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
		 * DPRT -> RPRT (vports)
		 */
2459		if (vport->port_type == LPFC_PHYSICAL_PORT) {
2460			/* For extra Vendor RPA */
2461			phba->link_flag &= ~LS_CT_VEN_RPA;
2462			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
2463		} else {
2464			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
2465			if (!ndlp)
2466				return;
2467			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
2468		}
2469
2470		/* Since this code path registers all the port attributes
2471		 * we can just return without further checking.
2472		 */
2473		return;
2474	}
2475
2476	if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
2477		return;
2478
2479	/* Check if the number of mapped NPorts changed */
2480	cnt = lpfc_find_map_node(vport);
2481	if (cnt == vport->fdmi_num_disc)
2482		return;
2483
2484	if (vport->port_type == LPFC_PHYSICAL_PORT) {
2485		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
2486			      LPFC_FDMI_PORT_ATTR_num_disc);
2487	} else {
2488		ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
2489		if (!ndlp)
2490			return;
2491		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT,
2492			      LPFC_FDMI_PORT_ATTR_num_disc);
2493	}
2494}
2495
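/*
 * lpfc_fdmi_set_attr_* helpers: each one formats a single FDMI
 * attribute entry (Type/Length/Value) into the caller-supplied buffer
 * and returns the number of bytes consumed, so the attribute block can
 * be packed sequentially.
 */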
2496static inline int
2497lpfc_fdmi_set_attr_u32(void *attr, uint16_t attrtype, uint32_t attrval)
2498{
2499	struct lpfc_fdmi_attr_u32 *ae = attr;
2500	int size = sizeof(*ae);
2501
2502	ae->type = cpu_to_be16(attrtype);
2503	ae->len = cpu_to_be16(size);
2504	ae->value_u32 = cpu_to_be32(attrval);
2505
2506	return size;
2507}
2508
2509static inline int
2510lpfc_fdmi_set_attr_wwn(void *attr, uint16_t attrtype, struct lpfc_name *wwn)
2511{
2512	struct lpfc_fdmi_attr_wwn *ae = attr;
2513	int size = sizeof(*ae);
2514
2515	ae->type = cpu_to_be16(attrtype);
2516	ae->len = cpu_to_be16(size);
	/* WWNs are assumed to be bytestreams - Big Endian presentation */
2518	memcpy(ae->name, wwn,
2519	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
2520
2521	return size;
2522}
2523
2524static inline int
2525lpfc_fdmi_set_attr_fullwwn(void *attr, uint16_t attrtype,
2526			   struct lpfc_name *wwnn, struct lpfc_name *wwpn)
2527{
2528	struct lpfc_fdmi_attr_fullwwn *ae = attr;
2529	u8 *nname = ae->nname;
2530	u8 *pname = ae->pname;
2531	int size = sizeof(*ae);
2532
2533	ae->type = cpu_to_be16(attrtype);
2534	ae->len = cpu_to_be16(size);
	/* WWNs are assumed to be bytestreams - Big Endian presentation */
2536	memcpy(nname, wwnn,
2537	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
2538	memcpy(pname, wwpn,
2539	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
2540
2541	return size;
2542}
2543
2544static inline int
2545lpfc_fdmi_set_attr_string(void *attr, uint16_t attrtype, char *attrstring)
2546{
2547	struct lpfc_fdmi_attr_string *ae = attr;
2548	int len, size;
2549
2550	/*
	 * We are trusting the caller that if an FDMI string field
2552	 * is capped at 64 bytes, the caller passes in a string of
2553	 * 64 bytes or less.
2554	 */
2555
2556	strscpy(ae->value_string, attrstring, sizeof(ae->value_string));
2557	len = strnlen(ae->value_string, sizeof(ae->value_string));
	/* round the string length up to a 32-bit boundary (a full pad
	 * word is added if it is already aligned)
	 */
2559	len += (len & 3) ? (4 - (len & 3)) : 4;
2560	/* size is Type/Len (4 bytes) plus string length */
2561	size = FOURBYTES + len;
2562
2563	ae->type = cpu_to_be16(attrtype);
2564	ae->len = cpu_to_be16(size);
2565
2566	return size;
2567}
2568
2569/* Bitfields for FC4 Types that can be reported */
2570#define ATTR_FC4_CT	0x00000001
2571#define ATTR_FC4_FCP	0x00000002
2572#define ATTR_FC4_NVME	0x00000004
2573
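/*
 * The FC4 Types attribute is a 256-bit bitmap organized as big-endian
 * 32-bit words; FC4 type value T maps to bit (T mod 32) of word
 * (T / 32).  Hence type 0x8 (FCP) lands in byte 2 of the buffer,
 * type 0x20 (CT) in byte 7, and type 0x28 (NVME) in byte 6 below.
 */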
2574static inline int
2575lpfc_fdmi_set_attr_fc4types(void *attr, uint16_t attrtype, uint32_t typemask)
2576{
2577	struct lpfc_fdmi_attr_fc4types *ae = attr;
2578	int size = sizeof(*ae);
2579
2580	ae->type = cpu_to_be16(attrtype);
2581	ae->len = cpu_to_be16(size);
2582
2583	if (typemask & ATTR_FC4_FCP)
2584		ae->value_types[2] = 0x01; /* Type 0x8 - FCP */
2585
2586	if (typemask & ATTR_FC4_CT)
2587		ae->value_types[7] = 0x01; /* Type 0x20 - CT */
2588
2589	if (typemask & ATTR_FC4_NVME)
2590		ae->value_types[6] = 0x01; /* Type 0x28 - NVME */
2591
2592	return size;
2593}
2594
2595/* Routines for all individual HBA attributes */
2596static int
2597lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, void *attr)
2598{
2599	return lpfc_fdmi_set_attr_wwn(attr, RHBA_NODENAME,
2600			&vport->fc_sparam.nodeName);
2601}
2602
2603static int
2604lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, void *attr)
2605{
2606	/* This string MUST be consistent with other FC platforms
2607	 * supported by Broadcom.
2608	 */
2609	return lpfc_fdmi_set_attr_string(attr, RHBA_MANUFACTURER,
2610			"Emulex Corporation");
2611}
2612
2613static int
2614lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, void *attr)
2615{
2616	struct lpfc_hba *phba = vport->phba;
2617
2618	return lpfc_fdmi_set_attr_string(attr, RHBA_SERIAL_NUMBER,
2619			phba->SerialNumber);
2620}
2621
2622static int
2623lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, void *attr)
2624{
2625	struct lpfc_hba *phba = vport->phba;
2626
2627	return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL,
2628			phba->ModelName);
2629}
2630
2631static int
2632lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, void *attr)
2633{
2634	struct lpfc_hba *phba = vport->phba;
2635
2636	return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL_DESCRIPTION,
2637			phba->ModelDesc);
2638}
2639
2640static int
2641lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, void *attr)
2642{
2643	struct lpfc_hba *phba = vport->phba;
2644	lpfc_vpd_t *vp = &phba->vpd;
2645	char buf[16] = { 0 };
2646
2647	snprintf(buf, sizeof(buf), "%08x", vp->rev.biuRev);
2648
2649	return lpfc_fdmi_set_attr_string(attr, RHBA_HARDWARE_VERSION, buf);
2650}
2651
2652static int
2653lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, void *attr)
2654{
2655	return lpfc_fdmi_set_attr_string(attr, RHBA_DRIVER_VERSION,
2656			lpfc_release_version);
2657}
2658
2659static int
2660lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, void *attr)
2661{
2662	struct lpfc_hba *phba = vport->phba;
2663	char buf[64] = { 0 };
2664
2665	if (phba->sli_rev == LPFC_SLI_REV4) {
2666		lpfc_decode_firmware_rev(phba, buf, 1);
2667
2668		return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
2669				buf);
2670	}
2671
2672	return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
2673			phba->OptionROMVersion);
2674}
2675
2676static int
2677lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, void *attr)
2678{
2679	struct lpfc_hba *phba = vport->phba;
2680	char buf[64] = { 0 };
2681
2682	lpfc_decode_firmware_rev(phba, buf, 1);
2683
2684	return lpfc_fdmi_set_attr_string(attr, RHBA_FIRMWARE_VERSION, buf);
2685}
2686
2687static int
2688lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, void *attr)
2689{
2690	char buf[256] = { 0 };
2691
2692	snprintf(buf, sizeof(buf), "%s %s %s",
2693		 init_utsname()->sysname,
2694		 init_utsname()->release,
2695		 init_utsname()->version);
2696
2697	return lpfc_fdmi_set_attr_string(attr, RHBA_OS_NAME_VERSION, buf);
2698}
2699
2700static int
2701lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, void *attr)
2702{
2703	return lpfc_fdmi_set_attr_u32(attr, RHBA_MAX_CT_PAYLOAD_LEN,
2704			LPFC_MAX_CT_SIZE);
2705}
2706
2707static int
2708lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
2709{
2710	char buf[256] = { 0 };
2711
2712	lpfc_vport_symbolic_node_name(vport, buf, sizeof(buf));
2713
2714	return lpfc_fdmi_set_attr_string(attr, RHBA_SYM_NODENAME, buf);
2715}
2716
2717static int
2718lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, void *attr)
2719{
2720	return lpfc_fdmi_set_attr_u32(attr, RHBA_VENDOR_INFO, 0);
2721}
2722
2723static int
2724lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, void *attr)
2725{
2726	/* Each driver instance corresponds to a single port */
2727	return lpfc_fdmi_set_attr_u32(attr, RHBA_NUM_PORTS, 1);
2728}
2729
2730static int
2731lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, void *attr)
2732{
2733	return lpfc_fdmi_set_attr_wwn(attr, RHBA_FABRIC_WWNN,
2734			&vport->fabric_nodename);
2735}
2736
2737static int
2738lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, void *attr)
2739{
2740	struct lpfc_hba *phba = vport->phba;
2741
2742	return lpfc_fdmi_set_attr_string(attr, RHBA_BIOS_VERSION,
2743			phba->BIOSVersion);
2744}
2745
2746static int
2747lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, void *attr)
2748{
2749	/* Driver doesn't have access to this information */
2750	return lpfc_fdmi_set_attr_u32(attr, RHBA_BIOS_STATE, 0);
2751}
2752
2753static int
2754lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, void *attr)
2755{
2756	return lpfc_fdmi_set_attr_string(attr, RHBA_VENDOR_ID, "EMULEX");
2757}
2758
2759/*
2760 * Routines for all individual PORT attributes
2761 */
2762
2763static int
2764lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, void *attr)
2765{
2766	struct lpfc_hba   *phba = vport->phba;
2767	u32 fc4types;
2768
2769	fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);
2770
	/* Check if the firmware supports NVME and this is the physical port */
2772	if ((phba->sli_rev == LPFC_SLI_REV4) && (vport == phba->pport) &&
2773	    phba->sli4_hba.pc_sli4_params.nvme)
2774		fc4types |= ATTR_FC4_NVME;
2775
2776	return lpfc_fdmi_set_attr_fc4types(attr, RPRT_SUPPORTED_FC4_TYPES,
2777			fc4types);
2778}
2779
2780static int
2781lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr)
2782{
2783	struct lpfc_hba *phba = vport->phba;
2784	u32 speeds = 0;
2785	u32 tcfg;
2786	u8 i, cnt;
2787
2788	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
2789		cnt = 0;
2790		if (phba->sli_rev == LPFC_SLI_REV4) {
2791			tcfg = phba->sli4_hba.conf_trunk;
2792			for (i = 0; i < 4; i++, tcfg >>= 1)
2793				if (tcfg & 1)
2794					cnt++;
2795		}
2796
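		/*
		 * cnt is the number of lanes configured in the trunk
		 * group.  With trunking, advertise the aggregate speed
		 * of the group rather than the per-lane link speed.
		 */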
2797		if (cnt > 2) { /* 4 lane trunk group */
2798			if (phba->lmt & LMT_64Gb)
2799				speeds |= HBA_PORTSPEED_256GFC;
2800			if (phba->lmt & LMT_32Gb)
2801				speeds |= HBA_PORTSPEED_128GFC;
2802			if (phba->lmt & LMT_16Gb)
2803				speeds |= HBA_PORTSPEED_64GFC;
2804		} else if (cnt) { /* 2 lane trunk group */
2805			if (phba->lmt & LMT_128Gb)
2806				speeds |= HBA_PORTSPEED_256GFC;
2807			if (phba->lmt & LMT_64Gb)
2808				speeds |= HBA_PORTSPEED_128GFC;
2809			if (phba->lmt & LMT_32Gb)
2810				speeds |= HBA_PORTSPEED_64GFC;
2811			if (phba->lmt & LMT_16Gb)
2812				speeds |= HBA_PORTSPEED_32GFC;
2813		} else {
2814			if (phba->lmt & LMT_256Gb)
2815				speeds |= HBA_PORTSPEED_256GFC;
2816			if (phba->lmt & LMT_128Gb)
2817				speeds |= HBA_PORTSPEED_128GFC;
2818			if (phba->lmt & LMT_64Gb)
2819				speeds |= HBA_PORTSPEED_64GFC;
2820			if (phba->lmt & LMT_32Gb)
2821				speeds |= HBA_PORTSPEED_32GFC;
2822			if (phba->lmt & LMT_16Gb)
2823				speeds |= HBA_PORTSPEED_16GFC;
2824			if (phba->lmt & LMT_10Gb)
2825				speeds |= HBA_PORTSPEED_10GFC;
2826			if (phba->lmt & LMT_8Gb)
2827				speeds |= HBA_PORTSPEED_8GFC;
2828			if (phba->lmt & LMT_4Gb)
2829				speeds |= HBA_PORTSPEED_4GFC;
2830			if (phba->lmt & LMT_2Gb)
2831				speeds |= HBA_PORTSPEED_2GFC;
2832			if (phba->lmt & LMT_1Gb)
2833				speeds |= HBA_PORTSPEED_1GFC;
2834		}
2835	} else {
2836		/* FCoE links support only one speed */
2837		switch (phba->fc_linkspeed) {
2838		case LPFC_ASYNC_LINK_SPEED_10GBPS:
2839			speeds = HBA_PORTSPEED_10GE;
2840			break;
2841		case LPFC_ASYNC_LINK_SPEED_25GBPS:
2842			speeds = HBA_PORTSPEED_25GE;
2843			break;
2844		case LPFC_ASYNC_LINK_SPEED_40GBPS:
2845			speeds = HBA_PORTSPEED_40GE;
2846			break;
2847		case LPFC_ASYNC_LINK_SPEED_100GBPS:
2848			speeds = HBA_PORTSPEED_100GE;
2849			break;
2850		}
2851	}
2852
2853	return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_SPEED, speeds);
2854}
2855
2856static int
2857lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr)
2858{
2859	struct lpfc_hba   *phba = vport->phba;
2860	u32 speeds = 0;
2861
2862	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
2863		switch (phba->fc_linkspeed) {
2864		case LPFC_LINK_SPEED_1GHZ:
2865			speeds = HBA_PORTSPEED_1GFC;
2866			break;
2867		case LPFC_LINK_SPEED_2GHZ:
2868			speeds = HBA_PORTSPEED_2GFC;
2869			break;
2870		case LPFC_LINK_SPEED_4GHZ:
2871			speeds = HBA_PORTSPEED_4GFC;
2872			break;
2873		case LPFC_LINK_SPEED_8GHZ:
2874			speeds = HBA_PORTSPEED_8GFC;
2875			break;
2876		case LPFC_LINK_SPEED_10GHZ:
2877			speeds = HBA_PORTSPEED_10GFC;
2878			break;
2879		case LPFC_LINK_SPEED_16GHZ:
2880			speeds = HBA_PORTSPEED_16GFC;
2881			break;
2882		case LPFC_LINK_SPEED_32GHZ:
2883			speeds = HBA_PORTSPEED_32GFC;
2884			break;
2885		case LPFC_LINK_SPEED_64GHZ:
2886			speeds = HBA_PORTSPEED_64GFC;
2887			break;
2888		case LPFC_LINK_SPEED_128GHZ:
2889			speeds = HBA_PORTSPEED_128GFC;
2890			break;
2891		case LPFC_LINK_SPEED_256GHZ:
2892			speeds = HBA_PORTSPEED_256GFC;
2893			break;
2894		default:
2895			speeds = HBA_PORTSPEED_UNKNOWN;
2896			break;
2897		}
2898	} else {
2899		switch (phba->fc_linkspeed) {
2900		case LPFC_ASYNC_LINK_SPEED_10GBPS:
2901			speeds = HBA_PORTSPEED_10GE;
2902			break;
2903		case LPFC_ASYNC_LINK_SPEED_25GBPS:
2904			speeds = HBA_PORTSPEED_25GE;
2905			break;
2906		case LPFC_ASYNC_LINK_SPEED_40GBPS:
2907			speeds = HBA_PORTSPEED_40GE;
2908			break;
2909		case LPFC_ASYNC_LINK_SPEED_100GBPS:
2910			speeds = HBA_PORTSPEED_100GE;
2911			break;
2912		default:
2913			speeds = HBA_PORTSPEED_UNKNOWN;
2914			break;
2915		}
2916	}
2917
2918	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_SPEED, speeds);
2919}
2920
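/*
 * The maximum frame size is taken from the buffer-to-buffer receive
 * size in the login service parameters (4-bit MSB field plus 8-bit
 * LSB field).
 */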
2921static int
2922lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, void *attr)
2923{
2924	struct serv_parm *hsp = (struct serv_parm *)&vport->fc_sparam;
2925
2926	return lpfc_fdmi_set_attr_u32(attr, RPRT_MAX_FRAME_SIZE,
2927			(((uint32_t)hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
2928			  (uint32_t)hsp->cmn.bbRcvSizeLsb);
2929}
2930
2931static int
2932lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, void *attr)
2933{
2934	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2935	char buf[64] = { 0 };
2936
2937	snprintf(buf, sizeof(buf), "/sys/class/scsi_host/host%d",
2938		 shost->host_no);
2939
2940	return lpfc_fdmi_set_attr_string(attr, RPRT_OS_DEVICE_NAME, buf);
2941}
2942
2943static int
2944lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, void *attr)
2945{
2946	char buf[64] = { 0 };
2947
2948	scnprintf(buf, sizeof(buf), "%s", vport->phba->os_host_name);
2949
2950	return lpfc_fdmi_set_attr_string(attr, RPRT_HOST_NAME, buf);
2951}
2952
2953static int
2954lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, void *attr)
2955{
2956	return lpfc_fdmi_set_attr_wwn(attr, RPRT_NODENAME,
2957			&vport->fc_sparam.nodeName);
2958}
2959
2960static int
2961lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, void *attr)
2962{
2963	return lpfc_fdmi_set_attr_wwn(attr, RPRT_PORTNAME,
2964			&vport->fc_sparam.portName);
2965}
2966
2967static int
2968lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
2969{
2970	char buf[256] = { 0 };
2971
2972	lpfc_vport_symbolic_port_name(vport, buf, sizeof(buf));
2973
2974	return lpfc_fdmi_set_attr_string(attr, RPRT_SYM_PORTNAME, buf);
2975}
2976
2977static int
2978lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, void *attr)
2979{
2980	struct lpfc_hba *phba = vport->phba;
2981
2982	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_TYPE,
2983			(phba->fc_topology == LPFC_TOPOLOGY_LOOP) ?
2984				LPFC_FDMI_PORTTYPE_NLPORT :
2985				LPFC_FDMI_PORTTYPE_NPORT);
2986}
2987
2988static int
2989lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, void *attr)
2990{
2991	return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_CLASS,
2992			FC_COS_CLASS2 | FC_COS_CLASS3);
2993}
2994
2995static int
2996lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, void *attr)
2997{
2998	return lpfc_fdmi_set_attr_wwn(attr, RPRT_FABRICNAME,
2999			&vport->fabric_portname);
3000}
3001
3002static int
3003lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, void *attr)
3004{
3005	struct lpfc_hba *phba = vport->phba;
3006	u32 fc4types;
3007
3008	fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);
3009
3010	/* Check to see if NVME is configured or not */
3011	if (vport == phba->pport &&
3012	    phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
3013		fc4types |= ATTR_FC4_NVME;
3014
3015	return lpfc_fdmi_set_attr_fc4types(attr, RPRT_ACTIVE_FC4_TYPES,
3016			fc4types);
3017}
3018
3019static int
3020lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, void *attr)
3021{
3022	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_STATE,
3023			LPFC_FDMI_PORTSTATE_ONLINE);
3024}
3025
3026static int
3027lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, void *attr)
3028{
3029	vport->fdmi_num_disc = lpfc_find_map_node(vport);
3030
3031	return lpfc_fdmi_set_attr_u32(attr, RPRT_DISC_PORT,
3032			vport->fdmi_num_disc);
3033}
3034
3035static int
3036lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport, void *attr)
3037{
3038	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_ID, vport->fc_myDID);
3039}
3040
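/* Routines for the Smart SAN (RPRT_SMART_*) port attributes */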
3041static int
3042lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, void *attr)
3043{
3044	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_SERVICE,
3045			"Smart SAN Initiator");
3046}
3047
3048static int
3049lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, void *attr)
3050{
3051	return lpfc_fdmi_set_attr_fullwwn(attr, RPRT_SMART_GUID,
3052			&vport->fc_sparam.nodeName,
3053			&vport->fc_sparam.portName);
3054}
3055
3056static int
3057lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, void *attr)
3058{
3059	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_VERSION,
3060			"Smart SAN Version 2.0");
3061}
3062
3063static int
3064lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, void *attr)
3065{
3066	struct lpfc_hba *phba = vport->phba;
3067
3068	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_MODEL,
3069			phba->ModelName);
3070}
3071
3072static int
3073lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, void *attr)
3074{
3075	/* SRIOV (type 3) is not supported */
3076
3077	return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_PORT_INFO,
3078			(vport->vpi) ?  2 /* NPIV */ : 1 /* Physical */);
3079}
3080
3081static int
3082lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, void *attr)
3083{
3084	return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_QOS, 0);
3085}
3086
3087static int
3088lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, void *attr)
3089{
3090	return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_SECURITY, 1);
3091}
3092
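/*
 * Vendor-specific MI attribute: reports the management interface
 * version as an "ELXE2EM:<ver>" string.
 */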
3093static int
3094lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, void *attr)
3095{
3096	struct lpfc_hba *phba = vport->phba;
3097	char buf[32] = { 0 };
3098
3099	sprintf(buf, "ELXE2EM:%04d", phba->sli4_hba.pc_sli4_params.mi_ver);
3100
3101	return lpfc_fdmi_set_attr_string(attr, RPRT_VENDOR_MI, buf);
3102}
3103
3104/* RHBA attribute jump table */
3105static int (*lpfc_fdmi_hba_action[])
3106	(struct lpfc_vport *vport, void *attrbuf) = {
3107	/* Action routine                 Mask bit     Attribute type */
3108	lpfc_fdmi_hba_attr_wwnn,	  /* bit0     RHBA_NODENAME           */
3109	lpfc_fdmi_hba_attr_manufacturer,  /* bit1     RHBA_MANUFACTURER       */
3110	lpfc_fdmi_hba_attr_sn,		  /* bit2     RHBA_SERIAL_NUMBER      */
3111	lpfc_fdmi_hba_attr_model,	  /* bit3     RHBA_MODEL              */
3112	lpfc_fdmi_hba_attr_description,	  /* bit4     RHBA_MODEL_DESCRIPTION  */
3113	lpfc_fdmi_hba_attr_hdw_ver,	  /* bit5     RHBA_HARDWARE_VERSION   */
3114	lpfc_fdmi_hba_attr_drvr_ver,	  /* bit6     RHBA_DRIVER_VERSION     */
3115	lpfc_fdmi_hba_attr_rom_ver,	  /* bit7     RHBA_OPTION_ROM_VERSION */
3116	lpfc_fdmi_hba_attr_fmw_ver,	  /* bit8     RHBA_FIRMWARE_VERSION   */
3117	lpfc_fdmi_hba_attr_os_ver,	  /* bit9     RHBA_OS_NAME_VERSION    */
3118	lpfc_fdmi_hba_attr_ct_len,	  /* bit10    RHBA_MAX_CT_PAYLOAD_LEN */
3119	lpfc_fdmi_hba_attr_symbolic_name, /* bit11    RHBA_SYM_NODENAME       */
3120	lpfc_fdmi_hba_attr_vendor_info,	  /* bit12    RHBA_VENDOR_INFO        */
3121	lpfc_fdmi_hba_attr_num_ports,	  /* bit13    RHBA_NUM_PORTS          */
3122	lpfc_fdmi_hba_attr_fabric_wwnn,	  /* bit14    RHBA_FABRIC_WWNN        */
3123	lpfc_fdmi_hba_attr_bios_ver,	  /* bit15    RHBA_BIOS_VERSION       */
3124	lpfc_fdmi_hba_attr_bios_state,	  /* bit16    RHBA_BIOS_STATE         */
3125	lpfc_fdmi_hba_attr_vendor_id,	  /* bit17    RHBA_VENDOR_ID          */
3126};
3127
3128/* RPA / RPRT attribute jump table */
3129static int (*lpfc_fdmi_port_action[])
3130	(struct lpfc_vport *vport, void *attrbuf) = {
3131	/* Action routine                   Mask bit   Attribute type */
3132	lpfc_fdmi_port_attr_fc4type,        /* bit0   RPRT_SUPPORT_FC4_TYPES  */
3133	lpfc_fdmi_port_attr_support_speed,  /* bit1   RPRT_SUPPORTED_SPEED    */
3134	lpfc_fdmi_port_attr_speed,          /* bit2   RPRT_PORT_SPEED         */
3135	lpfc_fdmi_port_attr_max_frame,      /* bit3   RPRT_MAX_FRAME_SIZE     */
3136	lpfc_fdmi_port_attr_os_devname,     /* bit4   RPRT_OS_DEVICE_NAME     */
3137	lpfc_fdmi_port_attr_host_name,      /* bit5   RPRT_HOST_NAME          */
3138	lpfc_fdmi_port_attr_wwnn,           /* bit6   RPRT_NODENAME           */
3139	lpfc_fdmi_port_attr_wwpn,           /* bit7   RPRT_PORTNAME           */
3140	lpfc_fdmi_port_attr_symbolic_name,  /* bit8   RPRT_SYM_PORTNAME       */
3141	lpfc_fdmi_port_attr_port_type,      /* bit9   RPRT_PORT_TYPE          */
3142	lpfc_fdmi_port_attr_class,          /* bit10  RPRT_SUPPORTED_CLASS    */
3143	lpfc_fdmi_port_attr_fabric_wwpn,    /* bit11  RPRT_FABRICNAME         */
3144	lpfc_fdmi_port_attr_active_fc4type, /* bit12  RPRT_ACTIVE_FC4_TYPES   */
3145	lpfc_fdmi_port_attr_port_state,     /* bit13  RPRT_PORT_STATE         */
3146	lpfc_fdmi_port_attr_num_disc,       /* bit14  RPRT_DISC_PORT          */
3147	lpfc_fdmi_port_attr_nportid,        /* bit15  RPRT_PORT_ID            */
3148	lpfc_fdmi_smart_attr_service,       /* bit16  RPRT_SMART_SERVICE      */
3149	lpfc_fdmi_smart_attr_guid,          /* bit17  RPRT_SMART_GUID         */
3150	lpfc_fdmi_smart_attr_version,       /* bit18  RPRT_SMART_VERSION      */
3151	lpfc_fdmi_smart_attr_model,         /* bit19  RPRT_SMART_MODEL        */
3152	lpfc_fdmi_smart_attr_port_info,     /* bit20  RPRT_SMART_PORT_INFO    */
3153	lpfc_fdmi_smart_attr_qos,           /* bit21  RPRT_SMART_QOS          */
3154	lpfc_fdmi_smart_attr_security,      /* bit22  RPRT_SMART_SECURITY     */
3155	lpfc_fdmi_vendor_attr_mi,           /* bit23  RPRT_VENDOR_MI          */
3156};
3157
3158/**
3159 * lpfc_fdmi_cmd - Build and send a FDMI cmd to the specified NPort
3160 * @vport: pointer to a host virtual N_Port data structure.
3161 * @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID)
3162 * @cmdcode: FDMI command to send
3163 * @new_mask: Mask of HBA or PORT Attributes to send
3164 *
3165 * Builds and sends a FDMI command using the CT subsystem.
3166 */
3167int
3168lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
3169	      int cmdcode, uint32_t new_mask)
3170{
3171	struct lpfc_hba *phba = vport->phba;
3172	struct lpfc_dmabuf *rq, *rsp;
3173	struct lpfc_sli_ct_request *CtReq;
3174	struct ulp_bde64_le *bde;
3175	uint32_t bit_pos;
3176	uint32_t size, addsz;
3177	uint32_t rsp_size;
3178	uint32_t mask;
3179	struct lpfc_fdmi_reg_hba *rh;
3180	struct lpfc_fdmi_port_entry *pe;
3181	struct lpfc_fdmi_reg_portattr *pab = NULL, *base = NULL;
3182	struct lpfc_fdmi_attr_block *ab = NULL;
3183	int  (*func)(struct lpfc_vport *vport, void *attrbuf);
3184	void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3185		     struct lpfc_iocbq *rspiocb);
3186
3187	if (!ndlp)
3188		return 0;
3189
3190	cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */
3191
3192	/* fill in BDEs for command */
3193	/* Allocate buffer for command payload */
3194	rq = kmalloc(sizeof(*rq), GFP_KERNEL);
3195	if (!rq)
3196		goto fdmi_cmd_exit;
3197
3198	rq->virt = lpfc_mbuf_alloc(phba, 0, &rq->phys);
3199	if (!rq->virt)
3200		goto fdmi_cmd_free_rq;
3201
3202	/* Allocate buffer for Buffer ptr list */
3203	rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
3204	if (!rsp)
3205		goto fdmi_cmd_free_rqvirt;
3206
3207	rsp->virt = lpfc_mbuf_alloc(phba, 0, &rsp->phys);
3208	if (!rsp->virt)
3209		goto fdmi_cmd_free_rsp;
3210
3211	INIT_LIST_HEAD(&rq->list);
3212	INIT_LIST_HEAD(&rsp->list);
3213
3214	/* mbuf buffers are 1K in length - aka LPFC_BPL_SIZE */
3215	memset(rq->virt, 0, LPFC_BPL_SIZE);
3216	rsp_size = LPFC_BPL_SIZE;
3217
3218	/* FDMI request */
3219	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3220			 "0218 FDMI Request x%x mask x%x Data: x%x x%lx x%x\n",
3221			 cmdcode, new_mask, vport->fdmi_port_mask,
3222			 vport->fc_flag, vport->port_state);
3223
3224	CtReq = (struct lpfc_sli_ct_request *)rq->virt;
3225
3226	/* First populate the CT_IU preamble */
3227	CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
3228	CtReq->RevisionId.bits.InId = 0;
3229
3230	CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE;
3231	CtReq->FsSubType = SLI_CT_FDMI_Subtypes;
3232
3233	CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
3234
3235	size = 0;
3236
3237	/* Next fill in the specific FDMI cmd information */
3238	switch (cmdcode) {
3239	case SLI_MGMT_RHAT:
3240	case SLI_MGMT_RHBA:
3241		rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un;
3242		/* HBA Identifier */
3243		memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName,
3244		       sizeof(struct lpfc_name));
3245		size += sizeof(struct lpfc_fdmi_hba_ident);
3246
3247		if (cmdcode == SLI_MGMT_RHBA) {
3248			/* Registered Port List */
3249			/* One entry (port) per adapter */
3250			rh->rpl.EntryCnt = cpu_to_be32(1);
3251			memcpy(&rh->rpl.pe.PortName,
3252			       &phba->pport->fc_sparam.portName,
3253			       sizeof(struct lpfc_name));
3254			size += sizeof(struct lpfc_fdmi_reg_port_list);
3255		}
3256
3257		ab = (struct lpfc_fdmi_attr_block *)((uint8_t *)rh + size);
3258		ab->EntryCnt = 0;
3259		size += FOURBYTES;	/* add length of EntryCnt field */
3260
3261		bit_pos = 0;
3262		if (new_mask)
3263			mask = new_mask;
3264		else
3265			mask = vport->fdmi_hba_mask;
3266
3267		/* Mask will dictate what attributes to build in the request */
3268		while (mask) {
3269			if (mask & 0x1) {
3270				func = lpfc_fdmi_hba_action[bit_pos];
3271				addsz = func(vport, ((uint8_t *)rh + size));
3272				if (addsz) {
3273					ab->EntryCnt++;
3274					size += addsz;
3275				}
3276				/* check if another attribute fits */
3277				if ((size + FDMI_MAX_ATTRLEN) >
3278				    (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
3279					goto hba_out;
3280			}
3281			mask = mask >> 1;
3282			bit_pos++;
3283		}
3284hba_out:
3285		ab->EntryCnt = cpu_to_be32(ab->EntryCnt);
3286		/* Total size */
3287		size += GID_REQUEST_SZ - 4;
3288		break;
3289
3290	case SLI_MGMT_RPRT:
3291		if (vport->port_type != LPFC_PHYSICAL_PORT) {
3292			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
3293			if (!ndlp)
3294				return 0;
3295		}
3296		fallthrough;
3297	case SLI_MGMT_RPA:
3298		/* Store base ptr right after preamble */
3299		base = (struct lpfc_fdmi_reg_portattr *)&CtReq->un;
3300
3301		if (cmdcode == SLI_MGMT_RPRT) {
3302			rh = (struct lpfc_fdmi_reg_hba *)base;
3303			/* HBA Identifier */
3304			memcpy(&rh->hi.PortName,
3305			       &phba->pport->fc_sparam.portName,
3306			       sizeof(struct lpfc_name));
3307			pab = (struct lpfc_fdmi_reg_portattr *)
3308				((uint8_t *)base + sizeof(struct lpfc_name));
3309			size += sizeof(struct lpfc_name);
3310		} else {
3311			pab = base;
3312		}
3313
3314		memcpy((uint8_t *)&pab->PortName,
3315		       (uint8_t *)&vport->fc_sparam.portName,
3316		       sizeof(struct lpfc_name));
3317		pab->ab.EntryCnt = 0;
3318		/* add length of name and EntryCnt field */
3319		size += sizeof(struct lpfc_name) + FOURBYTES;
3320
3321		bit_pos = 0;
3322		if (new_mask)
3323			mask = new_mask;
3324		else
3325			mask = vport->fdmi_port_mask;
3326
3327		/* Mask will dictate what attributes to build in the request */
3328		while (mask) {
3329			if (mask & 0x1) {
3330				func = lpfc_fdmi_port_action[bit_pos];
3331				addsz = func(vport, ((uint8_t *)base + size));
3332				if (addsz) {
3333					pab->ab.EntryCnt++;
3334					size += addsz;
3335				}
3336				/* check if another attribute fits */
3337				if ((size + FDMI_MAX_ATTRLEN) >
3338				    (LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
3339					goto port_out;
3340			}
3341			mask = mask >> 1;
3342			bit_pos++;
3343		}
3344port_out:
3345		pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt);
3346		size += GID_REQUEST_SZ - 4;
3347		break;
3348
3349	case SLI_MGMT_GHAT:
3350	case SLI_MGMT_GRPL:
3351		rsp_size = FC_MAX_NS_RSP;
3352		fallthrough;
3353	case SLI_MGMT_DHBA:
3354	case SLI_MGMT_DHAT:
3355		pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
3356		memcpy((uint8_t *)&pe->PortName,
3357		       (uint8_t *)&vport->fc_sparam.portName,
3358		       sizeof(struct lpfc_name));
3359		size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
3360		break;
3361
3362	case SLI_MGMT_GPAT:
3363	case SLI_MGMT_GPAS:
3364		rsp_size = FC_MAX_NS_RSP;
3365		fallthrough;
3366	case SLI_MGMT_DPRT:
3367		if (vport->port_type != LPFC_PHYSICAL_PORT) {
3368			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
3369			if (!ndlp)
3370				return 0;
3371		}
3372		fallthrough;
3373	case SLI_MGMT_DPA:
3374		pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
3375		memcpy((uint8_t *)&pe->PortName,
3376		       (uint8_t *)&vport->fc_sparam.portName,
3377		       sizeof(struct lpfc_name));
3378		size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
3379		break;
3380	case SLI_MGMT_GRHL:
3381		size = GID_REQUEST_SZ - 4;
3382		break;
3383	default:
3384		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
3385				 "0298 FDMI cmdcode x%x not supported\n",
3386				 cmdcode);
3387		goto fdmi_cmd_free_rspvirt;
3388	}
3389	CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);
3390
3391	bde = (struct ulp_bde64_le *)rsp->virt;
3392	bde->addr_high = cpu_to_le32(putPaddrHigh(rq->phys));
3393	bde->addr_low = cpu_to_le32(putPaddrLow(rq->phys));
3394	bde->type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 <<
3395				     ULP_BDE64_TYPE_SHIFT);
3396	bde->type_size |= cpu_to_le32(size);
3397
3398	/*
	 * The lpfc_ct_cmd/lpfc_get_req path increments the ndlp reference
	 * count to hold the ndlp for the corresponding callback function.
3401	 */
3402	if (!lpfc_ct_cmd(vport, rq, rsp, ndlp, cmpl, rsp_size, 0))
3403		return 0;
3404
3405fdmi_cmd_free_rspvirt:
3406	lpfc_mbuf_free(phba, rsp->virt, rsp->phys);
3407fdmi_cmd_free_rsp:
3408	kfree(rsp);
3409fdmi_cmd_free_rqvirt:
3410	lpfc_mbuf_free(phba, rq->virt, rq->phys);
3411fdmi_cmd_free_rq:
3412	kfree(rq);
3413fdmi_cmd_exit:
3414	/* Issue FDMI request failed */
3415	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
3416			 "0244 Issue FDMI request failed Data: x%x\n",
3417			 cmdcode);
3418	return 1;
3419}
3420
3421/**
3422 * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer.
3423 * @t: Context object of the timer.
3424 *
 * This function sets the WORKER_DELAYED_DISC_TMO flag and wakes up
 * the worker thread.
3427 **/
3428void
3429lpfc_delayed_disc_tmo(struct timer_list *t)
3430{
3431	struct lpfc_vport *vport = from_timer(vport, t, delayed_disc_tmo);
3432	struct lpfc_hba   *phba = vport->phba;
3433	uint32_t tmo_posted;
3434	unsigned long iflag;
3435
3436	spin_lock_irqsave(&vport->work_port_lock, iflag);
3437	tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO;
3438	if (!tmo_posted)
3439		vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
3440	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
3441
3442	if (!tmo_posted)
3443		lpfc_worker_wake_up(phba);
3444	return;
3445}
3446
3447/**
3448 * lpfc_delayed_disc_timeout_handler - Function called by worker thread to
3449 *      handle delayed discovery.
3450 * @vport: pointer to a host virtual N_Port data structure.
3451 *
 * This function starts nport discovery of the vport.
3453 **/
3454void
3455lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
3456{
3457	if (!test_and_clear_bit(FC_DISC_DELAYED, &vport->fc_flag))
3458		return;
3459
3460	lpfc_do_scr_ns_plogi(vport->phba, vport);
3461}
3462
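/*
 * Decode the firmware revision from the VPD into a printable string.
 * For SLI-4 the operational firmware name is used directly; otherwise
 * the revision words are decoded, and @flag selects whether the
 * firmware name is appended in parentheses.
 */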
void
lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
{
	struct lpfc_sli *psli = &phba->sli;
	lpfc_vpd_t *vp = &phba->vpd;
	uint32_t b1, b2, b3, b4, i, rev;
	char c;
	uint32_t *ptr, str[4];
	uint8_t *fwname;

	if (phba->sli_rev == LPFC_SLI_REV4)
		snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName);
	else if (vp->rev.rBit) {
		if (psli->sli_flag & LPFC_SLI_ACTIVE)
			rev = vp->rev.sli2FwRev;
		else
			rev = vp->rev.sli1FwRev;

		/* Revision fields are packed into the low 16 bits */
		b1 = (rev & 0x0000f000) >> 12;
		b2 = (rev & 0x00000f00) >> 8;
		b3 = (rev & 0x000000c0) >> 6;
		b4 = (rev & 0x00000030) >> 4;

		/* Bits 5:4 select the revision type character */
		switch (b4) {
		case 0:
			c = 'N';
			break;
		case 1:
			c = 'A';
			break;
		case 2:
			c = 'B';
			break;
		case 3:
			c = 'X';
			break;
		default:
			c = 0;
			break;
		}
		b4 = (rev & 0x0000000f);

		if (psli->sli_flag & LPFC_SLI_ACTIVE)
			fwname = vp->rev.sli2FwName;
		else
			fwname = vp->rev.sli1FwName;

		/* Replace spaces in the 16-byte firmware name with NULs */
		for (i = 0; i < 16; i++)
			if (fwname[i] == 0x20)
				fwname[i] = 0;

		ptr = (uint32_t *)fwname;

		for (i = 0; i < 3; i++)
			str[i] = be32_to_cpu(*ptr++);

		if (c == 0) {
			if (flag)
				sprintf(fwrevision, "%d.%d%d (%s)",
					b1, b2, b3, (char *)str);
			else
				sprintf(fwrevision, "%d.%d%d", b1,
					b2, b3);
		} else {
			if (flag)
				sprintf(fwrevision, "%d.%d%d%c%d (%s)",
					b1, b2, b3, c,
					b4, (char *)str);
			else
				sprintf(fwrevision, "%d.%d%d%c%d",
					b1, b2, b3, c, b4);
		}
	} else {
		rev = vp->rev.smFwRev;

		b1 = (rev & 0xff000000) >> 24;
		b2 = (rev & 0x00f00000) >> 20;
		b3 = (rev & 0x000f0000) >> 16;
		c  = (rev & 0x0000ff00) >> 8;
		b4 = (rev & 0x000000ff);

		sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4);
	}
	return;
}

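/**
 * lpfc_cmpl_ct_cmd_vmid - Completion handler for VMID CT commands
 * @phba: pointer to lpfc hba data structure.
 * @cmdiocb: pointer to the command iocb that was issued.
 * @rspiocb: pointer to the response iocb.
 *
 * Handles completion of the application-services (VMID) CT commands
 * RAPP_IDENT, DAPP_IDENT and DALLAPP_ID.  On success the vport VMID
 * table and flags are updated; in all cases the iocb resources and the
 * ndlp reference held for this callback are released.
 **/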
static void
lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		      struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
	struct lpfc_sli_ct_request *ctcmd = inp->virt;
	struct lpfc_sli_ct_request *ctrsp = outp->virt;
	__be16 rsp = ctrsp->CommandResponse.bits.CmdRsp;
	struct app_id_object *app;
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
	u32 cmd, hash, bucket;
	struct lpfc_vmid *vmp, *cur;
	u8 *data = outp->virt;
	int i;

	cmd = be16_to_cpu(ctcmd->CommandResponse.bits.CmdRsp);
	if (cmd == SLI_CTAS_DALLAPP_ID)
		lpfc_ct_free_iocb(phba, cmdiocb);

	if (lpfc_els_chk_latt(vport) || get_job_ulpstatus(phba, rspiocb)) {
		if (cmd != SLI_CTAS_DALLAPP_ID)
			goto free_res;
	}
	/*
	 * Check for a CT FS_RJT response.  Processing continues past an
	 * FS_RJT only for a DALLAPP_ID that was rejected with "unable to
	 * perform request" / "app id not available".
	 */
	if (be16_to_cpu(rsp) == SLI_CT_RESPONSE_FS_RJT) {
		if (cmd != SLI_CTAS_DALLAPP_ID)
			lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
					 "3306 VMID FS_RJT Data: x%x x%x x%x\n",
					 cmd, ctrsp->ReasonCode,
					 ctrsp->Explanation);
		if ((cmd != SLI_CTAS_DALLAPP_ID) ||
		    (ctrsp->ReasonCode != SLI_CT_UNABLE_TO_PERFORM_REQ) ||
		    (ctrsp->Explanation != SLI_CT_APP_ID_NOT_AVAILABLE)) {
			/* If DALLAPP_ID failed, retry later */
			if (cmd == SLI_CTAS_DALLAPP_ID)
				set_bit(FC_DEREGISTER_ALL_APP_ID,
					&vport->load_flag);
			goto free_res;
		}
	}

	switch (cmd) {
	case SLI_CTAS_RAPP_IDENT:
		app = (struct app_id_object *)(RAPP_IDENT_OFFSET + data);
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "6712 RAPP_IDENT app id %d  port id x%x id "
				 "len %d\n", be32_to_cpu(app->app_id),
				 be32_to_cpu(app->port_id),
				 app->obj.entity_id_len);

		if (app->obj.entity_id_len == 0 || app->port_id == 0)
			goto free_res;

		hash = lpfc_vmid_hash_fn(app->obj.entity_id,
					 app->obj.entity_id_len);
		vmp = lpfc_get_vmid_from_hashtable(vport, hash,
						  app->obj.entity_id);
		if (vmp) {
			write_lock(&vport->vmid_lock);
			vmp->un.app_id = be32_to_cpu(app->app_id);
			vmp->flag |= LPFC_VMID_REGISTERED;
			vmp->flag &= ~LPFC_VMID_REQ_REGISTER;
			write_unlock(&vport->vmid_lock);
			/* Set IN USE flag */
			vport->vmid_flag |= LPFC_VMID_IN_USE;
		} else {
			lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
					 "6901 No entry found %s hash %d\n",
					 app->obj.entity_id, hash);
		}
		break;
	case SLI_CTAS_DAPP_IDENT:
		app = (struct app_id_object *)(DAPP_IDENT_OFFSET + data);
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "6713 DAPP_IDENT app id %d  port id x%x\n",
				 be32_to_cpu(app->app_id),
				 be32_to_cpu(app->port_id));
		break;
	case SLI_CTAS_DALLAPP_ID:
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "8856 Deregistered all app ids\n");
		read_lock(&vport->vmid_lock);
		for (i = 0; i < phba->cfg_max_vmid; i++) {
			vmp = &vport->vmid[i];
			if (vmp->flag != LPFC_VMID_SLOT_FREE)
				memset(vmp, 0, sizeof(struct lpfc_vmid));
		}
		read_unlock(&vport->vmid_lock);
		/* Remove every entry from the VMID hash table */
		if (!hash_empty(vport->hash_table))
			hash_for_each(vport->hash_table, bucket, cur, hnode)
				hash_del(&cur->hnode);
		set_bit(FC_ALLOW_VMID, &vport->load_flag);
		break;
	default:
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "8857 Invalid command code\n");
	}
free_res:
	lpfc_ct_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_vmid_cmd - Build and send a VMID CT command to the management server
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdcode: application-services command code to send
 * @vmid: pointer to the vmid info structure
 *
 * Builds and sends an application-services (VMID) CT command using the CT
 * subsystem.
 *
 * Return: 0 on success (or when no usable management server node exists),
 * -EIO if the request could not be issued.
 */
int
lpfc_vmid_cmd(struct lpfc_vport *vport,
	      int cmdcode, struct lpfc_vmid *vmid)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_dmabuf *mp, *bmp;
	struct lpfc_sli_ct_request *ctreq;
	struct ulp_bde64 *bpl;
	u32 size;
	u32 rsp_size;
	u8 *data;
	struct lpfc_vmid_rapp_ident_list *rap;
	struct lpfc_vmid_dapp_ident_list *dap;
	u8 retry = 0;
	struct lpfc_nodelist *ndlp;

	void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb);

	ndlp = lpfc_findnode_did(vport, FDMI_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return 0;

	cmpl = lpfc_cmpl_ct_cmd_vmid;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	mp = kmalloc(sizeof(*mp), GFP_KERNEL);
	if (!mp)
		goto vmid_free_mp_exit;

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt)
		goto vmid_free_mp_virt_exit;

	/* Allocate buffer for Buffer ptr list */
	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp)
		goto vmid_free_bmp_exit;

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt)
		goto vmid_free_bmp_virt_exit;

	INIT_LIST_HEAD(&mp->list);
	INIT_LIST_HEAD(&bmp->list);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "3275 VMID Request Data: x%lx x%x x%x\n",
			 vport->fc_flag, vport->port_state, cmdcode);
	ctreq = (struct lpfc_sli_ct_request *)mp->virt;
	data = mp->virt;
	/* First populate the CT_IU preamble */
	memset(data, 0, LPFC_BPL_SIZE);
	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;

	ctreq->FsType = SLI_CT_MANAGEMENT_SERVICE;
	ctreq->FsSubType = SLI_CT_APP_SEV_Subtypes;

	ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
	rsp_size = LPFC_BPL_SIZE;
	size = 0;

	switch (cmdcode) {
	case SLI_CTAS_RAPP_IDENT:
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "1329 RAPP_IDENT for %s\n", vmid->host_vmid);
		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
		rap = (struct lpfc_vmid_rapp_ident_list *)
			(DAPP_IDENT_OFFSET + data);
		rap->no_of_objects = cpu_to_be32(1);
		rap->obj[0].entity_id_len = vmid->vmid_len;
		memcpy(rap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len);
		size = RAPP_IDENT_OFFSET +
		       struct_size(rap, obj, be32_to_cpu(rap->no_of_objects));
		retry = 1;
		break;

	case SLI_CTAS_GALLAPPIA_ID:
		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
		size = GALLAPPIA_ID_SIZE;
		break;

	case SLI_CTAS_DAPP_IDENT:
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "1469 DAPP_IDENT for %s\n", vmid->host_vmid);
		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
		dap = (struct lpfc_vmid_dapp_ident_list *)
			(DAPP_IDENT_OFFSET + data);
		dap->no_of_objects = cpu_to_be32(1);
		dap->obj[0].entity_id_len = vmid->vmid_len;
		memcpy(dap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len);
		size = DAPP_IDENT_OFFSET +
		       struct_size(dap, obj, be32_to_cpu(dap->no_of_objects));
		write_lock(&vport->vmid_lock);
		vmid->flag &= ~LPFC_VMID_REGISTERED;
		write_unlock(&vport->vmid_lock);
		retry = 1;
		break;

	case SLI_CTAS_DALLAPP_ID:
		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
		size = DALLAPP_ID_SIZE;
		break;

	default:
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "7062 VMID cmdcode x%x not supported\n",
				 cmdcode);
		goto vmid_free_all_mem;
	}

	ctreq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);

	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = putPaddrHigh(mp->phys);
	bpl->addrLow = putPaddrLow(mp->phys);
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = size;
	/* lpfc_ct_cmd()/lpfc_get_req() increment the ndlp reference count
	 * to hold the ndlp for the corresponding completion callback.
	 */
	if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry))
		return 0;

 vmid_free_all_mem:
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
 vmid_free_bmp_virt_exit:
	kfree(bmp);
 vmid_free_bmp_exit:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
 vmid_free_mp_virt_exit:
	kfree(mp);
 vmid_free_mp_exit:

	/* Issue CT request failed */
	lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
			 "3276 VMID CT request failed Data: x%x\n", cmdcode);
	return -EIO;
}

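/*
 * Illustrative sketch, not part of the driver: how a caller might use
 * lpfc_vmid_cmd() to deregister every application ID on a vport.  The
 * helper name is hypothetical; the DALLAPP_ID case above does not
 * reference the vmid argument, so NULL is passed for it here.
 */
static int __maybe_unused
lpfc_example_dereg_all_app_ids(struct lpfc_vport *vport)
{
	/* Returns 0 on success (or no management server node), -EIO on failure */
	return lpfc_vmid_cmd(vport, SLI_CTAS_DALLAPP_ID, NULL);
}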