1/*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for         *
3 * Fibre Channel Host Bus Adapters.                                *
4 * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
5 * EMULEX and SLI are trademarks of Emulex.                        *
6 * www.emulex.com                                                  *
7 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8 *                                                                 *
9 * This program is free software; you can redistribute it and/or   *
10 * modify it under the terms of version 2 of the GNU General       *
11 * Public License as published by the Free Software Foundation.    *
12 * This program is distributed in the hope that it will be useful. *
13 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18 * more details, a copy of which can be found in the file COPYING  *
19 * included with this package.                                     *
20 *******************************************************************/
21/* See Fibre Channel protocol T11 FC-LS for details */
22#include <linux/blkdev.h>
23#include <linux/pci.h>
24#include <linux/slab.h>
25#include <linux/interrupt.h>
26
27#include <scsi/scsi.h>
28#include <scsi/scsi_device.h>
29#include <scsi/scsi_host.h>
30#include <scsi/scsi_transport_fc.h>
31
32#include "lpfc_hw4.h"
33#include "lpfc_hw.h"
34#include "lpfc_sli.h"
35#include "lpfc_sli4.h"
36#include "lpfc_nl.h"
37#include "lpfc_disc.h"
38#include "lpfc_scsi.h"
39#include "lpfc.h"
40#include "lpfc_logmsg.h"
41#include "lpfc_crtn.h"
42#include "lpfc_vport.h"
43#include "lpfc_debugfs.h"
44
45static int lpfc_els_retry(struct lpfc_hba *, struct lpfc_iocbq *,
46			  struct lpfc_iocbq *);
47static void lpfc_cmpl_fabric_iocb(struct lpfc_hba *, struct lpfc_iocbq *,
48			struct lpfc_iocbq *);
49static void lpfc_fabric_abort_vport(struct lpfc_vport *vport);
50static int lpfc_issue_els_fdisc(struct lpfc_vport *vport,
51				struct lpfc_nodelist *ndlp, uint8_t retry);
52static int lpfc_issue_fabric_iocb(struct lpfc_hba *phba,
53				  struct lpfc_iocbq *iocb);
54
55static int lpfc_max_els_tries = 3;
56
57/**
58 * lpfc_els_chk_latt - Check host link attention event for a vport
59 * @vport: pointer to a host virtual N_Port data structure.
60 *
61 * This routine checks whether there is an outstanding host link
62 * attention event during the discovery process with the @vport. It is done
63 * by reading the HBA's Host Attention (HA) register. If there are any host
64 * link attention events during this @vport's discovery process, the @vport
65 * shall be marked as FC_ABORT_DISCOVERY, a host link attention clear shall
66 * be issued if the link state is not already in host link cleared state,
67 * and a return code shall indicate whether the host link attention event
68 * had happened.
69 *
70 * Note that, if either the host link is in state LPFC_LINK_DOWN or the @vport
71 * state is LPFC_VPORT_READY, the request for checking host link attention
72 * event will be ignored and a return code shall indicate no host link
73 * attention event had happened.
74 *
75 * Return codes
76 *   0 - no host link attention event happened
77 *   1 - host link attention event happened
78 **/
79int
80lpfc_els_chk_latt(struct lpfc_vport *vport)
81{
82	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
83	struct lpfc_hba  *phba = vport->phba;
84	uint32_t ha_copy;
85
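	/* Skip the check if discovery is already complete for this vport, the
	 * link is down, or the port is SLI-4; SLI-4 ports do not report link
	 * attention through the HA register read below.
	 */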
86	if (vport->port_state >= LPFC_VPORT_READY ||
87	    phba->link_state == LPFC_LINK_DOWN ||
88	    phba->sli_rev > LPFC_SLI_REV3)
89		return 0;
90
91	/* Read the HBA Host Attention Register */
92	ha_copy = readl(phba->HAregaddr);
93
94	if (!(ha_copy & HA_LATT))
95		return 0;
96
97	/* Pending Link Event during Discovery */
98	lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
99			 "0237 Pending Link Event during "
100			 "Discovery: State x%x\n",
101			 phba->pport->port_state);
102
103	/* CLEAR_LA should re-enable link attention events and
104	 * we should then immediately take a LATT event. The
105	 * LATT processing should call lpfc_linkdown() which
106	 * will clean up any leftover in-progress discovery
107	 * events.
108	 */
109	spin_lock_irq(shost->host_lock);
110	vport->fc_flag |= FC_ABORT_DISCOVERY;
111	spin_unlock_irq(shost->host_lock);
112
113	if (phba->link_state != LPFC_CLEAR_LA)
114		lpfc_issue_clear_la(phba, vport);
115
116	return 1;
117}
118
119/**
120 * lpfc_prep_els_iocb - Allocate and prepare a lpfc iocb data structure
121 * @vport: pointer to a host virtual N_Port data structure.
122 * @expectRsp: flag indicating whether response is expected.
123 * @cmdSize: size of the ELS command.
124 * @retry: number of retries to the command IOCB when it fails.
125 * @ndlp: pointer to a node-list data structure.
126 * @did: destination identifier.
127 * @elscmd: the ELS command code.
128 *
129 * This routine allocates an lpfc IOCB data structure from the driver's
130 * lpfc IOCB free-list and prepares the IOCB with the parameters passed
131 * into the routine so that the discovery state machine can issue Extended
132 * Link Service (ELS) commands. It is a generic lpfc IOCB allocation and
133 * preparation routine used by all the discovery state machine routines;
134 * the ELS command-specific fields are set up later by the individual
135 * discovery state machine routines after this routine has allocated and
136 * prepared the generic IOCB data structure. It fills in the
137 * Buffer Descriptor Entries (BDEs), allocates buffers for both command
138 * payload and response payload (if expected). The reference count on the
139 * ndlp is incremented by 1 and the reference to the ndlp is put into
140 * context1 of the IOCB data structure for this IOCB to hold the ndlp
141 * reference for the command's callback function to access later.
142 *
143 * Return code
144 *   Pointer to the newly allocated/prepared els iocb data structure
145 *   NULL - when els iocb data structure allocation/preparation failed
146 **/
147struct lpfc_iocbq *
148lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
149		   uint16_t cmdSize, uint8_t retry,
150		   struct lpfc_nodelist *ndlp, uint32_t did,
151		   uint32_t elscmd)
152{
153	struct lpfc_hba  *phba = vport->phba;
154	struct lpfc_iocbq *elsiocb;
155	struct lpfc_dmabuf *pcmd, *prsp, *pbuflist;
156	struct ulp_bde64 *bpl;
157	IOCB_t *icmd;
158
159
160	if (!lpfc_is_link_up(phba))
161		return NULL;
162
163	/* Allocate buffer for command iocb */
164	elsiocb = lpfc_sli_get_iocbq(phba);
165
166	if (elsiocb == NULL)
167		return NULL;
168
169	/*
170	 * If this command is for fabric controller and HBA running
171	 * in FIP mode send FLOGI, FDISC and LOGO as FIP frames.
172	 */
173	if ((did == Fabric_DID) &&
174		(phba->hba_flag & HBA_FIP_SUPPORT) &&
175		((elscmd == ELS_CMD_FLOGI) ||
176		 (elscmd == ELS_CMD_FDISC) ||
177		 (elscmd == ELS_CMD_LOGO)))
178		switch (elscmd) {
179		case ELS_CMD_FLOGI:
180		elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
181					& LPFC_FIP_ELS_ID_MASK);
182		break;
183		case ELS_CMD_FDISC:
184		elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
185					& LPFC_FIP_ELS_ID_MASK);
186		break;
187		case ELS_CMD_LOGO:
188		elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
189					& LPFC_FIP_ELS_ID_MASK);
190		break;
191		}
192	else
193		elsiocb->iocb_flag &= ~LPFC_FIP_ELS_ID_MASK;
194
195	icmd = &elsiocb->iocb;
196
197	/* fill in BDEs for command */
198	/* Allocate buffer for command payload */
199	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
200	if (pcmd)
201		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
202	if (!pcmd || !pcmd->virt)
203		goto els_iocb_free_pcmb_exit;
204
205	INIT_LIST_HEAD(&pcmd->list);
206
207	/* Allocate buffer for response payload */
208	if (expectRsp) {
209		prsp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
210		if (prsp)
211			prsp->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
212						     &prsp->phys);
213		if (!prsp || !prsp->virt)
214			goto els_iocb_free_prsp_exit;
215		INIT_LIST_HEAD(&prsp->list);
216	} else
217		prsp = NULL;
218
219	/* Allocate buffer for Buffer ptr list */
220	pbuflist = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
221	if (pbuflist)
222		pbuflist->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
223						 &pbuflist->phys);
224	if (!pbuflist || !pbuflist->virt)
225		goto els_iocb_free_pbuf_exit;
226
227	INIT_LIST_HEAD(&pbuflist->list);
228
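	/* The IOCB's BDL below points at the buffer pointer list (pbuflist);
	 * the BPL itself is filled in further down with one BDE for the
	 * command payload and, when a response is expected, a second BDE for
	 * the response buffer.
	 */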
229	icmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys);
230	icmd->un.elsreq64.bdl.addrLow = putPaddrLow(pbuflist->phys);
231	icmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
232	icmd->un.elsreq64.remoteID = did;	/* DID */
233	if (expectRsp) {
234		icmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
235		icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
236		icmd->ulpTimeout = phba->fc_ratov * 2;
237	} else {
238		icmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
239		icmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
240	}
241	icmd->ulpBdeCount = 1;
242	icmd->ulpLe = 1;
243	icmd->ulpClass = CLASS3;
244
245	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
246		icmd->un.elsreq64.myID = vport->fc_myDID;
247
248		/* For ELS_REQUEST64_CR, use the VPI by default */
249		icmd->ulpContext = vport->vpi + phba->vpi_base;
250		icmd->ulpCt_h = 0;
251		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
252		if (elscmd == ELS_CMD_ECHO)
253			icmd->ulpCt_l = 0; /* context = invalid RPI */
254		else
255			icmd->ulpCt_l = 1; /* context = VPI */
256	}
257
258	bpl = (struct ulp_bde64 *) pbuflist->virt;
259	bpl->addrLow = le32_to_cpu(putPaddrLow(pcmd->phys));
260	bpl->addrHigh = le32_to_cpu(putPaddrHigh(pcmd->phys));
261	bpl->tus.f.bdeSize = cmdSize;
262	bpl->tus.f.bdeFlags = 0;
263	bpl->tus.w = le32_to_cpu(bpl->tus.w);
264
265	if (expectRsp) {
266		bpl++;
267		bpl->addrLow = le32_to_cpu(putPaddrLow(prsp->phys));
268		bpl->addrHigh = le32_to_cpu(putPaddrHigh(prsp->phys));
269		bpl->tus.f.bdeSize = FCELSSIZE;
270		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
271		bpl->tus.w = le32_to_cpu(bpl->tus.w);
272	}
273
274	/* prevent preparing iocb with NULL ndlp reference */
275	elsiocb->context1 = lpfc_nlp_get(ndlp);
276	if (!elsiocb->context1)
277		goto els_iocb_free_pbuf_exit;
278	elsiocb->context2 = pcmd;
279	elsiocb->context3 = pbuflist;
280	elsiocb->retry = retry;
281	elsiocb->vport = vport;
282	elsiocb->drvrTimeout = (phba->fc_ratov << 1) + LPFC_DRVR_TIMEOUT;
283
284	if (prsp) {
285		list_add(&prsp->list, &pcmd->list);
286	}
287	if (expectRsp) {
288		/* Xmit ELS command <elsCmd> to remote NPORT <did> */
289		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
290				 "0116 Xmit ELS command x%x to remote "
291				 "NPORT x%x I/O tag: x%x, port state: x%x\n",
292				 elscmd, did, elsiocb->iotag,
293				 vport->port_state);
294	} else {
295		/* Xmit ELS response <elsCmd> to remote NPORT <did> */
296		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
297				 "0117 Xmit ELS response x%x to remote "
298				 "NPORT x%x I/O tag: x%x, size: x%x\n",
299				 elscmd, ndlp->nlp_DID, elsiocb->iotag,
300				 cmdSize);
301	}
302	return elsiocb;
303
304els_iocb_free_pbuf_exit:
305	if (expectRsp)
306		lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
307	kfree(pbuflist);
308
309els_iocb_free_prsp_exit:
310	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
311	kfree(prsp);
312
313els_iocb_free_pcmb_exit:
314	kfree(pcmd);
315	lpfc_sli_release_iocbq(phba, elsiocb);
316	return NULL;
317}
318
319/**
320 * lpfc_issue_fabric_reglogin - Issue fabric registration login for a vport
321 * @vport: pointer to a host virtual N_Port data structure.
322 *
323 * This routine issues a fabric registration login for a @vport. An
324 * active ndlp node with Fabric_DID must already exist for this @vport.
325 * The routine invokes two mailbox commands to carry out fabric registration
326 * login through the HBA firmware: the first mailbox command requests the
327 * HBA to perform link configuration for the @vport; and the second mailbox
328 * command requests the HBA to perform the actual fabric registration login
329 * with the @vport.
330 *
331 * Return code
332 *   0 - successfully issued fabric registration login for @vport
333 *   -ENXIO -- failed to issue fabric registration login for @vport
334 **/
335int
336lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
337{
338	struct lpfc_hba  *phba = vport->phba;
339	LPFC_MBOXQ_t *mbox;
340	struct lpfc_dmabuf *mp;
341	struct lpfc_nodelist *ndlp;
342	struct serv_parm *sp;
343	int rc;
344	int err = 0;
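	/* err only records which step failed for the 0249 message below; the
	 * function returns -ENXIO on any failure.
	 */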
345
346	sp = &phba->fc_fabparam;
347	ndlp = lpfc_findnode_did(vport, Fabric_DID);
348	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
349		err = 1;
350		goto fail;
351	}
352
353	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
354	if (!mbox) {
355		err = 2;
356		goto fail;
357	}
358
359	vport->port_state = LPFC_FABRIC_CFG_LINK;
360	lpfc_config_link(phba, mbox);
361	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
362	mbox->vport = vport;
363
364	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
365	if (rc == MBX_NOT_FINISHED) {
366		err = 3;
367		goto fail_free_mbox;
368	}
369
370	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
371	if (!mbox) {
372		err = 4;
373		goto fail;
374	}
375	rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
376	if (rc) {
377		err = 5;
378		goto fail_free_mbox;
379	}
380
381	mbox->mbox_cmpl = lpfc_mbx_cmpl_fabric_reg_login;
382	mbox->vport = vport;
383	/* increment the reference count on ndlp to hold reference
384	 * for the callback routine.
385	 */
386	mbox->context2 = lpfc_nlp_get(ndlp);
387
388	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
389	if (rc == MBX_NOT_FINISHED) {
390		err = 6;
391		goto fail_issue_reg_login;
392	}
393
394	return 0;
395
396fail_issue_reg_login:
397	/* decrement the reference count on ndlp just incremented
398	 * for the failed mbox command.
399	 */
400	lpfc_nlp_put(ndlp);
401	mp = (struct lpfc_dmabuf *) mbox->context1;
402	lpfc_mbuf_free(phba, mp->virt, mp->phys);
403	kfree(mp);
404fail_free_mbox:
405	mempool_free(mbox, phba->mbox_mem_pool);
406
407fail:
408	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
409	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
410		"0249 Cannot issue Register Fabric login: Err %d\n", err);
411	return -ENXIO;
412}
413
414/**
415 * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
416 * @vport: pointer to a host virtual N_Port data structure.
417 *
418 * This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
419 * the @vport. This mailbox command is necessary for FCoE only.
420 *
421 * Return code
422 *   0 - successfully issued REG_VFI for @vport
423 *   A failure code otherwise.
424 **/
425static int
426lpfc_issue_reg_vfi(struct lpfc_vport *vport)
427{
428	struct lpfc_hba  *phba = vport->phba;
429	LPFC_MBOXQ_t *mboxq;
430	struct lpfc_nodelist *ndlp;
431	struct serv_parm *sp;
432	struct lpfc_dmabuf *dmabuf;
433	int rc = 0;
434
435	sp = &phba->fc_fabparam;
436	ndlp = lpfc_findnode_did(vport, Fabric_DID);
437	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
438		rc = -ENODEV;
439		goto fail;
440	}
441
442	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
443	if (!dmabuf) {
444		rc = -ENOMEM;
445		goto fail;
446	}
447	dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
448	if (!dmabuf->virt) {
449		rc = -ENOMEM;
450		goto fail_free_dmabuf;
451	}
452	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
453	if (!mboxq) {
454		rc = -ENOMEM;
455		goto fail_free_coherent;
456	}
457	vport->port_state = LPFC_FABRIC_CFG_LINK;
458	memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
459	lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
460	mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
461	mboxq->vport = vport;
462	mboxq->context1 = dmabuf;
463	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
464	if (rc == MBX_NOT_FINISHED) {
465		rc = -ENXIO;
466		goto fail_free_mbox;
467	}
468	return 0;
469
470fail_free_mbox:
471	mempool_free(mboxq, phba->mbox_mem_pool);
472fail_free_coherent:
473	lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
474fail_free_dmabuf:
475	kfree(dmabuf);
476fail:
477	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
478	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
479		"0289 Issue Register VFI failed: Err %d\n", rc);
480	return rc;
481}
482
483/**
484 * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
485 * @vport: pointer to a host virtual N_Port data structure.
486 * @ndlp: pointer to a node-list data structure.
487 * @sp: pointer to service parameter data structure.
488 * @irsp: pointer to the IOCB within the lpfc response IOCB.
489 *
490 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
491 * function to handle the completion of a Fabric Login (FLOGI) into a fabric
492 * port in a fabric topology. It properly sets up the parameters to the @ndlp
493 * from the IOCB response. It also checks the newly assigned N_Port ID of the
494 * @vport against the previously assigned N_Port ID. If it is different from
495 * the previously assigned Destination ID (DID), the lpfc_unreg_rpi() routine
496 * is invoked on all the remaining nodes with the @vport to unregister the
497 * Remote Port Indicators (RPIs). Finally, the lpfc_issue_fabric_reglogin()
498 * is invoked to register login to the fabric.
499 *
500 * Return code
501 *   0 - Success (currently, always return 0)
502 **/
503static int
504lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
505			   struct serv_parm *sp, IOCB_t *irsp)
506{
507	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
508	struct lpfc_hba  *phba = vport->phba;
509	struct lpfc_nodelist *np;
510	struct lpfc_nodelist *next_np;
511
512	spin_lock_irq(shost->host_lock);
513	vport->fc_flag |= FC_FABRIC;
514	spin_unlock_irq(shost->host_lock);
515
516	phba->fc_edtov = be32_to_cpu(sp->cmn.e_d_tov);
517	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
518		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
519
520	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
521
522	if (phba->fc_topology == TOPOLOGY_LOOP) {
523		spin_lock_irq(shost->host_lock);
524		vport->fc_flag |= FC_PUBLIC_LOOP;
525		spin_unlock_irq(shost->host_lock);
526	} else {
527		/*
528		 * If we are an N_Port connected to a Fabric, fix up the service
529		 * parameters so logins to devices on remote loops work.
530		 */
531		vport->fc_sparam.cmn.altBbCredit = 1;
532	}
533
534	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
535	memcpy(&ndlp->nlp_portname, &sp->portName, sizeof(struct lpfc_name));
536	memcpy(&ndlp->nlp_nodename, &sp->nodeName, sizeof(struct lpfc_name));
537	ndlp->nlp_class_sup = 0;
538	if (sp->cls1.classValid)
539		ndlp->nlp_class_sup |= FC_COS_CLASS1;
540	if (sp->cls2.classValid)
541		ndlp->nlp_class_sup |= FC_COS_CLASS2;
542	if (sp->cls3.classValid)
543		ndlp->nlp_class_sup |= FC_COS_CLASS3;
544	if (sp->cls4.classValid)
545		ndlp->nlp_class_sup |= FC_COS_CLASS4;
546	ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
547				sp->cmn.bbRcvSizeLsb;
548	memcpy(&phba->fc_fabparam, sp, sizeof(struct serv_parm));
549
550	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
551		if (sp->cmn.response_multiple_NPort) {
552			lpfc_printf_vlog(vport, KERN_WARNING,
553					 LOG_ELS | LOG_VPORT,
554					 "1816 FLOGI NPIV supported, "
555					 "response data 0x%x\n",
556					 sp->cmn.response_multiple_NPort);
557			phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
558		} else {
559			/* Because we asked f/w for NPIV it still expects us
560			 * to call reg_vnpid at least for the physical host */
561			lpfc_printf_vlog(vport, KERN_WARNING,
562					 LOG_ELS | LOG_VPORT,
563					 "1817 Fabric does not support NPIV "
564					 "- configuring single port mode.\n");
565			phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
566		}
567	}
568
569	if ((vport->fc_prevDID != vport->fc_myDID) &&
570		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
571
572		/* If our NportID changed, we need to ensure all
573		 * remaining NPORTs get unreg_login'ed.
574		 */
575		list_for_each_entry_safe(np, next_np,
576					&vport->fc_nodes, nlp_listp) {
577			if (!NLP_CHK_NODE_ACT(np))
578				continue;
579			if ((np->nlp_state != NLP_STE_NPR_NODE) ||
580				   !(np->nlp_flag & NLP_NPR_ADISC))
581				continue;
582			spin_lock_irq(shost->host_lock);
583			np->nlp_flag &= ~NLP_NPR_ADISC;
584			spin_unlock_irq(shost->host_lock);
585			lpfc_unreg_rpi(vport, np);
586		}
587		lpfc_cleanup_pending_mbox(vport);
588		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
589			lpfc_mbx_unreg_vpi(vport);
590			spin_lock_irq(shost->host_lock);
591			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
592			spin_unlock_irq(shost->host_lock);
593		}
594		/*
595		 * If the VPI has been unregistered, the driver needs to issue
596		 * INIT_VPI before re-registering it.
597		 */
598		if (phba->sli_rev == LPFC_SLI_REV4) {
599			spin_lock_irq(shost->host_lock);
600			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
601			spin_unlock_irq(shost->host_lock);
602		}
603	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
604		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
605			/*
606			 * Driver needs to re-reg VPI in order for f/w
607			 * to update the MAC address.
608			 */
609			lpfc_register_new_vport(phba, vport, ndlp);
610			return 0;
611	}
612
613	if (phba->sli_rev < LPFC_SLI_REV4) {
614		lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
615		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
616		    vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
617			lpfc_register_new_vport(phba, vport, ndlp);
618		else
619			lpfc_issue_fabric_reglogin(vport);
620	} else {
621		ndlp->nlp_type |= NLP_FABRIC;
622		lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
623		if ((!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) &&
624			(vport->vpi_state & LPFC_VPI_REGISTERED)) {
625			lpfc_start_fdiscs(phba);
626			lpfc_do_scr_ns_plogi(phba, vport);
627		} else if (vport->fc_flag & FC_VFI_REGISTERED)
628			lpfc_issue_init_vpi(vport);
629		else
630			lpfc_issue_reg_vfi(vport);
631	}
632	return 0;
633}
634/**
635 * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
636 * @vport: pointer to a host virtual N_Port data structure.
637 * @ndlp: pointer to a node-list data structure.
638 * @sp: pointer to service parameter data structure.
639 *
640 * This routine is invoked by the lpfc_cmpl_els_flogi() completion callback
641 * function to handle the completion of a Fabric Login (FLOGI) into an N_Port
642 * in a point-to-point topology. First, the @vport's N_Port Name is compared
643 * with the received N_Port Name: if the @vport's N_Port Name is greater than
644 * the received N_Port Name lexicographically, this node shall assign local
645 * N_Port ID (PT2PT_LocalID: 1) and remote N_Port ID (PT2PT_RemoteID: 2) and
646 * will send out Port Login (PLOGI) with the N_Port IDs assigned. Otherwise,
647 * this node shall just wait for the remote node to issue PLOGI and assign
648 * N_Port IDs.
649 *
650 * Return code
651 *   0 - Success
652 *   -ENXIO - Fail
653 **/
654static int
655lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
656			  struct serv_parm *sp)
657{
658	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
659	struct lpfc_hba  *phba = vport->phba;
660	LPFC_MBOXQ_t *mbox;
661	int rc;
662
663	spin_lock_irq(shost->host_lock);
664	vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
665	spin_unlock_irq(shost->host_lock);
666
667	phba->fc_edtov = FF_DEF_EDTOV;
668	phba->fc_ratov = FF_DEF_RATOV;
669	rc = memcmp(&vport->fc_portname, &sp->portName,
670		    sizeof(vport->fc_portname));
671	if (rc >= 0) {
672		/* This side will initiate the PLOGI */
673		spin_lock_irq(shost->host_lock);
674		vport->fc_flag |= FC_PT2PT_PLOGI;
675		spin_unlock_irq(shost->host_lock);
676
677		/*
678		 * N_Port ID cannot be 0; set ours to the LocalID and the
679		 * other side will be the RemoteID.
680		 */
681
682		/* not equal */
683		if (rc)
684			vport->fc_myDID = PT2PT_LocalID;
685
686		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
687		if (!mbox)
688			goto fail;
689
690		lpfc_config_link(phba, mbox);
691
692		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
693		mbox->vport = vport;
694		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
695		if (rc == MBX_NOT_FINISHED) {
696			mempool_free(mbox, phba->mbox_mem_pool);
697			goto fail;
698		}
699		/* Decrement ndlp reference count indicating that ndlp can be
700		 * safely released when other references to it are done.
701		 */
702		lpfc_nlp_put(ndlp);
703
704		ndlp = lpfc_findnode_did(vport, PT2PT_RemoteID);
705		if (!ndlp) {
706			/*
707			 * Cannot find existing Fabric ndlp, so allocate a
708			 * new one
709			 */
710			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
711			if (!ndlp)
712				goto fail;
713			lpfc_nlp_init(vport, ndlp, PT2PT_RemoteID);
714		} else if (!NLP_CHK_NODE_ACT(ndlp)) {
715			ndlp = lpfc_enable_node(vport, ndlp,
716						NLP_STE_UNUSED_NODE);
717			if (!ndlp)
718				goto fail;
719		}
720
721		memcpy(&ndlp->nlp_portname, &sp->portName,
722		       sizeof(struct lpfc_name));
723		memcpy(&ndlp->nlp_nodename, &sp->nodeName,
724		       sizeof(struct lpfc_name));
725		/* Set state will put ndlp onto node list if not already done */
726		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
727		spin_lock_irq(shost->host_lock);
728		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
729		spin_unlock_irq(shost->host_lock);
730	} else
731		/* This side will wait for the PLOGI, decrement ndlp reference
732		 * count indicating that ndlp can be released when other
733		 * references to it are done.
734		 */
735		lpfc_nlp_put(ndlp);
736
737	/* If we are pt2pt with another NPort, force NPIV off! */
738	phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
739
740	spin_lock_irq(shost->host_lock);
741	vport->fc_flag |= FC_PT2PT;
742	spin_unlock_irq(shost->host_lock);
743
744	/* Start discovery - this should just do CLEAR_LA */
745	lpfc_disc_start(vport);
746	return 0;
747fail:
748	return -ENXIO;
749}
750
751/**
752 * lpfc_cmpl_els_flogi - Completion callback function for flogi
753 * @phba: pointer to lpfc hba data structure.
754 * @cmdiocb: pointer to lpfc command iocb data structure.
755 * @rspiocb: pointer to lpfc response iocb data structure.
756 *
757 * This routine is the top-level completion callback function for issuing
758 * a Fabric Login (FLOGI) command. If the response IOCB reported error,
759 * the lpfc_els_retry() routine shall be invoked to retry the FLOGI. If
760 * retry has been made (either immediately or delayed with lpfc_els_retry()
761 * returning 1), the command IOCB will be released and function returned.
762 * If the retry attempt has been given up (possibly reach the maximum
763 * number of retries), one additional decrement of ndlp reference shall be
764 * invoked before going out after releasing the command IOCB. This will
765 * actually release the remote node (Note, lpfc_els_free_iocb() will also
766 * invoke one decrement of ndlp reference count). If no error reported in
767 * the IOCB status, the command Port ID field is used to determine whether
768 * this is a point-to-point topology or a fabric topology: if the Port ID
769 * field is assigned, it is a fabric topology; otherwise, it is a
770 * point-to-point topology. The routine lpfc_cmpl_els_flogi_fabric() or
771 * lpfc_cmpl_els_flogi_nport() shall be invoked accordingly to handle the
772 * specific topology completion conditions.
773 **/
774static void
775lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
776		    struct lpfc_iocbq *rspiocb)
777{
778	struct lpfc_vport *vport = cmdiocb->vport;
779	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
780	IOCB_t *irsp = &rspiocb->iocb;
781	struct lpfc_nodelist *ndlp = cmdiocb->context1;
782	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
783	struct serv_parm *sp;
784	uint16_t fcf_index;
785	int rc;
786
787	/* Check to see if link went down during discovery */
788	if (lpfc_els_chk_latt(vport)) {
789		/* One additional decrement on node reference count to
790		 * trigger the release of the node
791		 */
792		lpfc_nlp_put(ndlp);
793		goto out;
794	}
795
796	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
797		"FLOGI cmpl:      status:x%x/x%x state:x%x",
798		irsp->ulpStatus, irsp->un.ulpWord[4],
799		vport->port_state);
800
801	if (irsp->ulpStatus) {
802		/*
803		 * In case of FIP mode, perform round robin FCF failover
804		 * due to new FCF discovery
805		 */
806		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
807		    (phba->fcf.fcf_flag & FCF_DISCOVERY) &&
808		    (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) &&
809		    (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) {
810			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
811					"2611 FLOGI failed on registered "
812					"FCF record fcf_index(%d), status: "
813					"x%x/x%x, tmo:x%x, trying to perform "
814					"round robin failover\n",
815					phba->fcf.current_rec.fcf_indx,
816					irsp->ulpStatus, irsp->un.ulpWord[4],
817					irsp->ulpTimeout);
818			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
819			if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
820				/*
821				 * Exhausted the eligible FCF record list,
822				 * fail through to retry FLOGI on current
823				 * FCF record.
824				 */
825				lpfc_printf_log(phba, KERN_WARNING,
826						LOG_FIP | LOG_ELS,
827						"2760 Completed one round "
828						"of FLOGI FCF round robin "
829						"failover list, retry FLOGI "
830						"on currently registered "
831						"FCF index:%d\n",
832						phba->fcf.current_rec.fcf_indx);
833			} else {
834				lpfc_printf_log(phba, KERN_INFO,
835						LOG_FIP | LOG_ELS,
836						"2794 FLOGI FCF round robin "
837						"failover to FCF index x%x\n",
838						fcf_index);
839				rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
840								   fcf_index);
841				if (rc)
842					lpfc_printf_log(phba, KERN_WARNING,
843							LOG_FIP | LOG_ELS,
844							"2761 FLOGI round "
845							"robin FCF failover "
846							"read FCF failed "
847							"rc:x%x, fcf_index:"
848							"%d\n", rc,
849						phba->fcf.current_rec.fcf_indx);
850				else
851					goto out;
852			}
853		}
854
855		/* FLOGI failure */
856		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
857				"2858 FLOGI failure Status:x%x/x%x TMO:x%x\n",
858				irsp->ulpStatus, irsp->un.ulpWord[4],
859				irsp->ulpTimeout);
860
861		/* Check for retry */
862		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
863			goto out;
864
865		/* FLOGI failed, so there is no fabric */
866		spin_lock_irq(shost->host_lock);
867		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
868		spin_unlock_irq(shost->host_lock);
869
870		/* If private loop, then allow max outstanding els to be
871		 * LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
872		 * alpa map would take too long otherwise.
873		 */
874		if (phba->alpa_map[0] == 0) {
875			vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
876		}
877
878		/* FLOGI failure */
879		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
880				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
881				 irsp->ulpStatus, irsp->un.ulpWord[4],
882				 irsp->ulpTimeout);
883		goto flogifail;
884	}
885	spin_lock_irq(shost->host_lock);
886	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
887	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
888	spin_unlock_irq(shost->host_lock);
889
890	/*
891	 * The FLOGI succeeded.  Sync the data for the CPU before
892	 * accessing it.
893	 */
894	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
895
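	/* The first word of the response payload is the ELS command code
	 * (the LS_ACC); the service parameters start one word in.
	 */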
896	sp = prsp->virt + sizeof(uint32_t);
897
898	/* FLOGI completes successfully */
899	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
900			 "0101 FLOGI completes successfully "
901			 "Data: x%x x%x x%x x%x\n",
902			 irsp->un.ulpWord[4], sp->cmn.e_d_tov,
903			 sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution);
904
905	if (vport->port_state == LPFC_FLOGI) {
906		/*
907		 * If the Common Service Parameters indicate an N_Port,
908		 * we are point-to-point; if an F_Port, we are attached to a fabric.
909		 */
910		if (sp->cmn.fPort)
911			rc = lpfc_cmpl_els_flogi_fabric(vport, ndlp, sp, irsp);
912		else if (!(phba->hba_flag & HBA_FCOE_SUPPORT))
913			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
914		else {
915			lpfc_printf_vlog(vport, KERN_ERR,
916				LOG_FIP | LOG_ELS,
917				"2831 FLOGI response with cleared Fabric "
918				"bit fcf_index 0x%x "
919				"Switch Name %02x%02x%02x%02x%02x%02x%02x%02x "
920				"Fabric Name "
921				"%02x%02x%02x%02x%02x%02x%02x%02x\n",
922				phba->fcf.current_rec.fcf_indx,
923				phba->fcf.current_rec.switch_name[0],
924				phba->fcf.current_rec.switch_name[1],
925				phba->fcf.current_rec.switch_name[2],
926				phba->fcf.current_rec.switch_name[3],
927				phba->fcf.current_rec.switch_name[4],
928				phba->fcf.current_rec.switch_name[5],
929				phba->fcf.current_rec.switch_name[6],
930				phba->fcf.current_rec.switch_name[7],
931				phba->fcf.current_rec.fabric_name[0],
932				phba->fcf.current_rec.fabric_name[1],
933				phba->fcf.current_rec.fabric_name[2],
934				phba->fcf.current_rec.fabric_name[3],
935				phba->fcf.current_rec.fabric_name[4],
936				phba->fcf.current_rec.fabric_name[5],
937				phba->fcf.current_rec.fabric_name[6],
938				phba->fcf.current_rec.fabric_name[7]);
939			lpfc_nlp_put(ndlp);
940			spin_lock_irq(&phba->hbalock);
941			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
942			spin_unlock_irq(&phba->hbalock);
943			goto out;
944		}
945		if (!rc) {
946			/* Mark the FCF discovery process done */
947			if (phba->hba_flag & HBA_FIP_SUPPORT)
948				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
949						LOG_ELS,
950						"2769 FLOGI successful on FCF "
951						"record: current_fcf_index:"
952						"x%x, terminate FCF round "
953						"robin failover process\n",
954						phba->fcf.current_rec.fcf_indx);
955			spin_lock_irq(&phba->hbalock);
956			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
957			spin_unlock_irq(&phba->hbalock);
958			goto out;
959		}
960	}
961
962flogifail:
963	lpfc_nlp_put(ndlp);
964
965	if (!lpfc_error_lost_link(irsp)) {
966		/* FLOGI failed, so just use loop map to make discovery list */
967		lpfc_disc_list_loopmap(vport);
968
969		/* Start discovery */
970		lpfc_disc_start(vport);
971	} else if (((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
972			((irsp->un.ulpWord[4] != IOERR_SLI_ABORTED) &&
973			(irsp->un.ulpWord[4] != IOERR_SLI_DOWN))) &&
974			(phba->link_state != LPFC_CLEAR_LA)) {
975		/* If FLOGI failed enable link interrupt. */
976		lpfc_issue_clear_la(phba, vport);
977	}
978out:
979	lpfc_els_free_iocb(phba, cmdiocb);
980}
981
982/**
983 * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport
984 * @vport: pointer to a host virtual N_Port data structure.
985 * @ndlp: pointer to a node-list data structure.
986 * @retry: number of retries to the command IOCB.
987 *
988 * This routine issues a Fabric Login (FLOGI) Request ELS command
989 * for a @vport. The initiator service parameters are put into the payload
990 * of the FLOGI Request IOCB and the top-level callback function pointer
991 * to lpfc_cmpl_els_flogi() routine is put to the IOCB completion callback
992 * function field. The lpfc_issue_fabric_iocb routine is invoked to send
993 * out FLOGI ELS command with one outstanding fabric IOCB at a time.
994 *
995 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
996 * will be incremented by 1 for holding the ndlp and the reference to ndlp
997 * will be stored into the context1 field of the IOCB for the completion
998 * callback function to the FLOGI ELS command.
999 *
1000 * Return code
1001 *   0 - successfully issued flogi iocb for @vport
1002 *   1 - failed to issue flogi iocb for @vport
1003 **/
1004static int
1005lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1006		     uint8_t retry)
1007{
1008	struct lpfc_hba  *phba = vport->phba;
1009	struct serv_parm *sp;
1010	IOCB_t *icmd;
1011	struct lpfc_iocbq *elsiocb;
1012	struct lpfc_sli_ring *pring;
1013	uint8_t *pcmd;
1014	uint16_t cmdsize;
1015	uint32_t tmo;
1016	int rc;
1017
1018	pring = &phba->sli.ring[LPFC_ELS_RING];
1019
1020	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1021	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1022				     ndlp->nlp_DID, ELS_CMD_FLOGI);
1023
1024	if (!elsiocb)
1025		return 1;
1026
1027	icmd = &elsiocb->iocb;
1028	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1029
1030	/* For FLOGI request, remainder of payload is service parameters */
1031	*((uint32_t *) (pcmd)) = ELS_CMD_FLOGI;
1032	pcmd += sizeof(uint32_t);
1033	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1034	sp = (struct serv_parm *) pcmd;
1035
1036	/* Setup CSPs accordingly for Fabric */
1037	sp->cmn.e_d_tov = 0;
1038	sp->cmn.w2.r_a_tov = 0;
1039	sp->cls1.classValid = 0;
1040	sp->cls2.seqDelivery = 1;
1041	sp->cls3.seqDelivery = 1;
1042	if (sp->cmn.fcphLow < FC_PH3)
1043		sp->cmn.fcphLow = FC_PH3;
1044	if (sp->cmn.fcphHigh < FC_PH3)
1045		sp->cmn.fcphHigh = FC_PH3;
1046
1047	if  (phba->sli_rev == LPFC_SLI_REV4) {
1048		elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
1049		elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
1050		/* FLOGI needs to be 3 for WQE FCFI */
1051		/* Set the fcfi to the fcfi we registered with */
1052		elsiocb->iocb.ulpContext = phba->fcf.fcfi;
1053	} else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
1054		sp->cmn.request_multiple_Nport = 1;
1055		/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
1056		icmd->ulpCt_h = 1;
1057		icmd->ulpCt_l = 0;
1058	}
1059
1060	if (phba->fc_topology != TOPOLOGY_LOOP) {
1061		icmd->un.elsreq64.myID = 0;
1062		icmd->un.elsreq64.fl = 1;
1063	}
1064
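	/* Temporarily substitute the FLOGI discovery timeout for fc_ratov so
	 * the discovery timer armed by lpfc_set_disctmo() uses that value,
	 * then restore the saved R_A_TOV.
	 */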
1065	tmo = phba->fc_ratov;
1066	phba->fc_ratov = LPFC_DISC_FLOGI_TMO;
1067	lpfc_set_disctmo(vport);
1068	phba->fc_ratov = tmo;
1069
1070	phba->fc_stat.elsXmitFLOGI++;
1071	elsiocb->iocb_cmpl = lpfc_cmpl_els_flogi;
1072
1073	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1074		"Issue FLOGI:     opt:x%x",
1075		phba->sli3_options, 0, 0);
1076
1077	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
1078	if (rc == IOCB_ERROR) {
1079		lpfc_els_free_iocb(phba, elsiocb);
1080		return 1;
1081	}
1082	return 0;
1083}
1084
1085/**
1086 * lpfc_els_abort_flogi - Abort all outstanding flogi iocbs
1087 * @phba: pointer to lpfc hba data structure.
1088 *
1089 * This routine aborts all the outstanding Fabric Login (FLOGI) IOCBs
1090 * with a @phba. This routine walks all the outstanding IOCBs on the txcmplq
1091 * list and issues an abort IOCB command on each outstanding IOCB that
1092 * contains an active Fabric_DID ndlp. Note that this function is to issue
1093 * the abort IOCB command on all the outstanding IOCBs, thus when this
1094 * function returns, it does not guarantee all the IOCBs are actually aborted.
1095 *
1096 * Return code
1097 *   0 - Successfully issued abort iocb on all outstanding flogis (Always 0)
1098 **/
1099int
1100lpfc_els_abort_flogi(struct lpfc_hba *phba)
1101{
1102	struct lpfc_sli_ring *pring;
1103	struct lpfc_iocbq *iocb, *next_iocb;
1104	struct lpfc_nodelist *ndlp;
1105	IOCB_t *icmd;
1106
1107	/* Abort outstanding I/O on NPort <nlp_DID> */
1108	lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY,
1109			"0201 Abort outstanding I/O on NPort x%x\n",
1110			Fabric_DID);
1111
1112	pring = &phba->sli.ring[LPFC_ELS_RING];
1113
1114	/*
1115	 * Check the txcmplq for an iocb that matches the nport the driver is
1116	 * searching for.
1117	 */
1118	spin_lock_irq(&phba->hbalock);
1119	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1120		icmd = &iocb->iocb;
1121		if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
1122		    icmd->un.elsreq64.bdl.ulpIoTag32) {
1123			ndlp = (struct lpfc_nodelist *)(iocb->context1);
1124			if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
1125			    (ndlp->nlp_DID == Fabric_DID))
1126				lpfc_sli_issue_abort_iotag(phba, pring, iocb);
1127		}
1128	}
1129	spin_unlock_irq(&phba->hbalock);
1130
1131	return 0;
1132}
1133
1134/**
1135 * lpfc_initial_flogi - Issue an initial fabric login for a vport
1136 * @vport: pointer to a host virtual N_Port data structure.
1137 *
1138 * This routine issues an initial Fabric Login (FLOGI) for the @vport
1139 * specified. It first searches for the ndlp with the Fabric_DID (0xfffffe) on
1140 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
1141 * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
1142 * it will just be enabled and made active. The lpfc_issue_els_flogi() routine
1143 * is then invoked with the @vport and the ndlp to perform the FLOGI for the
1144 * @vport.
1145 *
1146 * Return code
1147 *   0 - failed to issue initial flogi for @vport
1148 *   1 - successfully issued initial flogi for @vport
1149 **/
1150int
1151lpfc_initial_flogi(struct lpfc_vport *vport)
1152{
1153	struct lpfc_hba *phba = vport->phba;
1154	struct lpfc_nodelist *ndlp;
1155
1156	vport->port_state = LPFC_FLOGI;
1157	lpfc_set_disctmo(vport);
1158
1159	/* First look for the Fabric ndlp */
1160	ndlp = lpfc_findnode_did(vport, Fabric_DID);
1161	if (!ndlp) {
1162		/* Cannot find existing Fabric ndlp, so allocate a new one */
1163		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1164		if (!ndlp)
1165			return 0;
1166		lpfc_nlp_init(vport, ndlp, Fabric_DID);
1167		/* Set the node type */
1168		ndlp->nlp_type |= NLP_FABRIC;
1169		/* Put ndlp onto node list */
1170		lpfc_enqueue_node(vport, ndlp);
1171	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
1172		/* re-setup ndlp without removing from node list */
1173		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1174		if (!ndlp)
1175			return 0;
1176	}
1177
1178	if (lpfc_issue_els_flogi(vport, ndlp, 0))
1179		/* This decrement of reference count to node shall kick off
1180		 * the release of the node.
1181		 */
1182		lpfc_nlp_put(ndlp);
1183
1184	return 1;
1185}
1186
1187/**
1188 * lpfc_initial_fdisc - Issue an initial fabric discovery for a vport
1189 * @vport: pointer to a host virtual N_Port data structure.
1190 *
1191 * This routine issues an initial Fabric Discover (FDISC) for the @vport
1192 * specified. It first searches for the ndlp with the Fabric_DID (0xfffffe) on
1193 * the @vport's ndlp list. If no such ndlp is found, it will create an ndlp and
1194 * put it into the @vport's ndlp list. If an inactive ndlp is found on the list,
1195 * it will just be enabled and made active. The lpfc_issue_els_fdisc() routine
1196 * is then invoked with the @vport and the ndlp to perform the FDISC for the
1197 * @vport.
1198 *
1199 * Return code
1200 *   0 - failed to issue initial fdisc for @vport
1201 *   1 - successfully issued initial fdisc for @vport
1202 **/
1203int
1204lpfc_initial_fdisc(struct lpfc_vport *vport)
1205{
1206	struct lpfc_hba *phba = vport->phba;
1207	struct lpfc_nodelist *ndlp;
1208
1209	/* First look for the Fabric ndlp */
1210	ndlp = lpfc_findnode_did(vport, Fabric_DID);
1211	if (!ndlp) {
1212		/* Cannot find existing Fabric ndlp, so allocate a new one */
1213		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
1214		if (!ndlp)
1215			return 0;
1216		lpfc_nlp_init(vport, ndlp, Fabric_DID);
1217		/* Put ndlp onto node list */
1218		lpfc_enqueue_node(vport, ndlp);
1219	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
1220		/* re-setup ndlp without removing from node list */
1221		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
1222		if (!ndlp)
1223			return 0;
1224	}
1225
1226	if (lpfc_issue_els_fdisc(vport, ndlp, 0)) {
1227		/* decrement node reference count to trigger the release of
1228		 * the node.
1229		 */
1230		lpfc_nlp_put(ndlp);
1231		return 0;
1232	}
1233	return 1;
1234}
1235
1236/**
1237 * lpfc_more_plogi - Check and issue remaining plogis for a vport
1238 * @vport: pointer to a host virtual N_Port data structure.
1239 *
1240 * This routine checks whether there are more remaining Port Logins
1241 * (PLOGI) to be issued for the @vport. If so, it will invoke the routine
1242 * lpfc_els_disc_plogi() to go through the Node Port Recovery (NPR) nodes
1243 * to issue ELS PLOGIs up to the configured discover threads with the
1244 * @vport (@vport->cfg_discovery_threads). The function also decrements
1245 * the @vport's num_disc_nodes by 1 if it is not already 0.
1246 **/
1247void
1248lpfc_more_plogi(struct lpfc_vport *vport)
1249{
1250	int sentplogi;
1251
1252	if (vport->num_disc_nodes)
1253		vport->num_disc_nodes--;
1254
1255	/* Continue discovery with <num_disc_nodes> PLOGIs to go */
1256	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1257			 "0232 Continue discovery with %d PLOGIs to go "
1258			 "Data: x%x x%x x%x\n",
1259			 vport->num_disc_nodes, vport->fc_plogi_cnt,
1260			 vport->fc_flag, vport->port_state);
1261	/* Check to see if there are more PLOGIs to be sent */
1262	if (vport->fc_flag & FC_NLP_MORE)
1263		/* go thru NPR nodes and issue any remaining ELS PLOGIs */
1264		sentplogi = lpfc_els_disc_plogi(vport);
1265
1266	return;
1267}
1268
1269/**
1270 * lpfc_plogi_confirm_nport - Confirm plogi wwpn matches stored ndlp
1271 * @phba: pointer to lpfc hba data structure.
1272 * @prsp: pointer to response IOCB payload.
1273 * @ndlp: pointer to a node-list data structure.
1274 *
1275 * This routine checks and indicates whether the WWPN of an N_Port, retrieved
1276 * from a PLOGI, matches the WWPN that is stored in the @ndlp for that N_Port.
1277 * The following cases are considered N_Port confirmed:
1278 * 1) The N_Port is a Fabric ndlp; 2) The @ndlp is on vport list and matches
1279 * the WWPN of the N_Port logged into; 3) The @ndlp is not on vport list but
1280 * it does not have WWPN assigned either. If the WWPN is confirmed, the
1281 * pointer to the @ndlp will be returned. If the WWPN is not confirmed:
1282 * 1) if there is a node on vport list other than the @ndlp with the same
1283 * WWPN of the N_Port PLOGI logged into, the lpfc_unreg_rpi() will be invoked
1284 * on that node to release the RPI associated with the node; 2) if there is
1285 * no node found on vport list with the same WWPN of the N_Port PLOGI logged
1286 * into, a new node shall be allocated (or activated). In either case, the
1287 * parameters of the @ndlp shall be copied to the new_ndlp, the @ndlp shall
1288 * be released and the new_ndlp shall be put on to the vport node list and
1289 * its pointer returned as the confirmed node.
1290 *
1291 * Note that before the @ndlp is "released", the keepDID (the DID of the
1292 * non-matching or inactive "new_ndlp" on the vport node list) is assigned
1293 * to the nlp_DID of the @ndlp. This is because releasing the @ndlp actually
1294 * puts it into an inactive state on the vport node list, and the vport node
1295 * list management algorithm does not allow two nodes with the same DID.
1296 *
1297 * Return code
1298 *   pointer to the PLOGI N_Port @ndlp
1299 **/
1300static struct lpfc_nodelist *
1301lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp,
1302			 struct lpfc_nodelist *ndlp)
1303{
1304	struct lpfc_vport    *vport = ndlp->vport;
1305	struct lpfc_nodelist *new_ndlp;
1306	struct lpfc_rport_data *rdata;
1307	struct fc_rport *rport;
1308	struct serv_parm *sp;
1309	uint8_t  name[sizeof(struct lpfc_name)];
1310	uint32_t rc, keepDID = 0;
1311	int  put_node;
1312	int  put_rport;
1313
1314	/* Fabric nodes can have the same WWPN so we don't bother searching
1315	 * by WWPN.  Just return the ndlp that was given to us.
1316	 */
1317	if (ndlp->nlp_type & NLP_FABRIC)
1318		return ndlp;
1319
1320	sp = (struct serv_parm *) ((uint8_t *) prsp + sizeof(uint32_t));
1321	memset(name, 0, sizeof(struct lpfc_name));
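	/* 'name' is left all-zero; the memcmp() calls against it below test
	 * whether the existing ndlp has any WWPN assigned at all.
	 */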
1322
1323	/* Now we find out if the NPort we are logging into, matches the WWPN
1324	 * we have for that ndlp. If not, we have some work to do.
1325	 */
1326	new_ndlp = lpfc_findnode_wwpn(vport, &sp->portName);
1327
1328	if (new_ndlp == ndlp && NLP_CHK_NODE_ACT(new_ndlp))
1329		return ndlp;
1330
1331	if (!new_ndlp) {
1332		rc = memcmp(&ndlp->nlp_portname, name,
1333			    sizeof(struct lpfc_name));
1334		if (!rc)
1335			return ndlp;
1336		new_ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_ATOMIC);
1337		if (!new_ndlp)
1338			return ndlp;
1339		lpfc_nlp_init(vport, new_ndlp, ndlp->nlp_DID);
1340	} else if (!NLP_CHK_NODE_ACT(new_ndlp)) {
1341		rc = memcmp(&ndlp->nlp_portname, name,
1342			    sizeof(struct lpfc_name));
1343		if (!rc)
1344			return ndlp;
1345		new_ndlp = lpfc_enable_node(vport, new_ndlp,
1346						NLP_STE_UNUSED_NODE);
1347		if (!new_ndlp)
1348			return ndlp;
1349		keepDID = new_ndlp->nlp_DID;
1350	} else
1351		keepDID = new_ndlp->nlp_DID;
1352
1353	lpfc_unreg_rpi(vport, new_ndlp);
1354	new_ndlp->nlp_DID = ndlp->nlp_DID;
1355	new_ndlp->nlp_prev_state = ndlp->nlp_prev_state;
1356
1357	if (ndlp->nlp_flag & NLP_NPR_2B_DISC)
1358		new_ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1359	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1360
1361	/* Set state will put new_ndlp on to node list if not already done */
1362	lpfc_nlp_set_state(vport, new_ndlp, ndlp->nlp_state);
1363
1364	/* Move this back to NPR state */
1365	if (memcmp(&ndlp->nlp_portname, name, sizeof(struct lpfc_name)) == 0) {
1366		/* The new_ndlp is replacing ndlp totally, so we need
1367		 * to put ndlp on UNUSED list and try to free it.
1368		 */
1369
1370		/* Fix up the rport accordingly */
1371		rport =  ndlp->rport;
1372		if (rport) {
1373			rdata = rport->dd_data;
1374			if (rdata->pnode == ndlp) {
1375				lpfc_nlp_put(ndlp);
1376				ndlp->rport = NULL;
1377				rdata->pnode = lpfc_nlp_get(new_ndlp);
1378				new_ndlp->rport = rport;
1379			}
1380			new_ndlp->nlp_type = ndlp->nlp_type;
1381		}
1382		/* We shall actually free the ndlp only when both its nlp_DID and
1383		 * nlp_portname fields are 0, to avoid leaving an ndlp on the
1384		 * nodelist that will never be used.
1385		 */
1386		if (ndlp->nlp_DID == 0) {
1387			spin_lock_irq(&phba->ndlp_lock);
1388			NLP_SET_FREE_REQ(ndlp);
1389			spin_unlock_irq(&phba->ndlp_lock);
1390		}
1391
1392		/* Two ndlps cannot have the same did on the nodelist */
1393		ndlp->nlp_DID = keepDID;
1394		lpfc_drop_node(vport, ndlp);
1395	}
1396	else {
1397		lpfc_unreg_rpi(vport, ndlp);
1398		/* Two ndlps cannot have the same did */
1399		ndlp->nlp_DID = keepDID;
1400		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
1401		/* Since we are swapping the ndlp passed in with the new one
1402		 * and the did has already been swapped, copy over the
1403		 * state and names.
1404		 */
1405		memcpy(&new_ndlp->nlp_portname, &ndlp->nlp_portname,
1406			sizeof(struct lpfc_name));
1407		memcpy(&new_ndlp->nlp_nodename, &ndlp->nlp_nodename,
1408			sizeof(struct lpfc_name));
1409		new_ndlp->nlp_state = ndlp->nlp_state;
1410		/* Fix up the rport accordingly */
1411		rport = ndlp->rport;
1412		if (rport) {
1413			rdata = rport->dd_data;
1414			put_node = rdata->pnode != NULL;
1415			put_rport = ndlp->rport != NULL;
1416			rdata->pnode = NULL;
1417			ndlp->rport = NULL;
1418			if (put_node)
1419				lpfc_nlp_put(ndlp);
1420			if (put_rport)
1421				put_device(&rport->dev);
1422		}
1423	}
1424	return new_ndlp;
1425}
1426
1427/**
1428 * lpfc_end_rscn - Check and handle more rscn for a vport
1429 * @vport: pointer to a host virtual N_Port data structure.
1430 *
1431 * This routine checks whether more Registration State Change
1432 * Notifications (RSCNs) came in while the discovery state machine was in
1433 * the FC_RSCN_MODE. If so, the lpfc_els_handle_rscn() routine will be
1434 * invoked to handle the additional RSCNs for the @vport. Otherwise, the
1435 * FC_RSCN_MODE bit will be cleared with the @vport to mark as the end of
1436 * handling the RSCNs.
1437 **/
1438void
1439lpfc_end_rscn(struct lpfc_vport *vport)
1440{
1441	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1442
1443	if (vport->fc_flag & FC_RSCN_MODE) {
1444		/*
1445		 * Check to see if more RSCNs came in while we were
1446		 * processing this one.
1447		 */
1448		if (vport->fc_rscn_id_cnt ||
1449		    (vport->fc_flag & FC_RSCN_DISCOVERY) != 0)
1450			lpfc_els_handle_rscn(vport);
1451		else {
1452			spin_lock_irq(shost->host_lock);
1453			vport->fc_flag &= ~FC_RSCN_MODE;
1454			spin_unlock_irq(shost->host_lock);
1455		}
1456	}
1457}
1458
1459/**
1460 * lpfc_cmpl_els_plogi - Completion callback function for plogi
1461 * @phba: pointer to lpfc hba data structure.
1462 * @cmdiocb: pointer to lpfc command iocb data structure.
1463 * @rspiocb: pointer to lpfc response iocb data structure.
1464 *
1465 * This routine is the completion callback function for issuing the Port
1466 * Login (PLOGI) command. For PLOGI completion, there must be an active
1467 * ndlp on the vport node list that matches the remote node ID from the
1468 * PLOGI response IOCB. If such an ndlp does not exist, the PLOGI is simply
1469 * ignored and the command IOCB released. The PLOGI response IOCB status is
1470 * checked for error conditions. If an error status is reported, a PLOGI
1471 * retry shall be attempted by invoking the lpfc_els_retry() routine.
1472 * Otherwise, the lpfc_plogi_confirm_nport() routine shall be invoked on
1473 * the ndlp and the NLP_EVT_CMPL_PLOGI state to the Discover State Machine
1474 * (DSM) is set for this PLOGI completion. Finally, it checks whether
1475 * there are additional N_Port nodes with the vport that need to perform
1476 * PLOGI. If so, the lpfc_more_plogi() routine is invoked to issue additional
1477 * PLOGIs.
1478 **/
1479static void
1480lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1481		    struct lpfc_iocbq *rspiocb)
1482{
1483	struct lpfc_vport *vport = cmdiocb->vport;
1484	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1485	IOCB_t *irsp;
1486	struct lpfc_nodelist *ndlp;
1487	struct lpfc_dmabuf *prsp;
1488	int disc, rc, did, type;
1489
1490	/* we pass cmdiocb to state machine which needs rspiocb as well */
1491	cmdiocb->context_un.rsp_iocb = rspiocb;
1492
1493	irsp = &rspiocb->iocb;
1494	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1495		"PLOGI cmpl:      status:x%x/x%x did:x%x",
1496		irsp->ulpStatus, irsp->un.ulpWord[4],
1497		irsp->un.elsreq64.remoteID);
1498
1499	ndlp = lpfc_findnode_did(vport, irsp->un.elsreq64.remoteID);
1500	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1501		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1502				 "0136 PLOGI completes to NPort x%x "
1503				 "with no ndlp. Data: x%x x%x x%x\n",
1504				 irsp->un.elsreq64.remoteID,
1505				 irsp->ulpStatus, irsp->un.ulpWord[4],
1506				 irsp->ulpIoTag);
1507		goto out;
1508	}
1509
1510	/* Since ndlp can be freed in the disc state machine, note if this node
1511	 * is being used during discovery.
1512	 */
1513	spin_lock_irq(shost->host_lock);
1514	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1515	ndlp->nlp_flag &= ~NLP_NPR_2B_DISC;
1516	spin_unlock_irq(shost->host_lock);
1517	rc   = 0;
1518
1519	/* PLOGI completes to NPort <nlp_DID> */
1520	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1521			 "0102 PLOGI completes to NPort x%x "
1522			 "Data: x%x x%x x%x x%x x%x\n",
1523			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1524			 irsp->ulpTimeout, disc, vport->num_disc_nodes);
1525	/* Check to see if link went down during discovery */
1526	if (lpfc_els_chk_latt(vport)) {
1527		spin_lock_irq(shost->host_lock);
1528		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1529		spin_unlock_irq(shost->host_lock);
1530		goto out;
1531	}
1532
1533	/* ndlp could be freed in DSM, save these values now */
1534	type = ndlp->nlp_type;
1535	did = ndlp->nlp_DID;
1536
1537	if (irsp->ulpStatus) {
1538		/* Check for retry */
1539		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1540			/* ELS command is being retried */
1541			if (disc) {
1542				spin_lock_irq(shost->host_lock);
1543				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1544				spin_unlock_irq(shost->host_lock);
1545			}
1546			goto out;
1547		}
1548		/* PLOGI failed. Don't print the vport-to-vport rjts */
1549		if (irsp->ulpStatus != IOSTAT_LS_RJT ||
1550			(((irsp->un.ulpWord[4]) >> 16 != LSRJT_INVALID_CMD) &&
1551			((irsp->un.ulpWord[4]) >> 16 != LSRJT_UNABLE_TPC)) ||
1552			(phba)->pport->cfg_log_verbose & LOG_ELS)
1553			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1554				 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
1555				 ndlp->nlp_DID, irsp->ulpStatus,
1556				 irsp->un.ulpWord[4]);
1557		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1558		if (lpfc_error_lost_link(irsp))
1559			rc = NLP_STE_FREED_NODE;
1560		else
1561			rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1562						     NLP_EVT_CMPL_PLOGI);
1563	} else {
1564		/* Good status, call state machine */
1565		prsp = list_entry(((struct lpfc_dmabuf *)
1566				   cmdiocb->context2)->list.next,
1567				  struct lpfc_dmabuf, list);
1568		ndlp = lpfc_plogi_confirm_nport(phba, prsp->virt, ndlp);
1569		rc = lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1570					     NLP_EVT_CMPL_PLOGI);
1571	}
1572
1573	if (disc && vport->num_disc_nodes) {
1574		/* Check to see if there are more PLOGIs to be sent */
1575		lpfc_more_plogi(vport);
1576
1577		if (vport->num_disc_nodes == 0) {
1578			spin_lock_irq(shost->host_lock);
1579			vport->fc_flag &= ~FC_NDISC_ACTIVE;
1580			spin_unlock_irq(shost->host_lock);
1581
1582			lpfc_can_disctmo(vport);
1583			lpfc_end_rscn(vport);
1584		}
1585	}
1586
1587out:
1588	lpfc_els_free_iocb(phba, cmdiocb);
1589	return;
1590}
1591
1592/**
1593 * lpfc_issue_els_plogi - Issue a plogi iocb command for a vport
1594 * @vport: pointer to a host virtual N_Port data structure.
1595 * @did: destination port identifier.
1596 * @retry: number of retries to the command IOCB.
1597 *
1598 * This routine issues a Port Login (PLOGI) command to a remote N_Port
1599 * (with the @did) for a @vport. Before issuing a PLOGI to a remote N_Port,
1600 * the ndlp with the remote N_Port DID must exist on the @vport's ndlp list.
1601 * This routine constructs the proper fields of the PLOGI IOCB and invokes
1602 * the lpfc_sli_issue_iocb() routine to send out the PLOGI ELS command.
1603 *
1604 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1605 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1606 * will be stored into the context1 field of the IOCB for the completion
1607 * callback function to the PLOGI ELS command.
1608 *
1609 * Return code
1610 *   0 - Successfully issued a plogi for @vport
1611 *   1 - failed to issue a plogi for @vport
1612 **/
1613int
1614lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
1615{
1616	struct lpfc_hba  *phba = vport->phba;
1617	struct serv_parm *sp;
1618	IOCB_t *icmd;
1619	struct lpfc_nodelist *ndlp;
1620	struct lpfc_iocbq *elsiocb;
1621	struct lpfc_sli *psli;
1622	uint8_t *pcmd;
1623	uint16_t cmdsize;
1624	int ret;
1625
1626	psli = &phba->sli;
1627
1628	ndlp = lpfc_findnode_did(vport, did);
1629	if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
1630		ndlp = NULL;
1631
1632	/* If ndlp is not NULL, we will bump the reference count on it */
1633	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
1634	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
1635				     ELS_CMD_PLOGI);
1636	if (!elsiocb)
1637		return 1;
1638
1639	icmd = &elsiocb->iocb;
1640	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1641
1642	/* For PLOGI request, remainder of payload is service parameters */
1643	*((uint32_t *) (pcmd)) = ELS_CMD_PLOGI;
1644	pcmd += sizeof(uint32_t);
1645	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
1646	sp = (struct serv_parm *) pcmd;
1647
1648	if (sp->cmn.fcphLow < FC_PH_4_3)
1649		sp->cmn.fcphLow = FC_PH_4_3;
1650
1651	if (sp->cmn.fcphHigh < FC_PH3)
1652		sp->cmn.fcphHigh = FC_PH3;
1653
1654	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1655		"Issue PLOGI:     did:x%x",
1656		did, 0, 0);
1657
1658	phba->fc_stat.elsXmitPLOGI++;
1659	elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
1660	ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
1661
1662	if (ret == IOCB_ERROR) {
1663		lpfc_els_free_iocb(phba, elsiocb);
1664		return 1;
1665	}
1666	return 0;
1667}
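
/*
 * Illustrative sketch (editor's addition, not part of the original driver
 * source): the usual caller pattern for starting a PLOGI to a node that
 * already has an ndlp on the @vport list. The helper name
 * lpfc_sketch_start_plogi is hypothetical; the routines and fields it uses
 * are the ones already used in this file.
 */
static inline void
lpfc_sketch_start_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/* lpfc_issue_els_plogi() returns 0 on success; only then is the
	 * node moved to PLOGI_ISSUE so that lpfc_cmpl_els_plogi() can feed
	 * the discovery state machine when the response arrives.
	 */
	if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
		ndlp->nlp_prev_state = ndlp->nlp_state;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
	}
}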
1668
1669/**
1670 * lpfc_cmpl_els_prli - Completion callback function for prli
1671 * @phba: pointer to lpfc hba data structure.
1672 * @cmdiocb: pointer to lpfc command iocb data structure.
1673 * @rspiocb: pointer to lpfc response iocb data structure.
1674 *
1675 * This routine is the completion callback function for a Process Login
1676 * (PRLI) ELS command. The PRLI response IOCB status is checked for error
1677 * status. If there is error status reported, PRLI retry shall be attempted
1678 * by invoking the lpfc_els_retry() routine. Otherwise, the state
1679 * NLP_EVT_CMPL_PRLI is sent to the Discover State Machine (DSM) for this
1680 * ndlp to mark the PRLI completion.
1681 **/
1682static void
1683lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1684		   struct lpfc_iocbq *rspiocb)
1685{
1686	struct lpfc_vport *vport = cmdiocb->vport;
1687	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1688	IOCB_t *irsp;
1689	struct lpfc_sli *psli;
1690	struct lpfc_nodelist *ndlp;
1691
1692	psli = &phba->sli;
1693	/* we pass cmdiocb to state machine which needs rspiocb as well */
1694	cmdiocb->context_un.rsp_iocb = rspiocb;
1695
1696	irsp = &(rspiocb->iocb);
1697	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1698	spin_lock_irq(shost->host_lock);
1699	ndlp->nlp_flag &= ~NLP_PRLI_SND;
1700	spin_unlock_irq(shost->host_lock);
1701
1702	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1703		"PRLI cmpl:       status:x%x/x%x did:x%x",
1704		irsp->ulpStatus, irsp->un.ulpWord[4],
1705		ndlp->nlp_DID);
1706	/* PRLI completes to NPort <nlp_DID> */
1707	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1708			 "0103 PRLI completes to NPort x%x "
1709			 "Data: x%x x%x x%x x%x\n",
1710			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1711			 irsp->ulpTimeout, vport->num_disc_nodes);
1712
1713	vport->fc_prli_sent--;
1714	/* Check to see if link went down during discovery */
1715	if (lpfc_els_chk_latt(vport))
1716		goto out;
1717
1718	if (irsp->ulpStatus) {
1719		/* Check for retry */
1720		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
1721			/* ELS command is being retried */
1722			goto out;
1723		}
1724		/* PRLI failed */
1725		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
1726				 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
1727				 ndlp->nlp_DID, irsp->ulpStatus,
1728				 irsp->un.ulpWord[4]);
1729		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
1730		if (lpfc_error_lost_link(irsp))
1731			goto out;
1732		else
1733			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1734						NLP_EVT_CMPL_PRLI);
1735	} else
1736		/* Good status, call state machine */
1737		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
1738					NLP_EVT_CMPL_PRLI);
1739out:
1740	lpfc_els_free_iocb(phba, cmdiocb);
1741	return;
1742}
1743
1744/**
1745 * lpfc_issue_els_prli - Issue a prli iocb command for a vport
1746 * @vport: pointer to a host virtual N_Port data structure.
1747 * @ndlp: pointer to a node-list data structure.
1748 * @retry: number of retries to the command IOCB.
1749 *
1750 * This routine issues a Process Login (PRLI) ELS command for the
1751 * @vport. The PRLI service parameters are set up in the payload of the
1752 * PRLI Request command and the pointer to lpfc_cmpl_els_prli() routine
1753 * is put to the IOCB completion callback func field before invoking the
1754 * routine lpfc_sli_issue_iocb() to send out PRLI command.
1755 *
1756 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
1757 * will be incremented by 1 for holding the ndlp and the reference to ndlp
1758 * will be stored into the context1 field of the IOCB for the completion
1759 * callback function to the PRLI ELS command.
1760 *
1761 * Return code
1762 *   0 - successfully issued prli iocb command for @vport
1763 *   1 - failed to issue prli iocb command for @vport
1764 **/
1765int
1766lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
1767		    uint8_t retry)
1768{
1769	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
1770	struct lpfc_hba *phba = vport->phba;
1771	PRLI *npr;
1772	IOCB_t *icmd;
1773	struct lpfc_iocbq *elsiocb;
1774	uint8_t *pcmd;
1775	uint16_t cmdsize;
1776
1777	cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
1778	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
1779				     ndlp->nlp_DID, ELS_CMD_PRLI);
1780	if (!elsiocb)
1781		return 1;
1782
1783	icmd = &elsiocb->iocb;
1784	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
1785
1786	/* For PRLI request, remainder of payload is service parameters */
1787	memset(pcmd, 0, (sizeof(PRLI) + sizeof(uint32_t)));
1788	*((uint32_t *) (pcmd)) = ELS_CMD_PRLI;
1789	pcmd += sizeof(uint32_t);
1790
1791	/* For PRLI, remainder of payload is PRLI parameter page */
1792	npr = (PRLI *) pcmd;
1793	/*
1794	 * If our firmware version is 3.20 or later,
1795	 * set the following bits for FC-TAPE support.
1796	 */
1797	if (phba->vpd.rev.feaLevelHigh >= 0x02) {
1798		npr->ConfmComplAllowed = 1;
1799		npr->Retry = 1;
1800		npr->TaskRetryIdReq = 1;
1801	}
1802	npr->estabImagePair = 1;
1803	npr->readXferRdyDis = 1;
1804
1805	/* For FCP support */
1806	npr->prliType = PRLI_FCP_TYPE;
1807	npr->initiatorFunc = 1;
1808
1809	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1810		"Issue PRLI:      did:x%x",
1811		ndlp->nlp_DID, 0, 0);
1812
1813	phba->fc_stat.elsXmitPRLI++;
1814	elsiocb->iocb_cmpl = lpfc_cmpl_els_prli;
1815	spin_lock_irq(shost->host_lock);
1816	ndlp->nlp_flag |= NLP_PRLI_SND;
1817	spin_unlock_irq(shost->host_lock);
1818	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
1819	    IOCB_ERROR) {
1820		spin_lock_irq(shost->host_lock);
1821		ndlp->nlp_flag &= ~NLP_PRLI_SND;
1822		spin_unlock_irq(shost->host_lock);
1823		lpfc_els_free_iocb(phba, elsiocb);
1824		return 1;
1825	}
1826	vport->fc_prli_sent++;
1827	return 0;
1828}
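
/*
 * Illustrative sketch (editor's addition, not part of the original driver
 * source): once a remote port has completed PLOGI, a PRLI is normally
 * driven the same way - the node is moved to PRLI_ISSUE only if the issue
 * routine succeeded. lpfc_sketch_start_prli is a hypothetical name.
 */
static inline void
lpfc_sketch_start_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	/* lpfc_issue_els_prli() increments vport->fc_prli_sent on success;
	 * lpfc_cmpl_els_prli() decrements it when the response completes.
	 */
	if (!lpfc_issue_els_prli(vport, ndlp, 0)) {
		ndlp->nlp_prev_state = ndlp->nlp_state;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
	}
}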
1829
1830/**
1831 * lpfc_rscn_disc - Perform rscn discovery for a vport
1832 * @vport: pointer to a host virtual N_Port data structure.
1833 *
1834 * This routine performs Registration State Change Notification (RSCN)
1835 * discovery for a @vport. If the @vport's node port recovery count is not
1836 * zero, it will invoke the lpfc_els_disc_plogi() to perform PLOGI for all
1837 * the nodes that need recovery. If no PLOGIs were issued by the
1838 * lpfc_els_disc_plogi() routine, the lpfc_end_rscn() routine shall be
1839 * invoked to check for and handle any additional RSCNs that arrived while
1840 * the current ones were being processed.
1841 **/
1842static void
1843lpfc_rscn_disc(struct lpfc_vport *vport)
1844{
1845	lpfc_can_disctmo(vport);
1846
1847	/* RSCN discovery */
1848	/* go thru NPR nodes and issue ELS PLOGIs */
1849	if (vport->fc_npr_cnt)
1850		if (lpfc_els_disc_plogi(vport))
1851			return;
1852
1853	lpfc_end_rscn(vport);
1854}
1855
1856/**
1857 * lpfc_adisc_done - Complete the adisc phase of discovery
1858 * @vport: pointer to lpfc_vport hba data structure that finished all ADISCs.
1859 *
1860 * This function is called when the final ADISC is completed during discovery.
1861 * This function handles clearing link attention or issuing reg_vpi depending
1862 * on whether npiv is enabled. This function also kicks off the PLOGI phase of
1863 * discovery.
1864 * This function is called with no locks held.
1865 **/
1866static void
1867lpfc_adisc_done(struct lpfc_vport *vport)
1868{
1869	struct Scsi_Host   *shost = lpfc_shost_from_vport(vport);
1870	struct lpfc_hba   *phba = vport->phba;
1871
1872	/*
1873	 * For NPIV, cmpl_reg_vpi will set port_state to READY,
1874	 * and continue discovery.
1875	 */
1876	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
1877	    !(vport->fc_flag & FC_RSCN_MODE) &&
1878	    (phba->sli_rev < LPFC_SLI_REV4)) {
1879		lpfc_issue_reg_vpi(phba, vport);
1880		return;
1881	}
1882	/*
1883	 * For SLI2, we need to set port_state to READY
1884	 * and continue discovery.
1885	 */
1886	if (vport->port_state < LPFC_VPORT_READY) {
1887		/* If we get here, there is nothing to ADISC */
1888		if (vport->port_type == LPFC_PHYSICAL_PORT)
1889			lpfc_issue_clear_la(phba, vport);
1890		if (!(vport->fc_flag & FC_ABORT_DISCOVERY)) {
1891			vport->num_disc_nodes = 0;
1892			/* go thru NPR list, issue ELS PLOGIs */
1893			if (vport->fc_npr_cnt)
1894				lpfc_els_disc_plogi(vport);
1895			if (!vport->num_disc_nodes) {
1896				spin_lock_irq(shost->host_lock);
1897				vport->fc_flag &= ~FC_NDISC_ACTIVE;
1898				spin_unlock_irq(shost->host_lock);
1899				lpfc_can_disctmo(vport);
1900				lpfc_end_rscn(vport);
1901			}
1902		}
1903		vport->port_state = LPFC_VPORT_READY;
1904	} else
1905		lpfc_rscn_disc(vport);
1906}
1907
1908/**
1909 * lpfc_more_adisc - Issue more adisc as needed
1910 * @vport: pointer to a host virtual N_Port data structure.
1911 *
1912 * This routine determines whether there are more ndlps on a @vport's
1913 * node list that need to have Address Discover (ADISC) issued. If so, it will
1914 * invoke the lpfc_els_disc_adisc() routine to issue ADISC on the @vport's
1915 * remaining nodes which need to have ADISC sent.
1916 **/
1917void
1918lpfc_more_adisc(struct lpfc_vport *vport)
1919{
1920	int sentadisc;
1921
1922	if (vport->num_disc_nodes)
1923		vport->num_disc_nodes--;
1924	/* Continue discovery with <num_disc_nodes> ADISCs to go */
1925	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
1926			 "0210 Continue discovery with %d ADISCs to go "
1927			 "Data: x%x x%x x%x\n",
1928			 vport->num_disc_nodes, vport->fc_adisc_cnt,
1929			 vport->fc_flag, vport->port_state);
1930	/* Check to see if there are more ADISCs to be sent */
1931	if (vport->fc_flag & FC_NLP_MORE) {
1932		lpfc_set_disctmo(vport);
1933		/* go thru NPR nodes and issue any remaining ELS ADISCs */
1934		sentadisc = lpfc_els_disc_adisc(vport);
1935	}
1936	if (!vport->num_disc_nodes)
1937		lpfc_adisc_done(vport);
1938	return;
1939}
1940
1941/**
1942 * lpfc_cmpl_els_adisc - Completion callback function for adisc
1943 * @phba: pointer to lpfc hba data structure.
1944 * @cmdiocb: pointer to lpfc command iocb data structure.
1945 * @rspiocb: pointer to lpfc response iocb data structure.
1946 *
1947 * This routine is the completion function for issuing the Address Discover
1948 * (ADISC) command. It first checks to see whether link went down during
1949 * the discovery process. If so, the node will be marked as node port
1950 * recovery for issuing discover IOCB by the link attention handler and
1951 * exit. Otherwise, the response status is checked. If error was reported
1952 * in the response status, the ADISC command shall be retried by invoking
1953 * the lpfc_els_retry() routine. Otherwise, if no error was reported in
1954 * the response status, the state machine is invoked to set transition
1955 * with respect to NLP_EVT_CMPL_ADISC event.
1956 **/
1957static void
1958lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1959		    struct lpfc_iocbq *rspiocb)
1960{
1961	struct lpfc_vport *vport = cmdiocb->vport;
1962	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
1963	IOCB_t *irsp;
1964	struct lpfc_nodelist *ndlp;
1965	int  disc;
1966
1967	/* we pass cmdiocb to state machine which needs rspiocb as well */
1968	cmdiocb->context_un.rsp_iocb = rspiocb;
1969
1970	irsp = &(rspiocb->iocb);
1971	ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
1972
1973	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
1974		"ADISC cmpl:      status:x%x/x%x did:x%x",
1975		irsp->ulpStatus, irsp->un.ulpWord[4],
1976		ndlp->nlp_DID);
1977
1978	/* Since ndlp can be freed in the disc state machine, note if this node
1979	 * is being used during discovery.
1980	 */
1981	spin_lock_irq(shost->host_lock);
1982	disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC);
1983	ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC);
1984	spin_unlock_irq(shost->host_lock);
1985	/* ADISC completes to NPort <nlp_DID> */
1986	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
1987			 "0104 ADISC completes to NPort x%x "
1988			 "Data: x%x x%x x%x x%x x%x\n",
1989			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
1990			 irsp->ulpTimeout, disc, vport->num_disc_nodes);
1991	/* Check to see if link went down during discovery */
1992	if (lpfc_els_chk_latt(vport)) {
1993		spin_lock_irq(shost->host_lock);
1994		ndlp->nlp_flag |= NLP_NPR_2B_DISC;
1995		spin_unlock_irq(shost->host_lock);
1996		goto out;
1997	}
1998
1999	if (irsp->ulpStatus) {
2000		/* Check for retry */
2001		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
2002			/* ELS command is being retried */
2003			if (disc) {
2004				spin_lock_irq(shost->host_lock);
2005				ndlp->nlp_flag |= NLP_NPR_2B_DISC;
2006				spin_unlock_irq(shost->host_lock);
2007				lpfc_set_disctmo(vport);
2008			}
2009			goto out;
2010		}
2011		/* ADISC failed */
2012		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2013				 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
2014				 ndlp->nlp_DID, irsp->ulpStatus,
2015				 irsp->un.ulpWord[4]);
2016		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2017		if (!lpfc_error_lost_link(irsp))
2018			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2019						NLP_EVT_CMPL_ADISC);
2020	} else
2021		/* Good status, call state machine */
2022		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2023					NLP_EVT_CMPL_ADISC);
2024
2025	/* Check to see if there are more ADISCs to be sent */
2026	if (disc && vport->num_disc_nodes)
2027		lpfc_more_adisc(vport);
2028out:
2029	lpfc_els_free_iocb(phba, cmdiocb);
2030	return;
2031}
2032
2033/**
2034 * lpfc_issue_els_adisc - Issue an address discover iocb to a node on a vport
2035 * @vport: pointer to a virtual N_Port data structure.
2036 * @ndlp: pointer to a node-list data structure.
2037 * @retry: number of retries to the command IOCB.
2038 *
2039 * This routine issues an Address Discover (ADISC) for an @ndlp on a
2040 * @vport. It prepares the payload of the ADISC ELS command, updates the
2041 * flags and state of the ndlp, and invokes the lpfc_sli_issue_iocb() routine
2042 * to issue the ADISC ELS command.
2043 *
2044 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2045 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2046 * will be stored into the context1 field of the IOCB for the completion
2047 * callback function to the ADISC ELS command.
2048 *
2049 * Return code
2050 *   0 - successfully issued adisc
2051 *   1 - failed to issue adisc
2052 **/
2053int
2054lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2055		     uint8_t retry)
2056{
2057	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2058	struct lpfc_hba  *phba = vport->phba;
2059	ADISC *ap;
2060	IOCB_t *icmd;
2061	struct lpfc_iocbq *elsiocb;
2062	uint8_t *pcmd;
2063	uint16_t cmdsize;
2064
2065	cmdsize = (sizeof(uint32_t) + sizeof(ADISC));
2066	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2067				     ndlp->nlp_DID, ELS_CMD_ADISC);
2068	if (!elsiocb)
2069		return 1;
2070
2071	icmd = &elsiocb->iocb;
2072	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2073
2074	/* For ADISC request, remainder of payload is service parameters */
2075	*((uint32_t *) (pcmd)) = ELS_CMD_ADISC;
2076	pcmd += sizeof(uint32_t);
2077
2078	/* Fill in ADISC payload */
2079	ap = (ADISC *) pcmd;
2080	ap->hardAL_PA = phba->fc_pref_ALPA;
2081	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
2082	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2083	ap->DID = be32_to_cpu(vport->fc_myDID);
2084
2085	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2086		"Issue ADISC:     did:x%x",
2087		ndlp->nlp_DID, 0, 0);
2088
2089	phba->fc_stat.elsXmitADISC++;
2090	elsiocb->iocb_cmpl = lpfc_cmpl_els_adisc;
2091	spin_lock_irq(shost->host_lock);
2092	ndlp->nlp_flag |= NLP_ADISC_SND;
2093	spin_unlock_irq(shost->host_lock);
2094	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2095	    IOCB_ERROR) {
2096		spin_lock_irq(shost->host_lock);
2097		ndlp->nlp_flag &= ~NLP_ADISC_SND;
2098		spin_unlock_irq(shost->host_lock);
2099		lpfc_els_free_iocb(phba, elsiocb);
2100		return 1;
2101	}
2102	return 0;
2103}
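
/*
 * Illustrative sketch (editor's addition, not part of the original driver
 * source): ADISC is the lightweight alternative to PLOGI used to
 * re-validate an existing login after a link event. The caller pattern
 * mirrors the PLOGI/PRLI cases above; lpfc_sketch_start_adisc is a
 * hypothetical name.
 */
static inline void
lpfc_sketch_start_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	if (!lpfc_issue_els_adisc(vport, ndlp, 0)) {
		ndlp->nlp_prev_state = ndlp->nlp_state;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
	}
}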
2104
2105/**
2106 * lpfc_cmpl_els_logo - Completion callback function for logo
2107 * @phba: pointer to lpfc hba data structure.
2108 * @cmdiocb: pointer to lpfc command iocb data structure.
2109 * @rspiocb: pointer to lpfc response iocb data structure.
2110 *
2111 * This routine is the completion function for issuing the ELS Logout (LOGO)
2112 * command. If no error status was reported from the LOGO response, the
2113 * state machine of the associated ndlp shall be invoked for transition with
2114 * respect to NLP_EVT_CMPL_LOGO event. Otherwise, if error status was reported,
2115 * the lpfc_els_retry() routine will be invoked to retry the LOGO command.
2116 **/
2117static void
2118lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2119		   struct lpfc_iocbq *rspiocb)
2120{
2121	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2122	struct lpfc_vport *vport = ndlp->vport;
2123	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2124	IOCB_t *irsp;
2125	struct lpfc_sli *psli;
2126
2127	psli = &phba->sli;
2128	/* we pass cmdiocb to state machine which needs rspiocb as well */
2129	cmdiocb->context_un.rsp_iocb = rspiocb;
2130
2131	irsp = &(rspiocb->iocb);
2132	spin_lock_irq(shost->host_lock);
2133	ndlp->nlp_flag &= ~NLP_LOGO_SND;
2134	spin_unlock_irq(shost->host_lock);
2135
2136	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2137		"LOGO cmpl:       status:x%x/x%x did:x%x",
2138		irsp->ulpStatus, irsp->un.ulpWord[4],
2139		ndlp->nlp_DID);
2140	/* LOGO completes to NPort <nlp_DID> */
2141	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2142			 "0105 LOGO completes to NPort x%x "
2143			 "Data: x%x x%x x%x x%x\n",
2144			 ndlp->nlp_DID, irsp->ulpStatus, irsp->un.ulpWord[4],
2145			 irsp->ulpTimeout, vport->num_disc_nodes);
2146	/* Check to see if link went down during discovery */
2147	if (lpfc_els_chk_latt(vport))
2148		goto out;
2149
2150	if (ndlp->nlp_flag & NLP_TARGET_REMOVE) {
2151		/* NLP_EVT_DEVICE_RM should unregister the RPI
2152		 * which should abort all outstanding IOs.
2153		 */
2154		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2155					NLP_EVT_DEVICE_RM);
2156		goto out;
2157	}
2158
2159	if (irsp->ulpStatus) {
2160		/* Check for retry */
2161		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
2162			/* ELS command is being retried */
2163			goto out;
2164		/* LOGO failed */
2165		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2166				 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
2167				 ndlp->nlp_DID, irsp->ulpStatus,
2168				 irsp->un.ulpWord[4]);
2169		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
2170		if (lpfc_error_lost_link(irsp))
2171			goto out;
2172		else
2173			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2174						NLP_EVT_CMPL_LOGO);
2175	} else
2176		/* Good status, call state machine.
2177		 * This will unregister the rpi if needed.
2178		 */
2179		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
2180					NLP_EVT_CMPL_LOGO);
2181out:
2182	lpfc_els_free_iocb(phba, cmdiocb);
2183	return;
2184}
2185
2186/**
2187 * lpfc_issue_els_logo - Issue a logo to a node on a vport
2188 * @vport: pointer to a virtual N_Port data structure.
2189 * @ndlp: pointer to a node-list data structure.
2190 * @retry: number of retries to the command IOCB.
2191 *
2192 * This routine constructs and issues an ELS Logout (LOGO) iocb command
2193 * to a remote node, referred by an @ndlp on a @vport. It constructs the
2194 * payload of the IOCB, properly sets up the @ndlp state, and invokes the
2195 * lpfc_sli_issue_iocb() routine to send out the LOGO ELS command.
2196 *
2197 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2198 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2199 * will be stored into the context1 field of the IOCB for the completion
2200 * callback function to the LOGO ELS command.
2201 *
2202 * Return code
2203 *   0 - successfully issued logo
2204 *   1 - failed to issue logo
2205 **/
2206int
2207lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
2208		    uint8_t retry)
2209{
2210	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2211	struct lpfc_hba  *phba = vport->phba;
2212	IOCB_t *icmd;
2213	struct lpfc_iocbq *elsiocb;
2214	uint8_t *pcmd;
2215	uint16_t cmdsize;
2216	int rc;
2217
2218	spin_lock_irq(shost->host_lock);
2219	if (ndlp->nlp_flag & NLP_LOGO_SND) {
2220		spin_unlock_irq(shost->host_lock);
2221		return 0;
2222	}
2223	spin_unlock_irq(shost->host_lock);
2224
2225	cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name);
2226	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2227				     ndlp->nlp_DID, ELS_CMD_LOGO);
2228	if (!elsiocb)
2229		return 1;
2230
2231	icmd = &elsiocb->iocb;
2232	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2233	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
2234	pcmd += sizeof(uint32_t);
2235
2236	/* Fill in LOGO payload */
2237	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
2238	pcmd += sizeof(uint32_t);
2239	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
2240
2241	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2242		"Issue LOGO:      did:x%x",
2243		ndlp->nlp_DID, 0, 0);
2244
2245	phba->fc_stat.elsXmitLOGO++;
2246	elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
2247	spin_lock_irq(shost->host_lock);
2248	ndlp->nlp_flag |= NLP_LOGO_SND;
2249	spin_unlock_irq(shost->host_lock);
2250	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
2251
2252	if (rc == IOCB_ERROR) {
2253		spin_lock_irq(shost->host_lock);
2254		ndlp->nlp_flag &= ~NLP_LOGO_SND;
2255		spin_unlock_irq(shost->host_lock);
2256		lpfc_els_free_iocb(phba, elsiocb);
2257		return 1;
2258	}
2259	return 0;
2260}
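
/*
 * Illustrative sketch (editor's addition, not part of the original driver
 * source): sending a LOGO and parking the node in NPR. Note that
 * lpfc_issue_els_logo() returns 0 without sending anything if NLP_LOGO_SND
 * is already set, so a caller does not need to guard against a LOGO that
 * is already in flight. lpfc_sketch_start_logo is a hypothetical name.
 */
static inline void
lpfc_sketch_start_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	if (!lpfc_issue_els_logo(vport, ndlp, 0)) {
		ndlp->nlp_prev_state = ndlp->nlp_state;
		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
	}
}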
2261
2262/**
2263 * lpfc_cmpl_els_cmd - Completion callback function for generic els command
2264 * @phba: pointer to lpfc hba data structure.
2265 * @cmdiocb: pointer to lpfc command iocb data structure.
2266 * @rspiocb: pointer to lpfc response iocb data structure.
2267 *
2268 * This routine is a generic completion callback function for ELS commands.
2269 * Specifically, it is the callback function which does not need to perform
2270 * any command specific operations. It is currently used by the ELS command
2271 * issuing routines for the ELS State Change Request (SCR),
2272 * lpfc_issue_els_scr(), and the ELS Fibre Channel Address Resolution
2273 * Protocol Response (FARPR) routine, lpfc_issue_els_farpr(). Other than
2274 * certain debug loggings, this callback function simply invokes the
2275 * lpfc_els_chk_latt() routine to check whether link went down during the
2276 * discovery process.
2277 **/
2278static void
2279lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2280		  struct lpfc_iocbq *rspiocb)
2281{
2282	struct lpfc_vport *vport = cmdiocb->vport;
2283	IOCB_t *irsp;
2284
2285	irsp = &rspiocb->iocb;
2286
2287	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2288		"ELS cmd cmpl:    status:x%x/x%x did:x%x",
2289		irsp->ulpStatus, irsp->un.ulpWord[4],
2290		irsp->un.elsreq64.remoteID);
2291	/* ELS cmd tag <ulpIoTag> completes */
2292	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2293			 "0106 ELS cmd tag x%x completes Data: x%x x%x x%x\n",
2294			 irsp->ulpIoTag, irsp->ulpStatus,
2295			 irsp->un.ulpWord[4], irsp->ulpTimeout);
2296	/* Check to see if link went down during discovery */
2297	lpfc_els_chk_latt(vport);
2298	lpfc_els_free_iocb(phba, cmdiocb);
2299	return;
2300}
2301
2302/**
2303 * lpfc_issue_els_scr - Issue a scr to a node on a vport
2304 * @vport: pointer to a host virtual N_Port data structure.
2305 * @nportid: N_Port identifier to the remote node.
2306 * @retry: number of retries to the command IOCB.
2307 *
2308 * This routine issues a State Change Request (SCR) to a fabric node
2309 * on a @vport. The remote node @nportid is passed into the function. It
2310 * first searches the @vport node list to find the matching ndlp. If no such
2311 * ndlp is found, a new ndlp shall be created for this (SCR) purpose. An
2312 * IOCB is allocated, payload prepared, and the lpfc_sli_issue_iocb()
2313 * routine is invoked to send the SCR IOCB.
2314 *
2315 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2316 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2317 * will be stored into the context1 field of the IOCB for the completion
2318 * callback function to the SCR ELS command.
2319 *
2320 * Return code
2321 *   0 - Successfully issued scr command
2322 *   1 - Failed to issue scr command
2323 **/
2324int
2325lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2326{
2327	struct lpfc_hba  *phba = vport->phba;
2328	IOCB_t *icmd;
2329	struct lpfc_iocbq *elsiocb;
2330	struct lpfc_sli *psli;
2331	uint8_t *pcmd;
2332	uint16_t cmdsize;
2333	struct lpfc_nodelist *ndlp;
2334
2335	psli = &phba->sli;
2336	cmdsize = (sizeof(uint32_t) + sizeof(SCR));
2337
2338	ndlp = lpfc_findnode_did(vport, nportid);
2339	if (!ndlp) {
2340		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2341		if (!ndlp)
2342			return 1;
2343		lpfc_nlp_init(vport, ndlp, nportid);
2344		lpfc_enqueue_node(vport, ndlp);
2345	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
2346		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2347		if (!ndlp)
2348			return 1;
2349	}
2350
2351	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2352				     ndlp->nlp_DID, ELS_CMD_SCR);
2353
2354	if (!elsiocb) {
2355		/* This will trigger the release of the node just
2356		 * allocated
2357		 */
2358		lpfc_nlp_put(ndlp);
2359		return 1;
2360	}
2361
2362	icmd = &elsiocb->iocb;
2363	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2364
2365	*((uint32_t *) (pcmd)) = ELS_CMD_SCR;
2366	pcmd += sizeof(uint32_t);
2367
2368	/* For SCR, remainder of payload is SCR parameter page */
2369	memset(pcmd, 0, sizeof(SCR));
2370	((SCR *) pcmd)->Function = SCR_FUNC_FULL;
2371
2372	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2373		"Issue SCR:       did:x%x",
2374		ndlp->nlp_DID, 0, 0);
2375
2376	phba->fc_stat.elsXmitSCR++;
2377	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2378	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2379	    IOCB_ERROR) {
2380		/* The additional lpfc_nlp_put will cause the following
2381		 * lpfc_els_free_iocb routine to trigger the release of
2382		 * the node.
2383		 */
2384		lpfc_nlp_put(ndlp);
2385		lpfc_els_free_iocb(phba, elsiocb);
2386		return 1;
2387	}
2388	/* This will cause the callback-function lpfc_cmpl_els_cmd to
2389	 * trigger the release of the node.
2390	 */
2391	lpfc_nlp_put(ndlp);
2392	return 0;
2393}
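
/*
 * Illustrative sketch (editor's addition, not part of the original driver
 * source): after fabric login the driver registers for state change
 * notifications by sending an SCR to the Fabric Controller well-known
 * address (0xFFFFFD), so that the switch delivers RSCNs to this port.
 * The wrapper name lpfc_sketch_register_for_rscn and the open-coded
 * 0xFFFFFD constant are for illustration only.
 */
static inline int
lpfc_sketch_register_for_rscn(struct lpfc_vport *vport)
{
	/* Returns 0 on success, 1 on failure, per lpfc_issue_els_scr() */
	return lpfc_issue_els_scr(vport, 0xFFFFFD, 0);
}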
2394
2395/**
2396 * lpfc_issue_els_farpr - Issue a farpr to a node on a vport
2397 * @vport: pointer to a host virtual N_Port data structure.
2398 * @nportid: N_Port identifier to the remote node.
2399 * @retry: number of retries to the command IOCB.
2400 *
2401 * This routine issues a Fibre Channel Address Resolution Response
2402 * (FARPR) to a node on a vport. The remote node N_Port identifier (@nportid)
2403 * is passed into the function. It first searches the @vport node list to find
2404 * the matching ndlp. If no such ndlp is found, a new ndlp shall be created
2405 * for this (FARPR) purpose. An IOCB is allocated, payload prepared, and the
2406 * lpfc_sli_issue_iocb() routine is invoked to send the FARPR ELS command.
2407 *
2408 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
2409 * will be incremented by 1 for holding the ndlp and the reference to ndlp
2410 * will be stored into the context1 field of the IOCB for the completion
2411 * callback function to the FARPR ELS command.
2412 *
2413 * Return code
2414 *   0 - Successfully issued farpr command
2415 *   1 - Failed to issue farpr command
2416 **/
2417static int
2418lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
2419{
2420	struct lpfc_hba  *phba = vport->phba;
2421	IOCB_t *icmd;
2422	struct lpfc_iocbq *elsiocb;
2423	struct lpfc_sli *psli;
2424	FARP *fp;
2425	uint8_t *pcmd;
2426	uint32_t *lp;
2427	uint16_t cmdsize;
2428	struct lpfc_nodelist *ondlp;
2429	struct lpfc_nodelist *ndlp;
2430
2431	psli = &phba->sli;
2432	cmdsize = (sizeof(uint32_t) + sizeof(FARP));
2433
2434	ndlp = lpfc_findnode_did(vport, nportid);
2435	if (!ndlp) {
2436		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
2437		if (!ndlp)
2438			return 1;
2439		lpfc_nlp_init(vport, ndlp, nportid);
2440		lpfc_enqueue_node(vport, ndlp);
2441	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
2442		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
2443		if (!ndlp)
2444			return 1;
2445	}
2446
2447	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
2448				     ndlp->nlp_DID, ELS_CMD_RNID);
2449	if (!elsiocb) {
2450		/* This will trigger the release of the node just
2451		 * allocated
2452		 */
2453		lpfc_nlp_put(ndlp);
2454		return 1;
2455	}
2456
2457	icmd = &elsiocb->iocb;
2458	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
2459
2460	*((uint32_t *) (pcmd)) = ELS_CMD_FARPR;
2461	pcmd += sizeof(uint32_t);
2462
2463	/* Fill in FARPR payload */
2464	fp = (FARP *) (pcmd);
2465	memset(fp, 0, sizeof(FARP));
2466	lp = (uint32_t *) pcmd;
2467	*lp++ = be32_to_cpu(nportid);
2468	*lp++ = be32_to_cpu(vport->fc_myDID);
2469	fp->Rflags = 0;
2470	fp->Mflags = (FARP_MATCH_PORT | FARP_MATCH_NODE);
2471
2472	memcpy(&fp->RportName, &vport->fc_portname, sizeof(struct lpfc_name));
2473	memcpy(&fp->RnodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
2474	ondlp = lpfc_findnode_did(vport, nportid);
2475	if (ondlp && NLP_CHK_NODE_ACT(ondlp)) {
2476		memcpy(&fp->OportName, &ondlp->nlp_portname,
2477		       sizeof(struct lpfc_name));
2478		memcpy(&fp->OnodeName, &ondlp->nlp_nodename,
2479		       sizeof(struct lpfc_name));
2480	}
2481
2482	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2483		"Issue FARPR:     did:x%x",
2484		ndlp->nlp_DID, 0, 0);
2485
2486	phba->fc_stat.elsXmitFARPR++;
2487	elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
2488	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
2489	    IOCB_ERROR) {
2490		/* The additional lpfc_nlp_put will cause the following
2491		 * lpfc_els_free_iocb routine to trigger the release of
2492		 * the node.
2493		 */
2494		lpfc_nlp_put(ndlp);
2495		lpfc_els_free_iocb(phba, elsiocb);
2496		return 1;
2497	}
2498	/* This will cause the callback-function lpfc_cmpl_els_cmd to
2499	 * trigger the release of the node.
2500	 */
2501	lpfc_nlp_put(ndlp);
2502	return 0;
2503}
2504
2505/**
2506 * lpfc_cancel_retry_delay_tmo - Cancel the timer with delayed iocb-cmd retry
2507 * @vport: pointer to a host virtual N_Port data structure.
2508 * @nlp: pointer to a node-list data structure.
2509 *
2510 * This routine cancels the timer with a delayed IOCB-command retry for
2511 * a @vport's @nlp. It stops the timer for the delayed function retry and
2512 * removes the ELS retry event if one is pending. In addition, if the
2513 * NLP_NPR_2B_DISC bit is set in the @nlp's nlp_flag bitmap, ADISC IOCB
2514 * commands are sent for the @vport's nodes that require issuing discovery
2515 * ADISC.
2516 **/
2517void
2518lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp)
2519{
2520	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
2521	struct lpfc_work_evt *evtp;
2522
2523	if (!(nlp->nlp_flag & NLP_DELAY_TMO))
2524		return;
2525	spin_lock_irq(shost->host_lock);
2526	nlp->nlp_flag &= ~NLP_DELAY_TMO;
2527	spin_unlock_irq(shost->host_lock);
2528	del_timer_sync(&nlp->nlp_delayfunc);
2529	nlp->nlp_last_elscmd = 0;
2530	if (!list_empty(&nlp->els_retry_evt.evt_listp)) {
2531		list_del_init(&nlp->els_retry_evt.evt_listp);
2532		/* Decrement nlp reference count held for the delayed retry */
2533		evtp = &nlp->els_retry_evt;
2534		lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1);
2535	}
2536	if (nlp->nlp_flag & NLP_NPR_2B_DISC) {
2537		spin_lock_irq(shost->host_lock);
2538		nlp->nlp_flag &= ~NLP_NPR_2B_DISC;
2539		spin_unlock_irq(shost->host_lock);
2540		if (vport->num_disc_nodes) {
2541			if (vport->port_state < LPFC_VPORT_READY) {
2542				/* Check if there are more ADISCs to be sent */
2543				lpfc_more_adisc(vport);
2544			} else {
2545				/* Check if there are more PLOGIs to be sent */
2546				lpfc_more_plogi(vport);
2547				if (vport->num_disc_nodes == 0) {
2548					spin_lock_irq(shost->host_lock);
2549					vport->fc_flag &= ~FC_NDISC_ACTIVE;
2550					spin_unlock_irq(shost->host_lock);
2551					lpfc_can_disctmo(vport);
2552					lpfc_end_rscn(vport);
2553				}
2554			}
2555		}
2556	}
2557	return;
2558}
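
/*
 * Illustrative sketch (editor's addition, not part of the original driver
 * source): the state that lpfc_cancel_retry_delay_tmo() tears down is set
 * up as below when a delayed retry is scheduled (see lpfc_els_retry()).
 * The helper name lpfc_sketch_schedule_delayed_plogi is hypothetical.
 */
static inline void
lpfc_sketch_schedule_delayed_plogi(struct lpfc_vport *vport,
				   struct lpfc_nodelist *ndlp,
				   uint32_t delay_msec)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	/* Arm the per-node delayed-function timer; delay is in ms */
	mod_timer(&ndlp->nlp_delayfunc,
		  jiffies + msecs_to_jiffies(delay_msec));
	spin_lock_irq(shost->host_lock);
	ndlp->nlp_flag |= NLP_DELAY_TMO;
	spin_unlock_irq(shost->host_lock);

	/* Tell lpfc_els_retry_delay_handler() which ELS to reissue */
	ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
}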
2559
2560/**
2561 * lpfc_els_retry_delay - Timer function with a ndlp delayed function timer
2562 * @ptr: holder for the pointer to the timer function associated data (ndlp).
2563 *
2564 * This routine is invoked by the ndlp delayed-function timer to check
2565 * whether there are any pending ELS retry events for the node. If not, it
2566 * simply returns. Otherwise, if there is at least one ELS delayed event, it
2567 * adds the delayed events to the HBA work list and invokes the
2568 * lpfc_worker_wake_up() routine to wake up worker thread to process the
2569 * event. Note that lpfc_nlp_get() is called before posting the event to
2570 * the work list to hold reference count of ndlp so that it guarantees the
2571 * reference to ndlp will still be available when the worker thread gets
2572 * to the event associated with the ndlp.
2573 **/
2574void
2575lpfc_els_retry_delay(unsigned long ptr)
2576{
2577	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr;
2578	struct lpfc_vport *vport = ndlp->vport;
2579	struct lpfc_hba   *phba = vport->phba;
2580	unsigned long flags;
2581	struct lpfc_work_evt  *evtp = &ndlp->els_retry_evt;
2582
2583	spin_lock_irqsave(&phba->hbalock, flags);
2584	if (!list_empty(&evtp->evt_listp)) {
2585		spin_unlock_irqrestore(&phba->hbalock, flags);
2586		return;
2587	}
2588
2589	/* We need to hold the node by incrementing the reference
2590	 * count until the queued work is done
2591	 */
2592	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
2593	if (evtp->evt_arg1) {
2594		evtp->evt = LPFC_EVT_ELS_RETRY;
2595		list_add_tail(&evtp->evt_listp, &phba->work_list);
2596		lpfc_worker_wake_up(phba);
2597	}
2598	spin_unlock_irqrestore(&phba->hbalock, flags);
2599	return;
2600}
2601
2602/**
2603 * lpfc_els_retry_delay_handler - Work thread handler for ndlp delayed function
2604 * @ndlp: pointer to a node-list data structure.
2605 *
2606 * This routine is the worker-thread handler for processing the @ndlp delayed
2607 * event(s), posted by the lpfc_els_retry_delay() routine. It simply retrieves
2608 * the last ELS command from the associated ndlp and invokes the proper ELS
2609 * function according to the delayed ELS command to retry the command.
2610 **/
2611void
2612lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp)
2613{
2614	struct lpfc_vport *vport = ndlp->vport;
2615	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2616	uint32_t cmd, did, retry;
2617
2618	spin_lock_irq(shost->host_lock);
2619	did = ndlp->nlp_DID;
2620	cmd = ndlp->nlp_last_elscmd;
2621	ndlp->nlp_last_elscmd = 0;
2622
2623	if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) {
2624		spin_unlock_irq(shost->host_lock);
2625		return;
2626	}
2627
2628	ndlp->nlp_flag &= ~NLP_DELAY_TMO;
2629	spin_unlock_irq(shost->host_lock);
2630	/*
2631	 * If a discovery event readded nlp_delayfunc after timer
2632	 * firing and before processing the timer, cancel the
2633	 * nlp_delayfunc.
2634	 */
2635	del_timer_sync(&ndlp->nlp_delayfunc);
2636	retry = ndlp->nlp_retry;
2637	ndlp->nlp_retry = 0;
2638
2639	switch (cmd) {
2640	case ELS_CMD_FLOGI:
2641		lpfc_issue_els_flogi(vport, ndlp, retry);
2642		break;
2643	case ELS_CMD_PLOGI:
2644		if (!lpfc_issue_els_plogi(vport, ndlp->nlp_DID, retry)) {
2645			ndlp->nlp_prev_state = ndlp->nlp_state;
2646			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
2647		}
2648		break;
2649	case ELS_CMD_ADISC:
2650		if (!lpfc_issue_els_adisc(vport, ndlp, retry)) {
2651			ndlp->nlp_prev_state = ndlp->nlp_state;
2652			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2653		}
2654		break;
2655	case ELS_CMD_PRLI:
2656		if (!lpfc_issue_els_prli(vport, ndlp, retry)) {
2657			ndlp->nlp_prev_state = ndlp->nlp_state;
2658			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
2659		}
2660		break;
2661	case ELS_CMD_LOGO:
2662		if (!lpfc_issue_els_logo(vport, ndlp, retry)) {
2663			ndlp->nlp_prev_state = ndlp->nlp_state;
2664			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
2665		}
2666		break;
2667	case ELS_CMD_FDISC:
2668		lpfc_issue_els_fdisc(vport, ndlp, retry);
2669		break;
2670	}
2671	return;
2672}
2673
2674/**
2675 * lpfc_els_retry - Make retry decision on an els command iocb
2676 * @phba: pointer to lpfc hba data structure.
2677 * @cmdiocb: pointer to lpfc command iocb data structure.
2678 * @rspiocb: pointer to lpfc response iocb data structure.
2679 *
2680 * This routine makes a retry decision on an ELS command IOCB, which has
2681 * failed. The following ELS IOCBs use this function for retrying the command
2682 * when previously issued command responsed with error status: FLOGI, PLOGI,
2683 * PRLI, ADISC, LOGO, and FDISC. Based on the ELS command type and the
2684 * returned error status, it makes the decision whether a retry shall be
2685 * issued for the command, and whether a retry shall be made immediately or
2686 * delayed. In the former case, the corresponding ELS command issuing-function
2687 * is called to retry the command. In the later case, the ELS command shall
2688 * be posted to the ndlp delayed event and delayed function timer set to the
2689 * ndlp for the delayed command issusing.
2690 *
2691 * Return code
2692 *   0 - No retry of els command is made
2693 *   1 - Immediate or delayed retry of els command is made
2694 **/
2695static int
2696lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
2697	       struct lpfc_iocbq *rspiocb)
2698{
2699	struct lpfc_vport *vport = cmdiocb->vport;
2700	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
2701	IOCB_t *irsp = &rspiocb->iocb;
2702	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
2703	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
2704	uint32_t *elscmd;
2705	struct ls_rjt stat;
2706	int retry = 0, maxretry = lpfc_max_els_tries, delay = 0;
2707	int logerr = 0;
2708	uint32_t cmd = 0;
2709	uint32_t did;
2710
2711
2712	/* Note: context2 may be 0 for internal driver abort
2713	 * of a delayed ELS command.
2714	 */
2715
2716	if (pcmd && pcmd->virt) {
2717		elscmd = (uint32_t *) (pcmd->virt);
2718		cmd = *elscmd++;
2719	}
2720
2721	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
2722		did = ndlp->nlp_DID;
2723	else {
2724		/* We should only hit this case for retrying PLOGI */
2725		did = irsp->un.elsreq64.remoteID;
2726		ndlp = lpfc_findnode_did(vport, did);
2727		if ((!ndlp || !NLP_CHK_NODE_ACT(ndlp))
2728		    && (cmd != ELS_CMD_PLOGI))
2729			return 1;
2730	}
2731
2732	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
2733		"Retry ELS:       wd7:x%x wd4:x%x did:x%x",
2734		*(((uint32_t *) irsp) + 7), irsp->un.ulpWord[4], ndlp->nlp_DID);
2735
2736	switch (irsp->ulpStatus) {
2737	case IOSTAT_FCP_RSP_ERROR:
2738	case IOSTAT_REMOTE_STOP:
2739		break;
2740
2741	case IOSTAT_LOCAL_REJECT:
2742		switch ((irsp->un.ulpWord[4] & 0xff)) {
2743		case IOERR_LOOP_OPEN_FAILURE:
2744			if (cmd == ELS_CMD_FLOGI) {
2745				if (PCI_DEVICE_ID_HORNET ==
2746					phba->pcidev->device) {
2747					phba->fc_topology = TOPOLOGY_LOOP;
2748					phba->pport->fc_myDID = 0;
2749					phba->alpa_map[0] = 0;
2750					phba->alpa_map[1] = 0;
2751				}
2752			}
2753			if (cmd == ELS_CMD_PLOGI && cmdiocb->retry == 0)
2754				delay = 1000;
2755			retry = 1;
2756			break;
2757
2758		case IOERR_ILLEGAL_COMMAND:
2759			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2760					 "0124 Retry illegal cmd x%x "
2761					 "retry:x%x delay:x%x\n",
2762					 cmd, cmdiocb->retry, delay);
2763			retry = 1;
2764			/* All command's retry policy */
2765			maxretry = 8;
2766			if (cmdiocb->retry > 2)
2767				delay = 1000;
2768			break;
2769
2770		case IOERR_NO_RESOURCES:
2771			logerr = 1; /* HBA out of resources */
2772			retry = 1;
2773			if (cmdiocb->retry > 100)
2774				delay = 100;
2775			maxretry = 250;
2776			break;
2777
2778		case IOERR_ILLEGAL_FRAME:
2779			delay = 100;
2780			retry = 1;
2781			break;
2782
2783		case IOERR_SEQUENCE_TIMEOUT:
2784		case IOERR_INVALID_RPI:
2785			retry = 1;
2786			break;
2787		}
2788		break;
2789
2790	case IOSTAT_NPORT_RJT:
2791	case IOSTAT_FABRIC_RJT:
2792		if (irsp->un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
2793			retry = 1;
2794			break;
2795		}
2796		break;
2797
2798	case IOSTAT_NPORT_BSY:
2799	case IOSTAT_FABRIC_BSY:
2800		logerr = 1; /* Fabric / Remote NPort out of resources */
2801		retry = 1;
2802		break;
2803
2804	case IOSTAT_LS_RJT:
2805		stat.un.lsRjtError = be32_to_cpu(irsp->un.ulpWord[4]);
2806		/* Added for Vendor specific support
2807		 * Just keep retrying for these Rsn / Exp codes
2808		 */
2809		switch (stat.un.b.lsRjtRsnCode) {
2810		case LSRJT_UNABLE_TPC:
2811			if (stat.un.b.lsRjtRsnCodeExp ==
2812			    LSEXP_CMD_IN_PROGRESS) {
2813				if (cmd == ELS_CMD_PLOGI) {
2814					delay = 1000;
2815					maxretry = 48;
2816				}
2817				retry = 1;
2818				break;
2819			}
2820			if (stat.un.b.lsRjtRsnCodeExp ==
2821			    LSEXP_CANT_GIVE_DATA) {
2822				if (cmd == ELS_CMD_PLOGI) {
2823					delay = 1000;
2824					maxretry = 48;
2825				}
2826				retry = 1;
2827				break;
2828			}
2829			if (cmd == ELS_CMD_PLOGI) {
2830				delay = 1000;
2831				maxretry = lpfc_max_els_tries + 1;
2832				retry = 1;
2833				break;
2834			}
2835			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2836			  (cmd == ELS_CMD_FDISC) &&
2837			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_OUT_OF_RESOURCE)){
2838				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2839						 "0125 FDISC Failed (x%x). "
2840						 "Fabric out of resources\n",
2841						 stat.un.lsRjtError);
2842				lpfc_vport_set_state(vport,
2843						     FC_VPORT_NO_FABRIC_RSCS);
2844			}
2845			break;
2846
2847		case LSRJT_LOGICAL_BSY:
2848			if ((cmd == ELS_CMD_PLOGI) ||
2849			    (cmd == ELS_CMD_PRLI)) {
2850				delay = 1000;
2851				maxretry = 48;
2852			} else if (cmd == ELS_CMD_FDISC) {
2853				/* FDISC retry policy */
2854				maxretry = 48;
2855				if (cmdiocb->retry >= 32)
2856					delay = 1000;
2857			}
2858			retry = 1;
2859			break;
2860
2861		case LSRJT_LOGICAL_ERR:
2862			/* There are some cases where switches return this
2863			 * error when they are not ready and should be returning
2864			 * Logical Busy. We should delay every time.
2865			 */
2866			if (cmd == ELS_CMD_FDISC &&
2867			    stat.un.b.lsRjtRsnCodeExp == LSEXP_PORT_LOGIN_REQ) {
2868				maxretry = 3;
2869				delay = 1000;
2870				retry = 1;
2871				break;
2872			}
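			/* Fall through */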
2873		case LSRJT_PROTOCOL_ERR:
2874			if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
2875			  (cmd == ELS_CMD_FDISC) &&
2876			  ((stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_PNAME) ||
2877			  (stat.un.b.lsRjtRsnCodeExp == LSEXP_INVALID_NPORT_ID))
2878			  ) {
2879				lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
2880						 "0122 FDISC Failed (x%x). "
2881						 "Fabric Detected Bad WWN\n",
2882						 stat.un.lsRjtError);
2883				lpfc_vport_set_state(vport,
2884						     FC_VPORT_FABRIC_REJ_WWN);
2885			}
2886			break;
2887		}
2888		break;
2889
2890	case IOSTAT_INTERMED_RSP:
2891	case IOSTAT_BA_RJT:
2892		break;
2893
2894	default:
2895		break;
2896	}
2897
2898	if (did == FDMI_DID)
2899		retry = 1;
2900
2901	if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
2902	    (phba->fc_topology != TOPOLOGY_LOOP) &&
2903	    !lpfc_error_lost_link(irsp)) {
2904		/* FLOGI retry policy */
2905		retry = 1;
2906		/* retry forever */
2907		maxretry = 0;
2908		if (cmdiocb->retry >= 100)
2909			delay = 5000;
2910		else if (cmdiocb->retry >= 32)
2911			delay = 1000;
2912	}
2913
2914	cmdiocb->retry++;
2915	if (maxretry && (cmdiocb->retry >= maxretry)) {
2916		phba->fc_stat.elsRetryExceeded++;
2917		retry = 0;
2918	}
2919
2920	if ((vport->load_flag & FC_UNLOADING) != 0)
2921		retry = 0;
2922
2923	if (retry) {
2924		if ((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_FDISC)) {
2925			/* Stop retrying PLOGI and FDISC if in FCF discovery */
2926			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
2927				lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2928						 "2849 Stop retry ELS command "
2929						 "x%x to remote NPORT x%x, "
2930						 "Data: x%x x%x\n", cmd, did,
2931						 cmdiocb->retry, delay);
2932				return 0;
2933			}
2934		}
2935
2936		/* Retry ELS command <elsCmd> to remote NPORT <did> */
2937		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
2938				 "0107 Retry ELS command x%x to remote "
2939				 "NPORT x%x Data: x%x x%x\n",
2940				 cmd, did, cmdiocb->retry, delay);
2941
2942		if (((cmd == ELS_CMD_PLOGI) || (cmd == ELS_CMD_ADISC)) &&
2943			((irsp->ulpStatus != IOSTAT_LOCAL_REJECT) ||
2944			((irsp->un.ulpWord[4] & 0xff) != IOERR_NO_RESOURCES))) {
2945			/* Don't reset timer for no resources */
2946
2947			/* If discovery / RSCN timer is running, reset it */
2948			if (timer_pending(&vport->fc_disctmo) ||
2949			    (vport->fc_flag & FC_RSCN_MODE))
2950				lpfc_set_disctmo(vport);
2951		}
2952
2953		phba->fc_stat.elsXmitRetry++;
2954		if (ndlp && NLP_CHK_NODE_ACT(ndlp) && delay) {
2955			phba->fc_stat.elsDelayRetry++;
2956			ndlp->nlp_retry = cmdiocb->retry;
2957
2958			/* delay is specified in milliseconds */
2959			mod_timer(&ndlp->nlp_delayfunc,
2960				jiffies + msecs_to_jiffies(delay));
2961			spin_lock_irq(shost->host_lock);
2962			ndlp->nlp_flag |= NLP_DELAY_TMO;
2963			spin_unlock_irq(shost->host_lock);
2964
2965			ndlp->nlp_prev_state = ndlp->nlp_state;
2966			if (cmd == ELS_CMD_PRLI)
2967				lpfc_nlp_set_state(vport, ndlp,
2968					NLP_STE_REG_LOGIN_ISSUE);
2969			else
2970				lpfc_nlp_set_state(vport, ndlp,
2971					NLP_STE_NPR_NODE);
2972			ndlp->nlp_last_elscmd = cmd;
2973
2974			return 1;
2975		}
2976		switch (cmd) {
2977		case ELS_CMD_FLOGI:
2978			lpfc_issue_els_flogi(vport, ndlp, cmdiocb->retry);
2979			return 1;
2980		case ELS_CMD_FDISC:
2981			lpfc_issue_els_fdisc(vport, ndlp, cmdiocb->retry);
2982			return 1;
2983		case ELS_CMD_PLOGI:
2984			if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
2985				ndlp->nlp_prev_state = ndlp->nlp_state;
2986				lpfc_nlp_set_state(vport, ndlp,
2987						   NLP_STE_PLOGI_ISSUE);
2988			}
2989			lpfc_issue_els_plogi(vport, did, cmdiocb->retry);
2990			return 1;
2991		case ELS_CMD_ADISC:
2992			ndlp->nlp_prev_state = ndlp->nlp_state;
2993			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
2994			lpfc_issue_els_adisc(vport, ndlp, cmdiocb->retry);
2995			return 1;
2996		case ELS_CMD_PRLI:
2997			ndlp->nlp_prev_state = ndlp->nlp_state;
2998			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
2999			lpfc_issue_els_prli(vport, ndlp, cmdiocb->retry);
3000			return 1;
3001		case ELS_CMD_LOGO:
3002			ndlp->nlp_prev_state = ndlp->nlp_state;
3003			lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
3004			lpfc_issue_els_logo(vport, ndlp, cmdiocb->retry);
3005			return 1;
3006		}
3007	}
3008	/* No retry ELS command <elsCmd> to remote NPORT <did> */
3009	if (logerr) {
3010		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3011			 "0137 No retry ELS command x%x to remote "
3012			 "NPORT x%x: Out of Resources: Error:x%x/%x\n",
3013			 cmd, did, irsp->ulpStatus,
3014			 irsp->un.ulpWord[4]);
3015	}
3016	else {
3017		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3018			 "0108 No retry ELS command x%x to remote "
3019			 "NPORT x%x Retried:%d Error:x%x/%x\n",
3020			 cmd, did, cmdiocb->retry, irsp->ulpStatus,
3021			 irsp->un.ulpWord[4]);
3022	}
3023	return 0;
3024}
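
/*
 * Illustrative sketch (editor's addition, not part of the original driver
 * source): how the ELS completion handlers above consume lpfc_els_retry().
 * A non-zero return means the command has already been re-issued or
 * re-scheduled, so the handler only frees the IOCB; otherwise the failure
 * is final and, unless the link itself was lost, the discovery state
 * machine is told about it. lpfc_sketch_handle_els_error is a hypothetical
 * name and NLP_EVT_CMPL_PRLI stands in for the event matching the failed
 * command.
 */
static inline void
lpfc_sketch_handle_els_error(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			     struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
	IOCB_t *irsp = &rspiocb->iocb;

	/* The DSM expects to find the response through the command IOCB */
	cmdiocb->context_un.rsp_iocb = rspiocb;

	if (!lpfc_els_retry(phba, cmdiocb, rspiocb) &&
	    !lpfc_error_lost_link(irsp))
		lpfc_disc_state_machine(vport, ndlp, cmdiocb,
					NLP_EVT_CMPL_PRLI);

	lpfc_els_free_iocb(phba, cmdiocb);
}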
3025
3026/**
3027 * lpfc_els_free_data - Free lpfc dma buffer and data structure with an iocb
3028 * @phba: pointer to lpfc hba data structure.
3029 * @buf_ptr1: pointer to the lpfc DMA buffer data structure.
3030 *
3031 * This routine releases the lpfc DMA (Direct Memory Access) buffer(s)
3032 * associated with a command IOCB back to the lpfc DMA buffer pool. It first
3033 * checks to see whether there is a lpfc DMA buffer associated with the
3034 * response of the command IOCB. If so, it will be released before releasing
3035 * the lpfc DMA buffer associated with the IOCB itself.
3036 *
3037 * Return code
3038 *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
3039 **/
3040static int
3041lpfc_els_free_data(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr1)
3042{
3043	struct lpfc_dmabuf *buf_ptr;
3044
3045	/* Free the response before processing the command. */
3046	if (!list_empty(&buf_ptr1->list)) {
3047		list_remove_head(&buf_ptr1->list, buf_ptr,
3048				 struct lpfc_dmabuf,
3049				 list);
3050		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3051		kfree(buf_ptr);
3052	}
3053	lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
3054	kfree(buf_ptr1);
3055	return 0;
3056}
3057
3058/**
3059 * lpfc_els_free_bpl - Free lpfc dma buffer and data structure with bpl
3060 * @phba: pointer to lpfc hba data structure.
3061 * @buf_ptr: pointer to the lpfc dma buffer data structure.
3062 *
3063 * This routine releases the lpfc Direct Memory Access (DMA) buffer
3064 * associated with a Buffer Pointer List (BPL) back to the lpfc DMA buffer
3065 * pool.
3066 *
3067 * Return code
3068 *   0 - Successfully released lpfc DMA buffer (currently, always return 0)
3069 **/
3070static int
3071lpfc_els_free_bpl(struct lpfc_hba *phba, struct lpfc_dmabuf *buf_ptr)
3072{
3073	lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
3074	kfree(buf_ptr);
3075	return 0;
3076}
3077
3078/**
3079 * lpfc_els_free_iocb - Free a command iocb and its associated resources
3080 * @phba: pointer to lpfc hba data structure.
3081 * @elsiocb: pointer to lpfc els command iocb data structure.
3082 *
3083 * This routine frees a command IOCB and its associated resources. The
3084 * command IOCB data structure contains the reference to various associated
3085 * resources; these fields must be set to NULL if the associated reference
3086 * is not present:
3087 *   context1 - reference to ndlp
3088 *   context2 - reference to cmd
3089 *   context2->next - reference to rsp
3090 *   context3 - reference to bpl
3091 *
3092 * It first properly decrements the reference count held on ndlp for the
3093 * IOCB completion callback function. If LPFC_DELAY_MEM_FREE flag is not
3094 * set, it invokes the lpfc_els_free_data() routine to release the Direct
3095 * Memory Access (DMA) buffers associated with the IOCB. Otherwise, it
3096 * adds the DMA buffer to the @phba data structure for delayed release.
3097 * If reference to the Buffer Pointer List (BPL) is present, the
3098 * lpfc_els_free_bpl() routine is invoked to release the DMA memory
3099 * associated with BPL. Finally, the lpfc_sli_release_iocbq() routine is
3100 * invoked to release the IOCB data structure back to @phba IOCBQ list.
3101 *
3102 * Return code
3103 *   0 - Success (currently, always return 0)
3104 **/
3105int
3106lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
3107{
3108	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
3109	struct lpfc_nodelist *ndlp;
3110
3111	ndlp = (struct lpfc_nodelist *)elsiocb->context1;
3112	if (ndlp) {
3113		if (ndlp->nlp_flag & NLP_DEFER_RM) {
3114			lpfc_nlp_put(ndlp);
3115
3116			/* If the ndlp is not being used by another discovery
3117			 * thread, free it.
3118			 */
3119			if (!lpfc_nlp_not_used(ndlp)) {
3120				/* If ndlp is being used by another discovery
3121				 * thread, just clear NLP_DEFER_RM
3122				 */
3123				ndlp->nlp_flag &= ~NLP_DEFER_RM;
3124			}
3125		}
3126		else
3127			lpfc_nlp_put(ndlp);
3128		elsiocb->context1 = NULL;
3129	}
3130	/* context2  = cmd,  context2->next = rsp, context3 = bpl */
3131	if (elsiocb->context2) {
3132		if (elsiocb->iocb_flag & LPFC_DELAY_MEM_FREE) {
3133			/* Firmware could still be in progress of DMAing
3134			 * payload, so don't free data buffer till after
3135			 * a hbeat.
3136			 */
3137			elsiocb->iocb_flag &= ~LPFC_DELAY_MEM_FREE;
3138			buf_ptr = elsiocb->context2;
3139			elsiocb->context2 = NULL;
3140			if (buf_ptr) {
3141				buf_ptr1 = NULL;
3142				spin_lock_irq(&phba->hbalock);
3143				if (!list_empty(&buf_ptr->list)) {
3144					list_remove_head(&buf_ptr->list,
3145						buf_ptr1, struct lpfc_dmabuf,
3146						list);
3147					INIT_LIST_HEAD(&buf_ptr1->list);
3148					list_add_tail(&buf_ptr1->list,
3149						&phba->elsbuf);
3150					phba->elsbuf_cnt++;
3151				}
3152				INIT_LIST_HEAD(&buf_ptr->list);
3153				list_add_tail(&buf_ptr->list, &phba->elsbuf);
3154				phba->elsbuf_cnt++;
3155				spin_unlock_irq(&phba->hbalock);
3156			}
3157		} else {
3158			buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3159			lpfc_els_free_data(phba, buf_ptr1);
3160		}
3161	}
3162
3163	if (elsiocb->context3) {
3164		buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
3165		lpfc_els_free_bpl(phba, buf_ptr);
3166	}
3167	lpfc_sli_release_iocbq(phba, elsiocb);
3168	return 0;
3169}
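
/*
 * Illustrative sketch (editor's addition, not part of the original driver
 * source): the node-reference convention around lpfc_els_free_iocb(), as
 * used by lpfc_issue_els_scr()/lpfc_issue_els_farpr() above. When the
 * issuing routine does not want to keep the node, it drops one reference
 * itself; lpfc_els_free_iocb() then drops the reference held in context1,
 * which can trigger release of a node created only for this request.
 * lpfc_sketch_issue_and_forget is a hypothetical name.
 */
static inline int
lpfc_sketch_issue_and_forget(struct lpfc_hba *phba,
			     struct lpfc_nodelist *ndlp,
			     struct lpfc_iocbq *elsiocb)
{
	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
	    IOCB_ERROR) {
		/* Drop the caller's reference, then free the IOCB (which
		 * drops the reference held for the completion callback).
		 */
		lpfc_nlp_put(ndlp);
		lpfc_els_free_iocb(phba, elsiocb);
		return 1;
	}
	/* On success the completion callback frees the IOCB; drop the
	 * caller's reference now so the node can be released afterwards.
	 */
	lpfc_nlp_put(ndlp);
	return 0;
}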
3170
3171/**
3172 * lpfc_cmpl_els_logo_acc - Completion callback function to logo acc response
3173 * @phba: pointer to lpfc hba data structure.
3174 * @cmdiocb: pointer to lpfc command iocb data structure.
3175 * @rspiocb: pointer to lpfc response iocb data structure.
3176 *
3177 * This routine is the completion callback function for the Logout (LOGO)
3178 * Accept (ACC) Response ELS command. This routine is invoked to indicate
3179 * the completion of the LOGO process. It invokes lpfc_nlp_not_used() to
3180 * release the ndlp if it has the last reference remaining (reference count
3181 * is 1). If that succeeds (meaning the ndlp was released), it sets the IOCB
3182 * context1 field to NULL to inform the following lpfc_els_free_iocb() routine
3183 * that no ndlp reference count needs to be decremented. Otherwise, the ndlp
3184 * reference count shall be decremented by the lpfc_els_free_iocb()
3185 * routine. Finally, lpfc_els_free_iocb() is invoked to release the
3186 * IOCB data structure.
3187 **/
3188static void
3189lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3190		       struct lpfc_iocbq *rspiocb)
3191{
3192	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3193	struct lpfc_vport *vport = cmdiocb->vport;
3194	IOCB_t *irsp;
3195
3196	irsp = &rspiocb->iocb;
3197	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3198		"ACC LOGO cmpl:   status:x%x/x%x did:x%x",
3199		irsp->ulpStatus, irsp->un.ulpWord[4], ndlp->nlp_DID);
3200	/* ACC to LOGO completes to NPort <nlp_DID> */
3201	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3202			 "0109 ACC to LOGO completes to NPort x%x "
3203			 "Data: x%x x%x x%x\n",
3204			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3205			 ndlp->nlp_rpi);
3206
3207	if (ndlp->nlp_state == NLP_STE_NPR_NODE) {
3208		/* NPort Recovery mode or node is just allocated */
3209		if (!lpfc_nlp_not_used(ndlp)) {
3210			/* If the ndlp is being used by another discovery
3211			 * thread, just unregister the RPI.
3212			 */
3213			lpfc_unreg_rpi(vport, ndlp);
3214		} else {
3215			/* Indicate the node has already been released;
3216			 * lpfc_els_free_iocb should not reference it.
3217			 */
3218			cmdiocb->context1 = NULL;
3219		}
3220	}
3221	lpfc_els_free_iocb(phba, cmdiocb);
3222	return;
3223}
3224
3225/**
3226 * lpfc_mbx_cmpl_dflt_rpi - Completion callbk func for unreg dflt rpi mbox cmd
3227 * @phba: pointer to lpfc hba data structure.
3228 * @pmb: pointer to the driver internal queue element for mailbox command.
3229 *
3230 * This routine is the completion callback function for unregister default
3231 * RPI (Remote Port Index) mailbox command to the @phba. It simply releases
3232 * the associated lpfc Direct Memory Access (DMA) buffer back to the pool and
3233 * decrements the ndlp reference count held for this completion callback
3234 * function. After that, it invokes the lpfc_nlp_not_used() to check
3235 * whether there is only one reference left on the ndlp. If so, it will
3236 * perform one more decrement and trigger the release of the ndlp.
3237 **/
3238void
3239lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
3240{
3241	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
3242	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
3243
3244	/*
3245	 * This completion is used for unregistration across SLI modes; in
3246	 * SLI4 mode the RPI must additionally be released back to the pool.
3247	 */
3248	if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
3249	    (phba->sli_rev == LPFC_SLI_REV4))
3250		lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
3251
3252	pmb->context1 = NULL;
3253	lpfc_mbuf_free(phba, mp->virt, mp->phys);
3254	kfree(mp);
3255	mempool_free(pmb, phba->mbox_mem_pool);
3256	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3257		lpfc_nlp_put(ndlp);
3258		/* This is the end of the default RPI cleanup logic for this
3259		 * ndlp. If no other discovery threads are using this ndlp,
3260		 * we should free all resources associated with it.
3261		 */
3262		lpfc_nlp_not_used(ndlp);
3263	}
3264
3265	return;
3266}
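
/*
 * Wiring sketch (illustrative only): lpfc_cmpl_els_rsp() below arms this
 * completion for the immediate default-RPI unregister case roughly as
 * follows, taking the extra ndlp reference that this routine then drops:
 *
 *	mbox->context2 = lpfc_nlp_get(ndlp);
 *	mbox->vport = vport;
 *	mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
 *	mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
 *	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED)
 *		lpfc_nlp_put(ndlp);
 */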
3267
3268/**
3269 * lpfc_cmpl_els_rsp - Completion callback function for els response iocb cmd
3270 * @phba: pointer to lpfc hba data structure.
3271 * @cmdiocb: pointer to lpfc command iocb data structure.
3272 * @rspiocb: pointer to lpfc response iocb data structure.
3273 *
3274 * This routine is the completion callback function for ELS Response IOCB
3275 * command. In the normal case, this callback function just properly updates
3276 * the nlp_flag bitmap in the ndlp data structure; if the mbox command
3277 * reference field in the command IOCB is not NULL, the referenced mailbox
3278 * command will be sent out, and then the lpfc_els_free_iocb() routine is
3279 * invoked to release the IOCB. Under error conditions, such as when an LS_RJT
3280 * is returned or a link down event occurred during the discovery, the
3281 * lpfc_nlp_not_used() routine shall be invoked to try to release the ndlp if
3282 * no other threads are currently referring to it.
3283 **/
3284static void
3285lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3286		  struct lpfc_iocbq *rspiocb)
3287{
3288	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
3289	struct lpfc_vport *vport = ndlp ? ndlp->vport : NULL;
3290	struct Scsi_Host  *shost = vport ? lpfc_shost_from_vport(vport) : NULL;
3291	IOCB_t  *irsp;
3292	uint8_t *pcmd;
3293	LPFC_MBOXQ_t *mbox = NULL;
3294	struct lpfc_dmabuf *mp = NULL;
3295	uint32_t ls_rjt = 0;
3296
3297	irsp = &rspiocb->iocb;
3298
3299	if (cmdiocb->context_un.mbox)
3300		mbox = cmdiocb->context_un.mbox;
3301
3302	/* First determine if this is an LS_RJT cmpl. Note, this callback
3303	 * function can have the cmdiocb->context1 (ndlp) field set to NULL.
3304	 */
3305	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
3306	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3307	    (*((uint32_t *) (pcmd)) == ELS_CMD_LS_RJT)) {
3308		/* A LS_RJT associated with Default RPI cleanup has its own
3309		 * separate code path.
3310		 */
3311		if (!(ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3312			ls_rjt = 1;
3313	}
3314
3315	/* Check to see if link went down during discovery */
3316	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) || lpfc_els_chk_latt(vport)) {
3317		if (mbox) {
3318			mp = (struct lpfc_dmabuf *) mbox->context1;
3319			if (mp) {
3320				lpfc_mbuf_free(phba, mp->virt, mp->phys);
3321				kfree(mp);
3322			}
3323			mempool_free(mbox, phba->mbox_mem_pool);
3324		}
3325		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
3326		    (ndlp->nlp_flag & NLP_RM_DFLT_RPI))
3327			if (lpfc_nlp_not_used(ndlp)) {
3328				ndlp = NULL;
3329				/* Indicate the node has already been
3330				 * released; lpfc_els_free_iocb should not
3331				 * reference it.
3332				 */
3333				cmdiocb->context1 = NULL;
3334			}
3335		goto out;
3336	}
3337
3338	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3339		"ELS rsp cmpl:    status:x%x/x%x did:x%x",
3340		irsp->ulpStatus, irsp->un.ulpWord[4],
3341		cmdiocb->iocb.un.elsreq64.remoteID);
3342	/* ELS response tag <ulpIoTag> completes */
3343	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3344			 "0110 ELS response tag x%x completes "
3345			 "Data: x%x x%x x%x x%x x%x x%x x%x\n",
3346			 cmdiocb->iocb.ulpIoTag, rspiocb->iocb.ulpStatus,
3347			 rspiocb->iocb.un.ulpWord[4], rspiocb->iocb.ulpTimeout,
3348			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3349			 ndlp->nlp_rpi);
3350	if (mbox) {
3351		if ((rspiocb->iocb.ulpStatus == 0)
3352		    && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) {
3353			lpfc_unreg_rpi(vport, ndlp);
3354			/* Increment the ndlp reference count to hold the
3355			 * node for the mailbox completion callback.
3356			 */
3357			mbox->context2 = lpfc_nlp_get(ndlp);
3358			mbox->vport = vport;
3359			if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) {
3360				mbox->mbox_flag |= LPFC_MBX_IMED_UNREG;
3361				mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3362			}
3363			else {
3364				mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login;
3365				ndlp->nlp_prev_state = ndlp->nlp_state;
3366				lpfc_nlp_set_state(vport, ndlp,
3367					   NLP_STE_REG_LOGIN_ISSUE);
3368			}
3369			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
3370			    != MBX_NOT_FINISHED)
3371				goto out;
3372			else
3373				/* Decrement the ndlp reference count we
3374				 * set for this failed mailbox command.
3375				 */
3376				lpfc_nlp_put(ndlp);
3377
3378			/* ELS rsp: Cannot issue reg_login for <NPortid> */
3379			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
3380				"0138 ELS rsp: Cannot issue reg_login for x%x "
3381				"Data: x%x x%x x%x\n",
3382				ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3383				ndlp->nlp_rpi);
3384
3385			if (lpfc_nlp_not_used(ndlp)) {
3386				ndlp = NULL;
3387				/* Indicate the node has already been
3388				 * released; lpfc_els_free_iocb should not
3389				 * reference it.
3390				 */
3391				cmdiocb->context1 = NULL;
3392			}
3393		} else {
3394			/* Do not drop node for lpfc_els_abort'ed ELS cmds */
3395			if (!lpfc_error_lost_link(irsp) &&
3396			    ndlp->nlp_flag & NLP_ACC_REGLOGIN) {
3397				if (lpfc_nlp_not_used(ndlp)) {
3398					ndlp = NULL;
3399					/* Indicate the node has already
3400					 * been released; the routine
3401					 * lpfc_els_free_iocb should not
3402					 * reference it.
3403					 */
3404					cmdiocb->context1 = NULL;
3405				}
3406			}
3407		}
3408		mp = (struct lpfc_dmabuf *) mbox->context1;
3409		if (mp) {
3410			lpfc_mbuf_free(phba, mp->virt, mp->phys);
3411			kfree(mp);
3412		}
3413		mempool_free(mbox, phba->mbox_mem_pool);
3414	}
3415out:
3416	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
3417		spin_lock_irq(shost->host_lock);
3418		ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI);
3419		spin_unlock_irq(shost->host_lock);
3420
3421		/* If the node is not being used by another discovery thread,
3422		 * and we are sending a reject, we are done with it.
3423		 * Release driver reference count here and free associated
3424		 * resources.
3425		 */
3426		if (ls_rjt)
3427			if (lpfc_nlp_not_used(ndlp))
3428				/* Indicate the node has already been
3429				 * released; lpfc_els_free_iocb should not
3430				 * reference it.
3431				 */
3432				cmdiocb->context1 = NULL;
3433	}
3434
3435	lpfc_els_free_iocb(phba, cmdiocb);
3436	return;
3437}
3438
3439/**
3440 * lpfc_els_rsp_acc - Prepare and issue an acc response iocb command
3441 * @vport: pointer to a host virtual N_Port data structure.
3442 * @flag: the els command code to be accepted.
3443 * @oldiocb: pointer to the original lpfc command iocb data structure.
3444 * @ndlp: pointer to a node-list data structure.
3445 * @mbox: pointer to the driver internal queue element for mailbox command.
3446 *
3447 * This routine prepares and issues an Accept (ACC) response IOCB
3448 * command. It uses the @flag to properly set up the IOCB field for the
3449 * specific ACC response command to be issued and invokes the
3450 * lpfc_sli_issue_iocb() routine to send out ACC response IOCB. If a
3451 * @mbox pointer is passed in, it will be put into the context_un.mbox
3452 * field of the IOCB for the completion callback function to issue the
3453 * mailbox command to the HBA later when callback is invoked.
3454 *
3455 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3456 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3457 * will be stored into the context1 field of the IOCB for the completion
3458 * callback function to the corresponding response ELS IOCB command.
3459 *
3460 * Return code
3461 *   0 - Successfully issued acc response
3462 *   1 - Failed to issue acc response
3463 **/
3464int
3465lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
3466		 struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3467		 LPFC_MBOXQ_t *mbox)
3468{
3469	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3470	struct lpfc_hba  *phba = vport->phba;
3471	IOCB_t *icmd;
3472	IOCB_t *oldcmd;
3473	struct lpfc_iocbq *elsiocb;
3474	struct lpfc_sli *psli;
3475	uint8_t *pcmd;
3476	uint16_t cmdsize;
3477	int rc;
3478	ELS_PKT *els_pkt_ptr;
3479
3480	psli = &phba->sli;
3481	oldcmd = &oldiocb->iocb;
3482
3483	switch (flag) {
3484	case ELS_CMD_ACC:
3485		cmdsize = sizeof(uint32_t);
3486		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3487					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
3488		if (!elsiocb) {
3489			spin_lock_irq(shost->host_lock);
3490			ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3491			spin_unlock_irq(shost->host_lock);
3492			return 1;
3493		}
3494
3495		icmd = &elsiocb->iocb;
3496		icmd->ulpContext = oldcmd->ulpContext;	/* Xri */
3497		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3498		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3499		pcmd += sizeof(uint32_t);
3500
3501		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3502			"Issue ACC:       did:x%x flg:x%x",
3503			ndlp->nlp_DID, ndlp->nlp_flag, 0);
3504		break;
3505	case ELS_CMD_PLOGI:
3506		cmdsize = (sizeof(struct serv_parm) + sizeof(uint32_t));
3507		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3508					     ndlp, ndlp->nlp_DID, ELS_CMD_ACC);
3509		if (!elsiocb)
3510			return 1;
3511
3512		icmd = &elsiocb->iocb;
3513		icmd->ulpContext = oldcmd->ulpContext;	/* Xri */
3514		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3515
3516		if (mbox)
3517			elsiocb->context_un.mbox = mbox;
3518
3519		*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3520		pcmd += sizeof(uint32_t);
3521		memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
3522
3523		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3524			"Issue ACC PLOGI: did:x%x flg:x%x",
3525			ndlp->nlp_DID, ndlp->nlp_flag, 0);
3526		break;
3527	case ELS_CMD_PRLO:
3528		cmdsize = sizeof(uint32_t) + sizeof(PRLO);
3529		elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry,
3530					     ndlp, ndlp->nlp_DID, ELS_CMD_PRLO);
3531		if (!elsiocb)
3532			return 1;
3533
3534		icmd = &elsiocb->iocb;
3535		icmd->ulpContext = oldcmd->ulpContext; /* Xri */
3536		pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3537
3538		memcpy(pcmd, ((struct lpfc_dmabuf *) oldiocb->context2)->virt,
3539		       sizeof(uint32_t) + sizeof(PRLO));
3540		*((uint32_t *) (pcmd)) = ELS_CMD_PRLO_ACC;
3541		els_pkt_ptr = (ELS_PKT *) pcmd;
3542		els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED;
3543
3544		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3545			"Issue ACC PRLO:  did:x%x flg:x%x",
3546			ndlp->nlp_DID, ndlp->nlp_flag, 0);
3547		break;
3548	default:
3549		return 1;
3550	}
3551	/* Xmit ELS ACC response tag <ulpIoTag> */
3552	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3553			 "0128 Xmit ELS ACC response tag x%x, XRI: x%x, "
3554			 "DID: x%x, nlp_flag: x%x nlp_state: x%x RPI: x%x\n",
3555			 elsiocb->iotag, elsiocb->iocb.ulpContext,
3556			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3557			 ndlp->nlp_rpi);
3558	if (ndlp->nlp_flag & NLP_LOGO_ACC) {
3559		spin_lock_irq(shost->host_lock);
3560		ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3561		spin_unlock_irq(shost->host_lock);
3562		elsiocb->iocb_cmpl = lpfc_cmpl_els_logo_acc;
3563	} else {
3564		elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3565	}
3566
3567	phba->fc_stat.elsXmitACC++;
3568	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3569	if (rc == IOCB_ERROR) {
3570		lpfc_els_free_iocb(phba, elsiocb);
3571		return 1;
3572	}
3573	return 0;
3574}
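
/*
 * Usage sketch (illustrative only): unsolicited-ELS receive handlers in this
 * file pass the original IOCB back so that the ACC reuses its exchange (XRI),
 * for example as lpfc_els_rcv_rrq() does further below:
 *
 *	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
 */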
3575
3576/**
3577 * lpfc_els_rsp_reject - Prepare and issue a rjt response iocb command
3578 * @vport: pointer to a virtual N_Port data structure.
3579 * @rejectError: reject reason and explanation codes to be returned.
3580 * @oldiocb: pointer to the original lpfc command iocb data structure.
3581 * @ndlp: pointer to a node-list data structure.
3582 * @mbox: pointer to the driver internal queue element for mailbox command.
3583 *
3584 * This routine prepares and issues a Reject (RJT) response IOCB
3585 * command. If a @mbox pointer is passed in, it will be put into the
3586 * context_un.mbox field of the IOCB for the completion callback function
3587 * to issue to the HBA later.
3588 *
3589 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3590 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3591 * will be stored into the context1 field of the IOCB for the completion
3592 * callback function to the reject response ELS IOCB command.
3593 *
3594 * Return code
3595 *   0 - Successfully issued reject response
3596 *   1 - Failed to issue reject response
3597 **/
3598int
3599lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
3600		    struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp,
3601		    LPFC_MBOXQ_t *mbox)
3602{
3603	struct lpfc_hba  *phba = vport->phba;
3604	IOCB_t *icmd;
3605	IOCB_t *oldcmd;
3606	struct lpfc_iocbq *elsiocb;
3607	struct lpfc_sli *psli;
3608	uint8_t *pcmd;
3609	uint16_t cmdsize;
3610	int rc;
3611
3612	psli = &phba->sli;
3613	cmdsize = 2 * sizeof(uint32_t);
3614	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3615				     ndlp->nlp_DID, ELS_CMD_LS_RJT);
3616	if (!elsiocb)
3617		return 1;
3618
3619	icmd = &elsiocb->iocb;
3620	oldcmd = &oldiocb->iocb;
3621	icmd->ulpContext = oldcmd->ulpContext;	/* Xri */
3622	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3623
3624	*((uint32_t *) (pcmd)) = ELS_CMD_LS_RJT;
3625	pcmd += sizeof(uint32_t);
3626	*((uint32_t *) (pcmd)) = rejectError;
3627
3628	if (mbox)
3629		elsiocb->context_un.mbox = mbox;
3630
3631	/* Xmit ELS RJT <err> response tag <ulpIoTag> */
3632	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3633			 "0129 Xmit ELS RJT x%x response tag x%x "
3634			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
3635			 "rpi x%x\n",
3636			 rejectError, elsiocb->iotag,
3637			 elsiocb->iocb.ulpContext, ndlp->nlp_DID,
3638			 ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi);
3639	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3640		"Issue LS_RJT:    did:x%x flg:x%x err:x%x",
3641		ndlp->nlp_DID, ndlp->nlp_flag, rejectError);
3642
3643	phba->fc_stat.elsXmitLSRJT++;
3644	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3645	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3646
3647	if (rc == IOCB_ERROR) {
3648		lpfc_els_free_iocb(phba, elsiocb);
3649		return 1;
3650	}
3651	return 0;
3652}
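
/*
 * Usage sketch (illustrative only): callers typically fill in a struct ls_rjt
 * with the reason and explanation codes and pass its error word, for example
 * as lpfc_els_rcv_lirr() does further below:
 *
 *	struct ls_rjt stat;
 *
 *	memset(&stat, 0, sizeof(struct ls_rjt));
 *	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
 *	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
 *	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
 */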
3653
3654/**
3655 * lpfc_els_rsp_adisc_acc - Prepare and issue acc response to adisc iocb cmd
3656 * @vport: pointer to a virtual N_Port data structure.
3657 * @oldiocb: pointer to the original lpfc command iocb data structure.
3658 * @ndlp: pointer to a node-list data structure.
3659 *
3660 * This routine prepares and issues an Accept (ACC) response to Address
3661 * Discover (ADISC) ELS command. It simply prepares the payload of the IOCB
3662 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3663 *
3664 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3665 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3666 * will be stored into the context1 field of the IOCB for the completion
3667 * callback function to the ADISC Accept response ELS IOCB command.
3668 *
3669 * Return code
3670 *   0 - Successfully issued acc adisc response
3671 *   1 - Failed to issue adisc acc response
3672 **/
3673int
3674lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3675		       struct lpfc_nodelist *ndlp)
3676{
3677	struct lpfc_hba  *phba = vport->phba;
3678	ADISC *ap;
3679	IOCB_t *icmd, *oldcmd;
3680	struct lpfc_iocbq *elsiocb;
3681	uint8_t *pcmd;
3682	uint16_t cmdsize;
3683	int rc;
3684
3685	cmdsize = sizeof(uint32_t) + sizeof(ADISC);
3686	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3687				     ndlp->nlp_DID, ELS_CMD_ACC);
3688	if (!elsiocb)
3689		return 1;
3690
3691	icmd = &elsiocb->iocb;
3692	oldcmd = &oldiocb->iocb;
3693	icmd->ulpContext = oldcmd->ulpContext;	/* Xri */
3694
3695	/* Xmit ADISC ACC response tag <ulpIoTag> */
3696	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3697			 "0130 Xmit ADISC ACC response iotag x%x xri: "
3698			 "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n",
3699			 elsiocb->iotag, elsiocb->iocb.ulpContext,
3700			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3701			 ndlp->nlp_rpi);
3702	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3703
3704	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3705	pcmd += sizeof(uint32_t);
3706
3707	ap = (ADISC *) (pcmd);
3708	ap->hardAL_PA = phba->fc_pref_ALPA;
3709	memcpy(&ap->portName, &vport->fc_portname, sizeof(struct lpfc_name));
3710	memcpy(&ap->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3711	ap->DID = be32_to_cpu(vport->fc_myDID);
3712
3713	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3714		"Issue ACC ADISC: did:x%x flg:x%x",
3715		ndlp->nlp_DID, ndlp->nlp_flag, 0);
3716
3717	phba->fc_stat.elsXmitACC++;
3718	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3719	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3720	if (rc == IOCB_ERROR) {
3721		lpfc_els_free_iocb(phba, elsiocb);
3722		return 1;
3723	}
3724	return 0;
3725}
3726
3727/**
3728 * lpfc_els_rsp_prli_acc - Prepare and issue acc response to prli iocb cmd
3729 * @vport: pointer to a virtual N_Port data structure.
3730 * @oldiocb: pointer to the original lpfc command iocb data structure.
3731 * @ndlp: pointer to a node-list data structure.
3732 *
3733 * This routine prepares and issues an Accept (ACC) response to Process
3734 * Login (PRLI) ELS command. It simply prepares the payload of the IOCB
3735 * and invokes the lpfc_sli_issue_iocb() routine to send out the command.
3736 *
3737 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3738 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3739 * will be stored into the context1 field of the IOCB for the completion
3740 * callback function to the PRLI Accept response ELS IOCB command.
3741 *
3742 * Return code
3743 *   0 - Successfully issued acc prli response
3744 *   1 - Failed to issue acc prli response
3745 **/
3746int
3747lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
3748		      struct lpfc_nodelist *ndlp)
3749{
3750	struct lpfc_hba  *phba = vport->phba;
3751	PRLI *npr;
3752	lpfc_vpd_t *vpd;
3753	IOCB_t *icmd;
3754	IOCB_t *oldcmd;
3755	struct lpfc_iocbq *elsiocb;
3756	struct lpfc_sli *psli;
3757	uint8_t *pcmd;
3758	uint16_t cmdsize;
3759	int rc;
3760
3761	psli = &phba->sli;
3762
3763	cmdsize = sizeof(uint32_t) + sizeof(PRLI);
3764	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3765		ndlp->nlp_DID, (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK)));
3766	if (!elsiocb)
3767		return 1;
3768
3769	icmd = &elsiocb->iocb;
3770	oldcmd = &oldiocb->iocb;
3771	icmd->ulpContext = oldcmd->ulpContext;	/* Xri */
3772	/* Xmit PRLI ACC response tag <ulpIoTag> */
3773	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3774			 "0131 Xmit PRLI ACC response tag x%x xri x%x, "
3775			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
3776			 elsiocb->iotag, elsiocb->iocb.ulpContext,
3777			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
3778			 ndlp->nlp_rpi);
3779	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3780
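	/*
	 * Word 0 of the accept reuses the ELS_CMD_PRLI definition with the
	 * response (ACC) code substituted for the command byte, so the
	 * remaining bytes encoded in ELS_CMD_PRLI (page and payload length)
	 * are preserved.
	 */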
3781	*((uint32_t *) (pcmd)) = (ELS_CMD_ACC | (ELS_CMD_PRLI & ~ELS_RSP_MASK));
3782	pcmd += sizeof(uint32_t);
3783
3784	/* For PRLI, remainder of payload is PRLI parameter page */
3785	memset(pcmd, 0, sizeof(PRLI));
3786
3787	npr = (PRLI *) pcmd;
3788	vpd = &phba->vpd;
3789	/*
3790	 * If the remote port is a target and our firmware version is 3.20 or
3791	 * later, set the following bits for FC-TAPE support.
3792	 */
3793	if ((ndlp->nlp_type & NLP_FCP_TARGET) &&
3794	    (vpd->rev.feaLevelHigh >= 0x02)) {
3795		npr->ConfmComplAllowed = 1;
3796		npr->Retry = 1;
3797		npr->TaskRetryIdReq = 1;
3798	}
3799
3800	npr->acceptRspCode = PRLI_REQ_EXECUTED;
3801	npr->estabImagePair = 1;
3802	npr->readXferRdyDis = 1;
3803	npr->ConfmComplAllowed = 1;
3804
3805	npr->prliType = PRLI_FCP_TYPE;
3806	npr->initiatorFunc = 1;
3807
3808	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3809		"Issue ACC PRLI:  did:x%x flg:x%x",
3810		ndlp->nlp_DID, ndlp->nlp_flag, 0);
3811
3812	phba->fc_stat.elsXmitACC++;
3813	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3814
3815	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3816	if (rc == IOCB_ERROR) {
3817		lpfc_els_free_iocb(phba, elsiocb);
3818		return 1;
3819	}
3820	return 0;
3821}
3822
3823/**
3824 * lpfc_els_rsp_rnid_acc - Issue rnid acc response iocb command
3825 * @vport: pointer to a virtual N_Port data structure.
3826 * @format: rnid command format.
3827 * @oldiocb: pointer to the original lpfc command iocb data structure.
3828 * @ndlp: pointer to a node-list data structure.
3829 *
3830 * This routine issues a Request Node Identification Data (RNID) Accept
3831 * (ACC) response. It constructs the RNID ACC response command according to
3832 * the proper @format and then calls the lpfc_sli_issue_iocb() routine to
3833 * issue the response. Note that this command does not need to hold the ndlp
3834 * reference count for the callback. So, the ndlp reference count taken by
3835 * the lpfc_prep_els_iocb() routine is put back and the context1 field of
3836 * IOCB is set to NULL to indicate to the lpfc_els_free_iocb() routine that
3837 * there is no ndlp reference available.
3838 *
3839 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
3840 * will be incremented by 1 for holding the ndlp and the reference to ndlp
3841 * will be stored into the context1 field of the IOCB for the completion
3842 * callback function. However, for the RNID Accept Response ELS command,
3843 * this is undone later by this routine after the IOCB is allocated.
3844 *
3845 * Return code
3846 *   0 - Successfully issued acc rnid response
3847 *   1 - Failed to issue acc rnid response
3848 **/
3849static int
3850lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
3851		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
3852{
3853	struct lpfc_hba  *phba = vport->phba;
3854	RNID *rn;
3855	IOCB_t *icmd, *oldcmd;
3856	struct lpfc_iocbq *elsiocb;
3857	struct lpfc_sli *psli;
3858	uint8_t *pcmd;
3859	uint16_t cmdsize;
3860	int rc;
3861
3862	psli = &phba->sli;
3863	cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
3864					+ (2 * sizeof(struct lpfc_name));
3865	if (format)
3866		cmdsize += sizeof(RNID_TOP_DISC);
3867
3868	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
3869				     ndlp->nlp_DID, ELS_CMD_ACC);
3870	if (!elsiocb)
3871		return 1;
3872
3873	icmd = &elsiocb->iocb;
3874	oldcmd = &oldiocb->iocb;
3875	icmd->ulpContext = oldcmd->ulpContext;	/* Xri */
3876	/* Xmit RNID ACC response tag <ulpIoTag> */
3877	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
3878			 "0132 Xmit RNID ACC response tag x%x xri x%x\n",
3879			 elsiocb->iotag, elsiocb->iocb.ulpContext);
3880	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
3881	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
3882	pcmd += sizeof(uint32_t);
3883
3884	memset(pcmd, 0, sizeof(RNID));
3885	rn = (RNID *) (pcmd);
3886	rn->Format = format;
3887	rn->CommonLen = (2 * sizeof(struct lpfc_name));
3888	memcpy(&rn->portName, &vport->fc_portname, sizeof(struct lpfc_name));
3889	memcpy(&rn->nodeName, &vport->fc_nodename, sizeof(struct lpfc_name));
3890	switch (format) {
3891	case 0:
3892		rn->SpecificLen = 0;
3893		break;
3894	case RNID_TOPOLOGY_DISC:
3895		rn->SpecificLen = sizeof(RNID_TOP_DISC);
3896		memcpy(&rn->un.topologyDisc.portName,
3897		       &vport->fc_portname, sizeof(struct lpfc_name));
3898		rn->un.topologyDisc.unitType = RNID_HBA;
3899		rn->un.topologyDisc.physPort = 0;
3900		rn->un.topologyDisc.attachedNodes = 0;
3901		break;
3902	default:
3903		rn->CommonLen = 0;
3904		rn->SpecificLen = 0;
3905		break;
3906	}
3907
3908	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
3909		"Issue ACC RNID:  did:x%x flg:x%x",
3910		ndlp->nlp_DID, ndlp->nlp_flag, 0);
3911
3912	phba->fc_stat.elsXmitACC++;
3913	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
3914	lpfc_nlp_put(ndlp);
3915	elsiocb->context1 = NULL;  /* Don't need ndlp for cmpl,
3916				    * it could be freed */
3917
3918	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
3919	if (rc == IOCB_ERROR) {
3920		lpfc_els_free_iocb(phba, elsiocb);
3921		return 1;
3922	}
3923	return 0;
3924}
3925
3926/**
3927 * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
3928 * @vport: pointer to a host virtual N_Port data structure.
3929 *
3930 * This routine issues Address Discover (ADISC) ELS commands to those
3931 * N_Ports of the @vport which are in node port recovery state and for which
3932 * ADISC has not yet been issued. Each time an ELS ADISC IOCB is issued by
3933 * invoking the lpfc_issue_els_adisc() routine, the per-@vport discovery
3934 * count (num_disc_nodes) shall be incremented. If num_disc_nodes reaches the
3935 * pre-configured threshold (cfg_discovery_threads), the @vport fc_flag will
3936 * be marked with the FC_NLP_MORE bit and the process of issuing the remaining
3937 * ADISC IOCBs is deferred for a later pass. On the other hand, if walking
3938 * through all the ndlps on the @vport issues no further ADISC IOCB, the
3939 * FC_NLP_MORE bit shall be cleared from the @vport fc_flag, indicating there
3940 * are no more ADISCs to be sent.
3941 *
3942 * Return code
3943 *    The number of N_Ports with adisc issued.
3944 **/
3945int
3946lpfc_els_disc_adisc(struct lpfc_vport *vport)
3947{
3948	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
3949	struct lpfc_nodelist *ndlp, *next_ndlp;
3950	int sentadisc = 0;
3951
3952	/* go thru NPR nodes and issue any remaining ELS ADISCs */
3953	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
3954		if (!NLP_CHK_NODE_ACT(ndlp))
3955			continue;
3956		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
3957		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
3958		    (ndlp->nlp_flag & NLP_NPR_ADISC) != 0) {
3959			spin_lock_irq(shost->host_lock);
3960			ndlp->nlp_flag &= ~NLP_NPR_ADISC;
3961			spin_unlock_irq(shost->host_lock);
3962			ndlp->nlp_prev_state = ndlp->nlp_state;
3963			lpfc_nlp_set_state(vport, ndlp, NLP_STE_ADISC_ISSUE);
3964			lpfc_issue_els_adisc(vport, ndlp, 0);
3965			sentadisc++;
3966			vport->num_disc_nodes++;
3967			if (vport->num_disc_nodes >=
3968			    vport->cfg_discovery_threads) {
3969				spin_lock_irq(shost->host_lock);
3970				vport->fc_flag |= FC_NLP_MORE;
3971				spin_unlock_irq(shost->host_lock);
3972				break;
3973			}
3974		}
3975	}
3976	if (sentadisc == 0) {
3977		spin_lock_irq(shost->host_lock);
3978		vport->fc_flag &= ~FC_NLP_MORE;
3979		spin_unlock_irq(shost->host_lock);
3980	}
3981	return sentadisc;
3982}
3983
3984/**
3985 * lpfc_els_disc_plogi - Issue plogi for all npr nodes of a vport before adisc
3986 * @vport: pointer to a host virtual N_Port data structure.
3987 *
3988 * This routine issues Port Login (PLOGI) ELS commands to all the N_Ports
3989 * of the @vport which are in node port recovery state. Each time an ELS
3990 * PLOGI IOCB is issued by invoking the lpfc_issue_els_plogi() routine,
3991 * the per-@vport discovery count (num_disc_nodes) shall be
3992 * incremented. If num_disc_nodes reaches the pre-configured threshold
3993 * (cfg_discovery_threads), the @vport fc_flag will be marked with the
3994 * FC_NLP_MORE bit and the process of issuing the remaining PLOGI IOCBs is
3995 * deferred for a later pass. On the other hand, after walking through all
3996 * the ndlps on the @vport, if no PLOGI IOCB was issued, the @vport fc_flag
3997 * shall be cleared of the FC_NLP_MORE bit, indicating there are no more
3998 * PLOGIs to be sent.
3999 *
4000 * Return code
4001 *   The number of N_Ports with plogi issued.
4002 **/
4003int
4004lpfc_els_disc_plogi(struct lpfc_vport *vport)
4005{
4006	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4007	struct lpfc_nodelist *ndlp, *next_ndlp;
4008	int sentplogi = 0;
4009
4010	/* go thru NPR nodes and issue any remaining ELS PLOGIs */
4011	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
4012		if (!NLP_CHK_NODE_ACT(ndlp))
4013			continue;
4014		if (ndlp->nlp_state == NLP_STE_NPR_NODE &&
4015		    (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 &&
4016		    (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 &&
4017		    (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) {
4018			ndlp->nlp_prev_state = ndlp->nlp_state;
4019			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4020			lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
4021			sentplogi++;
4022			vport->num_disc_nodes++;
4023			if (vport->num_disc_nodes >=
4024			    vport->cfg_discovery_threads) {
4025				spin_lock_irq(shost->host_lock);
4026				vport->fc_flag |= FC_NLP_MORE;
4027				spin_unlock_irq(shost->host_lock);
4028				break;
4029			}
4030		}
4031	}
4032	if (sentplogi) {
4033		lpfc_set_disctmo(vport);
4034	}
4035	else {
4036		spin_lock_irq(shost->host_lock);
4037		vport->fc_flag &= ~FC_NLP_MORE;
4038		spin_unlock_irq(shost->host_lock);
4039	}
4040	return sentplogi;
4041}
4042
4043/**
4044 * lpfc_els_flush_rscn - Clean up any rscn activities with a vport
4045 * @vport: pointer to a host virtual N_Port data structure.
4046 *
4047 * This routine cleans up any Registration State Change Notification
4048 * (RSCN) activity with a @vport. Note that the fc_rscn_flush flag of the
4049 * @vport together with the host_lock is used to prevent multiple threads
4050 * from accessing the RSCN array of the same @vport at the same time.
4051 **/
4052void
4053lpfc_els_flush_rscn(struct lpfc_vport *vport)
4054{
4055	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4056	struct lpfc_hba  *phba = vport->phba;
4057	int i;
4058
4059	spin_lock_irq(shost->host_lock);
4060	if (vport->fc_rscn_flush) {
4061		/* Another thread is walking fc_rscn_id_list on this vport */
4062		spin_unlock_irq(shost->host_lock);
4063		return;
4064	}
4065	/* Indicate we are walking lpfc_els_flush_rscn on this vport */
4066	vport->fc_rscn_flush = 1;
4067	spin_unlock_irq(shost->host_lock);
4068
4069	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
4070		lpfc_in_buf_free(phba, vport->fc_rscn_id_list[i]);
4071		vport->fc_rscn_id_list[i] = NULL;
4072	}
4073	spin_lock_irq(shost->host_lock);
4074	vport->fc_rscn_id_cnt = 0;
4075	vport->fc_flag &= ~(FC_RSCN_MODE | FC_RSCN_DISCOVERY);
4076	spin_unlock_irq(shost->host_lock);
4077	lpfc_can_disctmo(vport);
4078	/* Indicate we are done walking this fc_rscn_id_list */
4079	vport->fc_rscn_flush = 0;
4080}
4081
4082/**
4083 * lpfc_rscn_payload_check - Check whether there is a pending rscn to a did
4084 * @vport: pointer to a host virtual N_Port data structure.
4085 * @did: remote destination port identifier.
4086 *
4087 * This routine checks whether there is any pending Registration State
4088 * Change Notification (RSCN) to a @did on @vport.
4089 *
4090 * Return code
4091 *   Non-zero - The @did matched a pending rscn
4092 *   0 - not able to match @did with a pending rscn
4093 **/
4094int
4095lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
4096{
4097	D_ID ns_did;
4098	D_ID rscn_did;
4099	uint32_t *lp;
4100	uint32_t payload_len, i;
4101	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4102
4103	ns_did.un.word = did;
4104
4105	/* Never match fabric nodes for RSCNs */
4106	if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
4107		return 0;
4108
4109	/* If we are doing a FULL RSCN rediscovery, match everything */
4110	if (vport->fc_flag & FC_RSCN_DISCOVERY)
4111		return did;
4112
4113	spin_lock_irq(shost->host_lock);
4114	if (vport->fc_rscn_flush) {
4115		/* Another thread is walking fc_rscn_id_list on this vport */
4116		spin_unlock_irq(shost->host_lock);
4117		return 0;
4118	}
4119	/* Indicate we are walking fc_rscn_id_list on this vport */
4120	vport->fc_rscn_flush = 1;
4121	spin_unlock_irq(shost->host_lock);
4122	for (i = 0; i < vport->fc_rscn_id_cnt; i++) {
4123		lp = vport->fc_rscn_id_list[i]->virt;
4124		payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4125		payload_len -= sizeof(uint32_t);	/* take off word 0 */
4126		while (payload_len) {
4127			rscn_did.un.word = be32_to_cpu(*lp++);
4128			payload_len -= sizeof(uint32_t);
4129			switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
4130			case RSCN_ADDRESS_FORMAT_PORT:
4131				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4132				    && (ns_did.un.b.area == rscn_did.un.b.area)
4133				    && (ns_did.un.b.id == rscn_did.un.b.id))
4134					goto return_did_out;
4135				break;
4136			case RSCN_ADDRESS_FORMAT_AREA:
4137				if ((ns_did.un.b.domain == rscn_did.un.b.domain)
4138				    && (ns_did.un.b.area == rscn_did.un.b.area))
4139					goto return_did_out;
4140				break;
4141			case RSCN_ADDRESS_FORMAT_DOMAIN:
4142				if (ns_did.un.b.domain == rscn_did.un.b.domain)
4143					goto return_did_out;
4144				break;
4145			case RSCN_ADDRESS_FORMAT_FABRIC:
4146				goto return_did_out;
4147			}
4148		}
4149	}
4150	/* Indicate we are done with walking fc_rscn_id_list on this vport */
4151	vport->fc_rscn_flush = 0;
4152	return 0;
4153return_did_out:
4154	/* Indicate we are done with walking fc_rscn_id_list on this vport */
4155	vport->fc_rscn_flush = 0;
4156	return did;
4157}
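
/*
 * Worked example (illustrative only): assume the only pending entry on the
 * vport is an area-format RSCN affecting address 0x010200 and
 * FC_RSCN_DISCOVERY is not set.  Then:
 *
 *	lpfc_rscn_payload_check(vport, 0x010203)	returns 0x010203
 *	lpfc_rscn_payload_check(vport, 0x010300)	returns 0
 *
 * since the area format matches on the domain and area bytes only.
 */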
4158
4159/**
4160 * lpfc_rscn_recovery_check - Send recovery event to vport nodes matching rscn
4161 * @vport: pointer to a host virtual N_Port data structure.
4162 *
4163 * This routine sends the recovery (NLP_EVT_DEVICE_RECOVERY) event to the
4164 * state machine for all of a @vport's nodes that match a pending RSCN
4165 * (Registration State Change Notification).
4166 *
4167 * Return code
4168 *   0 - Successful (currently always returns 0)
4169 **/
4170static int
4171lpfc_rscn_recovery_check(struct lpfc_vport *vport)
4172{
4173	struct lpfc_nodelist *ndlp = NULL;
4174
4175	/* Move all affected nodes by pending RSCNs to NPR state. */
4176	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
4177		if (!NLP_CHK_NODE_ACT(ndlp) ||
4178		    (ndlp->nlp_state == NLP_STE_UNUSED_NODE) ||
4179		    !lpfc_rscn_payload_check(vport, ndlp->nlp_DID))
4180			continue;
4181		lpfc_disc_state_machine(vport, ndlp, NULL,
4182					NLP_EVT_DEVICE_RECOVERY);
4183		lpfc_cancel_retry_delay_tmo(vport, ndlp);
4184	}
4185	return 0;
4186}
4187
4188/**
4189 * lpfc_send_rscn_event - Send an RSCN event to management application
4190 * @vport: pointer to a host virtual N_Port data structure.
4191 * @cmdiocb: pointer to lpfc command iocb data structure.
4192 *
4193 * lpfc_send_rscn_event sends an RSCN netlink event to management
4194 * applications.
4195 */
4196static void
4197lpfc_send_rscn_event(struct lpfc_vport *vport,
4198		struct lpfc_iocbq *cmdiocb)
4199{
4200	struct lpfc_dmabuf *pcmd;
4201	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4202	uint32_t *payload_ptr;
4203	uint32_t payload_len;
4204	struct lpfc_rscn_event_header *rscn_event_data;
4205
4206	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4207	payload_ptr = (uint32_t *) pcmd->virt;
4208	payload_len = be32_to_cpu(*payload_ptr & ~ELS_CMD_MASK);
4209
4210	rscn_event_data = kmalloc(sizeof(struct lpfc_rscn_event_header) +
4211		payload_len, GFP_KERNEL);
4212	if (!rscn_event_data) {
4213		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4214			"0147 Failed to allocate memory for RSCN event\n");
4215		return;
4216	}
4217	rscn_event_data->event_type = FC_REG_RSCN_EVENT;
4218	rscn_event_data->payload_length = payload_len;
4219	memcpy(rscn_event_data->rscn_payload, payload_ptr,
4220		payload_len);
4221
4222	fc_host_post_vendor_event(shost,
4223		fc_get_event_number(),
4224		sizeof(struct lpfc_els_event_header) + payload_len,
4225		(char *)rscn_event_data,
4226		LPFC_NL_VENDOR_ID);
4227
4228	kfree(rscn_event_data);
4229}
4230
4231/**
4232 * lpfc_els_rcv_rscn - Process an unsolicited rscn iocb
4233 * @vport: pointer to a host virtual N_Port data structure.
4234 * @cmdiocb: pointer to lpfc command iocb data structure.
4235 * @ndlp: pointer to a node-list data structure.
4236 *
4237 * This routine processes an unsolicited RSCN (Registration State Change
4238 * Notification) IOCB. First, the payload of the unsolicited RSCN is walked
4239 * and the fc_host_post_event() routine is invoked to notify the FC transport
4240 * layer. If the discovery state machine is about to begin discovery, it just
4241 * accepts the RSCN and the discovery process will satisfy it. If this RSCN
4242 * only contains N_Port IDs for other vports on this HBA, it just accepts the
4243 * RSCN and ignores processing it. If the state machine is in the recovery
4244 * state, the fc_rscn_id_list of this @vport is walked and the
4245 * lpfc_rscn_recovery_check() routine is invoked to send a recovery event to
4246 * all nodes that match the RSCN payload. Otherwise, the lpfc_els_handle_rscn()
4247 * routine is invoked to handle the RSCN event.
4248 *
4249 * Return code
4250 *   0 - Just sent the acc response
4251 *   1 - Sent the acc response and is waiting for name server completion
4252 **/
4253static int
4254lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4255		  struct lpfc_nodelist *ndlp)
4256{
4257	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4258	struct lpfc_hba  *phba = vport->phba;
4259	struct lpfc_dmabuf *pcmd;
4260	uint32_t *lp, *datap;
4261	IOCB_t *icmd;
4262	uint32_t payload_len, length, nportid, *cmd;
4263	int rscn_cnt;
4264	int rscn_id = 0, hba_id = 0;
4265	int i;
4266
4267	icmd = &cmdiocb->iocb;
4268	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4269	lp = (uint32_t *) pcmd->virt;
4270
4271	payload_len = be32_to_cpu(*lp++ & ~ELS_CMD_MASK);
4272	payload_len -= sizeof(uint32_t);	/* take off word 0 */
4273	/* RSCN received */
4274	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4275			 "0214 RSCN received Data: x%x x%x x%x x%x\n",
4276			 vport->fc_flag, payload_len, *lp,
4277			 vport->fc_rscn_id_cnt);
4278
4279	/* Send an RSCN event to the management application */
4280	lpfc_send_rscn_event(vport, cmdiocb);
4281
4282	for (i = 0; i < payload_len/sizeof(uint32_t); i++)
4283		fc_host_post_event(shost, fc_get_event_number(),
4284			FCH_EVT_RSCN, lp[i]);
4285
4286	/* If we are about to begin discovery, just ACC the RSCN.
4287	 * Discovery processing will satisfy it.
4288	 */
4289	if (vport->port_state <= LPFC_NS_QRY) {
4290		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4291			"RCV RSCN ignore: did:x%x/ste:x%x flg:x%x",
4292			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4293
4294		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4295		return 0;
4296	}
4297
4298	/* If this RSCN just contains NPortIDs for other vports on this HBA,
4299	 * just ACC and ignore it.
4300	 */
4301	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
4302		!(vport->cfg_peer_port_login)) {
4303		i = payload_len;
4304		datap = lp;
4305		while (i > 0) {
4306			nportid = *datap++;
4307			nportid = ((be32_to_cpu(nportid)) & Mask_DID);
4308			i -= sizeof(uint32_t);
4309			rscn_id++;
4310			if (lpfc_find_vport_by_did(phba, nportid))
4311				hba_id++;
4312		}
4313		if (rscn_id == hba_id) {
4314			/* ALL NPortIDs in RSCN are on HBA */
4315			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4316					 "0219 Ignore RSCN "
4317					 "Data: x%x x%x x%x x%x\n",
4318					 vport->fc_flag, payload_len,
4319					 *lp, vport->fc_rscn_id_cnt);
4320			lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4321				"RCV RSCN vport:  did:x%x/ste:x%x flg:x%x",
4322				ndlp->nlp_DID, vport->port_state,
4323				ndlp->nlp_flag);
4324
4325			lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb,
4326				ndlp, NULL);
4327			return 0;
4328		}
4329	}
4330
4331	spin_lock_irq(shost->host_lock);
4332	if (vport->fc_rscn_flush) {
4333		/* Another thread is walking fc_rscn_id_list on this vport */
4334		vport->fc_flag |= FC_RSCN_DISCOVERY;
4335		spin_unlock_irq(shost->host_lock);
4336		/* Send back ACC */
4337		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4338		return 0;
4339	}
4340	/* Indicate we are walking fc_rscn_id_list on this vport */
4341	vport->fc_rscn_flush = 1;
4342	spin_unlock_irq(shost->host_lock);
4343	/* Get the array count after successfully acquiring the token */
4344	rscn_cnt = vport->fc_rscn_id_cnt;
4345	/* If we are already processing an RSCN, save the received
4346	 * RSCN payload buffer, cmdiocb->context2 to process later.
4347	 */
4348	if (vport->fc_flag & (FC_RSCN_MODE | FC_NDISC_ACTIVE)) {
4349		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4350			"RCV RSCN defer:  did:x%x/ste:x%x flg:x%x",
4351			ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4352
4353		spin_lock_irq(shost->host_lock);
4354		vport->fc_flag |= FC_RSCN_DEFERRED;
4355		if ((rscn_cnt < FC_MAX_HOLD_RSCN) &&
4356		    !(vport->fc_flag & FC_RSCN_DISCOVERY)) {
4357			vport->fc_flag |= FC_RSCN_MODE;
4358			spin_unlock_irq(shost->host_lock);
4359			if (rscn_cnt) {
4360				cmd = vport->fc_rscn_id_list[rscn_cnt-1]->virt;
4361				length = be32_to_cpu(*cmd & ~ELS_CMD_MASK);
4362			}
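			/* If the new entries fit, append them to the last
			 * saved RSCN payload and grow its length word;
			 * otherwise keep this payload as a new list entry.
			 */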
4363			if ((rscn_cnt) &&
4364			    (payload_len + length <= LPFC_BPL_SIZE)) {
4365				*cmd &= ELS_CMD_MASK;
4366				*cmd |= cpu_to_be32(payload_len + length);
4367				memcpy(((uint8_t *)cmd) + length, lp,
4368				       payload_len);
4369			} else {
4370				vport->fc_rscn_id_list[rscn_cnt] = pcmd;
4371				vport->fc_rscn_id_cnt++;
4372				/* If we zero cmdiocb->context2, the calling
4373				 * routine will not try to free it.
4374				 */
4375				cmdiocb->context2 = NULL;
4376			}
4377			/* Deferred RSCN */
4378			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4379					 "0235 Deferred RSCN "
4380					 "Data: x%x x%x x%x\n",
4381					 vport->fc_rscn_id_cnt, vport->fc_flag,
4382					 vport->port_state);
4383		} else {
4384			vport->fc_flag |= FC_RSCN_DISCOVERY;
4385			spin_unlock_irq(shost->host_lock);
4386			/* ReDiscovery RSCN */
4387			lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4388					 "0234 ReDiscovery RSCN "
4389					 "Data: x%x x%x x%x\n",
4390					 vport->fc_rscn_id_cnt, vport->fc_flag,
4391					 vport->port_state);
4392		}
4393		/* Indicate we are done walking fc_rscn_id_list on this vport */
4394		vport->fc_rscn_flush = 0;
4395		/* Send back ACC */
4396		lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4397		/* send RECOVERY event for ALL nodes that match RSCN payload */
4398		lpfc_rscn_recovery_check(vport);
4399		spin_lock_irq(shost->host_lock);
4400		vport->fc_flag &= ~FC_RSCN_DEFERRED;
4401		spin_unlock_irq(shost->host_lock);
4402		return 0;
4403	}
4404	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
4405		"RCV RSCN:        did:x%x/ste:x%x flg:x%x",
4406		ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag);
4407
4408	spin_lock_irq(shost->host_lock);
4409	vport->fc_flag |= FC_RSCN_MODE;
4410	spin_unlock_irq(shost->host_lock);
4411	vport->fc_rscn_id_list[vport->fc_rscn_id_cnt++] = pcmd;
4412	/* Indicate we are done walking fc_rscn_id_list on this vport */
4413	vport->fc_rscn_flush = 0;
4414	/*
4415	 * If we zero cmdiocb->context2, the calling routine will
4416	 * not try to free it.
4417	 */
4418	cmdiocb->context2 = NULL;
4419	lpfc_set_disctmo(vport);
4420	/* Send back ACC */
4421	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4422	/* send RECOVERY event for ALL nodes that match RSCN payload */
4423	lpfc_rscn_recovery_check(vport);
4424	return lpfc_els_handle_rscn(vport);
4425}
4426
4427/**
4428 * lpfc_els_handle_rscn - Handle rscn for a vport
4429 * @vport: pointer to a host virtual N_Port data structure.
4430 *
4431 * This routine handles the Registration State Change Notification
4432 * (RSCN) for a @vport. If login to NameServer does not exist, a new ndlp shall
4433 * be created and a Port Login (PLOGI) to the NameServer is issued. Otherwise,
4434 * if the ndlp to NameServer exists, a Common Transport (CT) command to the
4435 * NameServer shall be issued. If CT command to the NameServer fails to be
4436 * issued, the lpfc_els_flush_rscn() routine shall be invoked to clean up any
4437 * RSCN activities with the @vport.
4438 *
4439 * Return code
4440 *   0 - Cleaned up rscn on the @vport
4441 *   1 - Wait for plogi to name server before proceeding
4442 **/
4443int
4444lpfc_els_handle_rscn(struct lpfc_vport *vport)
4445{
4446	struct lpfc_nodelist *ndlp;
4447	struct lpfc_hba *phba = vport->phba;
4448
4449	/* Ignore RSCN if the port is being torn down. */
4450	if (vport->load_flag & FC_UNLOADING) {
4451		lpfc_els_flush_rscn(vport);
4452		return 0;
4453	}
4454
4455	/* Start timer for RSCN processing */
4456	lpfc_set_disctmo(vport);
4457
4458	/* RSCN processed */
4459	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
4460			 "0215 RSCN processed Data: x%x x%x x%x x%x\n",
4461			 vport->fc_flag, 0, vport->fc_rscn_id_cnt,
4462			 vport->port_state);
4463
4464	/* To process RSCN, first compare RSCN data with NameServer */
4465	vport->fc_ns_retry = 0;
4466	vport->num_disc_nodes = 0;
4467
4468	ndlp = lpfc_findnode_did(vport, NameServer_DID);
4469	if (ndlp && NLP_CHK_NODE_ACT(ndlp)
4470	    && ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) {
4471		/* Good ndlp, issue CT Request to NameServer */
4472		if (lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, 0, 0) == 0)
4473			/* Wait for NameServer query cmpl before we can
4474			   continue */
4475			return 1;
4476	} else {
4477		/* If login to NameServer does not exist, issue one */
4478		/* Good status, issue PLOGI to NameServer */
4479		ndlp = lpfc_findnode_did(vport, NameServer_DID);
4480		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
4481			/* Wait for NameServer login cmpl before we can
4482			   continue */
4483			return 1;
4484
4485		if (ndlp) {
4486			ndlp = lpfc_enable_node(vport, ndlp,
4487						NLP_STE_PLOGI_ISSUE);
4488			if (!ndlp) {
4489				lpfc_els_flush_rscn(vport);
4490				return 0;
4491			}
4492			ndlp->nlp_prev_state = NLP_STE_UNUSED_NODE;
4493		} else {
4494			ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
4495			if (!ndlp) {
4496				lpfc_els_flush_rscn(vport);
4497				return 0;
4498			}
4499			lpfc_nlp_init(vport, ndlp, NameServer_DID);
4500			ndlp->nlp_prev_state = ndlp->nlp_state;
4501			lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
4502		}
4503		ndlp->nlp_type |= NLP_FABRIC;
4504		lpfc_issue_els_plogi(vport, NameServer_DID, 0);
4505		/* Wait for NameServer login cmpl before we can
4506		 * continue
4507		 */
4508		return 1;
4509	}
4510
4511	lpfc_els_flush_rscn(vport);
4512	return 0;
4513}
4514
4515/**
4516 * lpfc_els_rcv_flogi - Process an unsolicited flogi iocb
4517 * @vport: pointer to a host virtual N_Port data structure.
4518 * @cmdiocb: pointer to lpfc command iocb data structure.
4519 * @ndlp: pointer to a node-list data structure.
4520 *
4521 * This routine processes Fabric Login (FLOGI) IOCB received as an ELS
4522 * unsolicited event. An unsolicited FLOGI can be received in a point-to-
4523 * point topology. As an unsolicited FLOGI should not be received in a loop
4524 * mode, any unsolicited FLOGI received in loop mode shall be ignored. The
4525 * lpfc_check_sparm() routine is invoked to check the parameters in the
4526 * unsolicited FLOGI. If parameter validation fails, the routine
4527 * lpfc_els_rsp_reject() shall be called with the reject reason explanation
4528 * set to LSEXP_SPARM_OPTIONS to reject the FLOGI. Otherwise, the Port WWN in
4529 * the FLOGI shall be compared with the Port WWN of the @vport to determine
4530 * who will initiate PLOGI. The party with the higher lexicographical value
4531 * shall have higher priority (as the winning port) and will initiate PLOGI and
4532 * communicate Port_IDs (Addresses) for both nodes in PLOGI. The result
4533 * of this will be marked in the @vport fc_flag field with FC_PT2PT_PLOGI
4534 * and then the lpfc_els_rsp_acc() routine is invoked to accept the FLOGI.
4535 *
4536 * Return code
4537 *   0 - Successfully processed the unsolicited flogi
4538 *   1 - Failed to process the unsolicited flogi
4539 **/
4540static int
4541lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4542		   struct lpfc_nodelist *ndlp)
4543{
4544	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
4545	struct lpfc_hba  *phba = vport->phba;
4546	struct lpfc_dmabuf *pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4547	uint32_t *lp = (uint32_t *) pcmd->virt;
4548	IOCB_t *icmd = &cmdiocb->iocb;
4549	struct serv_parm *sp;
4550	LPFC_MBOXQ_t *mbox;
4551	struct ls_rjt stat;
4552	uint32_t cmd, did;
4553	int rc;
4554
4555	cmd = *lp++;
4556	sp = (struct serv_parm *) lp;
4557
4558	/* FLOGI received */
4559
4560	lpfc_set_disctmo(vport);
4561
4562	if (phba->fc_topology == TOPOLOGY_LOOP) {
4563		/* We should never receive a FLOGI in loop mode, ignore it */
4564		did = icmd->un.elsreq64.remoteID;
4565
4566		/* An FLOGI ELS command <elsCmd> was received from DID <did> in
4567		   Loop Mode */
4568		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
4569				 "0113 An FLOGI ELS command x%x was "
4570				 "received from DID x%x in Loop Mode\n",
4571				 cmd, did);
4572		return 1;
4573	}
4574
4575	did = Fabric_DID;
4576
4577	if ((lpfc_check_sparm(vport, ndlp, sp, CLASS3, 1))) {
4578		/* For a FLOGI that we accept, if our portname is greater
4579		 * than the remote portname, we initiate Nport login.
4580		 */
4581
4582		rc = memcmp(&vport->fc_portname, &sp->portName,
4583			    sizeof(struct lpfc_name));
4584
4585		if (!rc) {
4586			mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4587			if (!mbox)
4588				return 1;
4589
4590			lpfc_linkdown(phba);
4591			lpfc_init_link(phba, mbox,
4592				       phba->cfg_topology,
4593				       phba->cfg_link_speed);
4594			mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
4595			mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4596			mbox->vport = vport;
4597			rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
4598			lpfc_set_loopback_flag(phba);
4599			if (rc == MBX_NOT_FINISHED) {
4600				mempool_free(mbox, phba->mbox_mem_pool);
4601			}
4602			return 1;
4603		} else if (rc > 0) {	/* greater than */
4604			spin_lock_irq(shost->host_lock);
4605			vport->fc_flag |= FC_PT2PT_PLOGI;
4606			spin_unlock_irq(shost->host_lock);
4607		}
4608		spin_lock_irq(shost->host_lock);
4609		vport->fc_flag |= FC_PT2PT;
4610		vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
4611		spin_unlock_irq(shost->host_lock);
4612	} else {
4613		/* Reject this request because of invalid parameters */
4614		stat.un.b.lsRjtRsvd0 = 0;
4615		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4616		stat.un.b.lsRjtRsnCodeExp = LSEXP_SPARM_OPTIONS;
4617		stat.un.b.vendorUnique = 0;
4618		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4619			NULL);
4620		return 1;
4621	}
4622
4623	/* Send back ACC */
4624	lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL);
4625
4626	return 0;
4627}
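
/*
 * Worked example (illustrative only): in point-to-point mode the port whose
 * WWPN compares greater initiates the PLOGI.  With a hypothetical local WWPN
 * of 20:00:00:00:c9:00:00:02 and a remote WWPN of 20:00:00:00:c9:00:00:01 in
 * the FLOGI payload, the memcmp() above returns a positive value, so
 * FC_PT2PT_PLOGI is set and this port sends the PLOGI.  With the WWPNs
 * swapped, the remote port is expected to initiate it, and if they are
 * identical the link is reinitialized via lpfc_init_link().
 */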
4628
4629/**
4630 * lpfc_els_rcv_rnid - Process an unsolicited rnid iocb
4631 * @vport: pointer to a host virtual N_Port data structure.
4632 * @cmdiocb: pointer to lpfc command iocb data structure.
4633 * @ndlp: pointer to a node-list data structure.
4634 *
4635 * This routine processes Request Node Identification Data (RNID) IOCB
4636 * received as an ELS unsolicited event. Only when the RNID specifies format
4637 * 0x0 or 0xDF (Topology Discovery Specific Node Identification Data) will
4638 * this routine invoke the lpfc_els_rsp_rnid_acc() routine to
4639 * Accept (ACC) the RNID ELS command. All the other RNID formats are
4640 * rejected by invoking the lpfc_els_rsp_reject() routine.
4641 *
4642 * Return code
4643 *   0 - Successfully processed rnid iocb (currently always return 0)
4644 **/
4645static int
4646lpfc_els_rcv_rnid(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4647		  struct lpfc_nodelist *ndlp)
4648{
4649	struct lpfc_dmabuf *pcmd;
4650	uint32_t *lp;
4651	IOCB_t *icmd;
4652	RNID *rn;
4653	struct ls_rjt stat;
4654	uint32_t cmd, did;
4655
4656	icmd = &cmdiocb->iocb;
4657	did = icmd->un.elsreq64.remoteID;
4658	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4659	lp = (uint32_t *) pcmd->virt;
4660
4661	cmd = *lp++;
4662	rn = (RNID *) lp;
4663
4664	/* RNID received */
4665
4666	switch (rn->Format) {
4667	case 0:
4668	case RNID_TOPOLOGY_DISC:
4669		/* Send back ACC */
4670		lpfc_els_rsp_rnid_acc(vport, rn->Format, cmdiocb, ndlp);
4671		break;
4672	default:
4673		/* Reject this request because format not supported */
4674		stat.un.b.lsRjtRsvd0 = 0;
4675		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4676		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4677		stat.un.b.vendorUnique = 0;
4678		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
4679			NULL);
4680	}
4681	return 0;
4682}
4683
4684/**
4685 * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
4686 * @vport: pointer to a host virtual N_Port data structure.
4687 * @cmdiocb: pointer to lpfc command iocb data structure.
4688 * @ndlp: pointer to a node-list data structure.
4689 *
4690 * This routine processes a Link Incident Report Registration (LIRR) IOCB
4691 * received as an ELS unsolicited event. Currently, this function just invokes
4692 * the lpfc_els_rsp_reject() routine to reject the LIRR IOCB unconditionally.
4693 *
4694 * Return code
4695 *   0 - Successfully processed lirr iocb (currently always return 0)
4696 **/
4697static int
4698lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4699		  struct lpfc_nodelist *ndlp)
4700{
4701	struct ls_rjt stat;
4702
4703	/* For now, unconditionally reject this command */
4704	stat.un.b.lsRjtRsvd0 = 0;
4705	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4706	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4707	stat.un.b.vendorUnique = 0;
4708	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
4709	return 0;
4710}
4711
4712/**
4713 * lpfc_els_rcv_rrq - Process an unsolicited rrq iocb
4714 * @vport: pointer to a host virtual N_Port data structure.
4715 * @cmdiocb: pointer to lpfc command iocb data structure.
4716 * @ndlp: pointer to a node-list data structure.
4717 *
4718 * This routine processes a Reinstate Recovery Qualifier (RRQ) IOCB
4719 * received as an ELS unsolicited event. A request to RRQ shall only
4720 * be accepted if the Originator Nx_Port N_Port_ID or the Responder
4721 * Nx_Port N_Port_ID of the target Exchange is the same as the
4722 * N_Port_ID of the Nx_Port that makes the request. If the RRQ is
4723 * not accepted, an LS_RJT with reason code "Unable to perform
4724 * command request" and reason code explanation "Invalid Originator
4725 * S_ID" shall be returned. For now, we just unconditionally accept
4726 * RRQ from the target.
4727 **/
4728static void
4729lpfc_els_rcv_rrq(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4730		 struct lpfc_nodelist *ndlp)
4731{
4732	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
4733}
4734
4735/**
4736 * lpfc_els_rsp_rps_acc - Completion callback for MBX_READ_LNK_STAT mbox cmd
4737 * @phba: pointer to lpfc hba data structure.
4738 * @pmb: pointer to the driver internal queue element for mailbox command.
4739 *
4740 * This routine is the completion callback function for the MBX_READ_LNK_STAT
4741 * mailbox command. This callback function is to actually send the Accept
4742 * (ACC) response to a Read Port Status (RPS) unsolicited IOCB event. It
4743 * collects the link statistics from the completion of the MBX_READ_LNK_STAT
4744 * mailbox command, constructs the RPS response with the link statistics
4745 * collected, and then invokes the lpfc_sli_issue_iocb() routine to send ACC
4746 * response to the RPS.
4747 *
4748 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4749 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4750 * will be stored into the context1 field of the IOCB for the completion
4751 * callback function to the RPS Accept Response ELS IOCB command.
4752 *
4753 **/
4754static void
4755lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
4756{
4757	MAILBOX_t *mb;
4758	IOCB_t *icmd;
4759	RPS_RSP *rps_rsp;
4760	uint8_t *pcmd;
4761	struct lpfc_iocbq *elsiocb;
4762	struct lpfc_nodelist *ndlp;
4763	uint16_t xri, status;
4764	uint32_t cmdsize;
4765
4766	mb = &pmb->u.mb;
4767
4768	ndlp = (struct lpfc_nodelist *) pmb->context2;
4769	xri = (uint16_t) ((unsigned long)(pmb->context1));
4770	pmb->context1 = NULL;
4771	pmb->context2 = NULL;
4772
4773	if (mb->mbxStatus) {
4774		mempool_free(pmb, phba->mbox_mem_pool);
4775		return;
4776	}
4777
4778	cmdsize = sizeof(RPS_RSP) + sizeof(uint32_t);
4779	mempool_free(pmb, phba->mbox_mem_pool);
4780	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
4781				     lpfc_max_els_tries, ndlp,
4782				     ndlp->nlp_DID, ELS_CMD_ACC);
4783
4784	/* Decrement the ndlp reference count from previous mbox command */
4785	lpfc_nlp_put(ndlp);
4786
4787	if (!elsiocb)
4788		return;
4789
4790	icmd = &elsiocb->iocb;
4791	icmd->ulpContext = xri;
4792
4793	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4794	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4795	pcmd += sizeof(uint32_t); /* Skip past command */
4796	rps_rsp = (RPS_RSP *)pcmd;
4797
4798	if (phba->fc_topology != TOPOLOGY_LOOP)
4799		status = 0x10;
4800	else
4801		status = 0x8;
4802	if (phba->pport->fc_flag & FC_FABRIC)
4803		status |= 0x4;
4804
4805	rps_rsp->rsvd1 = 0;
4806	rps_rsp->portStatus = cpu_to_be16(status);
4807	rps_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
4808	rps_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
4809	rps_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
4810	rps_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
4811	rps_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
4812	rps_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
4813	/* Xmit ELS RPS ACC response tag <ulpIoTag> */
4814	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
4815			 "0118 Xmit ELS RPS ACC response tag x%x xri x%x, "
4816			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
4817			 elsiocb->iotag, elsiocb->iocb.ulpContext,
4818			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4819			 ndlp->nlp_rpi);
4820	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4821	phba->fc_stat.elsXmitACC++;
4822	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
4823		lpfc_els_free_iocb(phba, elsiocb);
4824	return;
4825}
4826
4827/**
4828 * lpfc_els_rcv_rps - Process an unsolicited rps iocb
4829 * @vport: pointer to a host virtual N_Port data structure.
4830 * @cmdiocb: pointer to lpfc command iocb data structure.
4831 * @ndlp: pointer to a node-list data structure.
4832 *
4833 * This routine processes Read Port Status (RPS) IOCB received as an
4834 * ELS unsolicited event. It first checks the remote port state. If the
4835 * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
4836 * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
4837 * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
4838 * to read the HBA link statistics. The callback function
4839 * lpfc_els_rsp_rps_acc(), set on the MBX_READ_LNK_STAT mailbox command,
4840 * then actually sends out the RPS Accept (ACC) response.
4841 *
4842 * Return codes
4843 *   0 - Successfully processed rps iocb (currently always return 0)
4844 **/
4845static int
4846lpfc_els_rcv_rps(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4847		 struct lpfc_nodelist *ndlp)
4848{
4849	struct lpfc_hba *phba = vport->phba;
4850	uint32_t *lp;
4851	uint8_t flag;
4852	LPFC_MBOXQ_t *mbox;
4853	struct lpfc_dmabuf *pcmd;
4854	RPS *rps;
4855	struct ls_rjt stat;
4856
4857	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
4858	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
4859		/* reject the unsolicited RPS request and done with it */
4860		goto reject_out;
4861
4862	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
4863	lp = (uint32_t *) pcmd->virt;
4864	flag = (be32_to_cpu(*lp++) & 0xf);
4865	rps = (RPS *) lp;
4866
4867	if ((flag == 0) ||
4868	    ((flag == 1) && (be32_to_cpu(rps->un.portNum) == 0)) ||
4869	    ((flag == 2) && (memcmp(&rps->un.portName, &vport->fc_portname,
4870				    sizeof(struct lpfc_name)) == 0))) {
4871
4872		printk("Fix me....\n");
4873		dump_stack();
4874		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
4875		if (mbox) {
4876			lpfc_read_lnk_stat(phba, mbox);
4877			mbox->context1 =
4878			    (void *)((unsigned long) cmdiocb->iocb.ulpContext);
4879			mbox->context2 = lpfc_nlp_get(ndlp);
4880			mbox->vport = vport;
4881			mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
4882			if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
4883				!= MBX_NOT_FINISHED)
4884				/* Mbox completion will send ELS Response */
4885				return 0;
4886			/* Decrement reference count used for the failed mbox
4887			 * command.
4888			 */
4889			lpfc_nlp_put(ndlp);
4890			mempool_free(mbox, phba->mbox_mem_pool);
4891		}
4892	}
4893
4894reject_out:
4895	/* issue rejection response */
4896	stat.un.b.lsRjtRsvd0 = 0;
4897	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
4898	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
4899	stat.un.b.vendorUnique = 0;
4900	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
4901	return 0;
4902}
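
/*
 * Illustrative sketch only (not part of the driver, kept out of the build
 * with #if 0): the deferred-response pattern used by lpfc_els_rcv_rps()
 * above. The unsolicited handler just issues a mailbox command; the
 * exchange ID and a referenced ndlp ride along in context1/context2, and
 * the ELS ACC itself is sent from the mailbox completion handler
 * lpfc_els_rsp_rps_acc(). The function name is hypothetical.
 */
#if 0
static int
example_defer_rsp_to_mbox(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
			  struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	LPFC_MBOXQ_t *mbox;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
	if (!mbox)
		return 1;
	lpfc_read_lnk_stat(phba, mbox);
	mbox->context1 = (void *)((unsigned long) cmdiocb->iocb.ulpContext);
	mbox->context2 = lpfc_nlp_get(ndlp);	/* held until completion */
	mbox->vport = vport;
	mbox->mbox_cmpl = lpfc_els_rsp_rps_acc;
	if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) == MBX_NOT_FINISHED) {
		/* undo the reference taken for the failed mailbox command */
		lpfc_nlp_put(ndlp);
		mempool_free(mbox, phba->mbox_mem_pool);
		return 1;
	}
	return 0;	/* lpfc_els_rsp_rps_acc() will transmit the ACC */
}
#endif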
4903
4904/**
4905 * lpfc_els_rsp_rpl_acc - Issue an accept rpl els command
4906 * @vport: pointer to a host virtual N_Port data structure.
4907 * @cmdsize: size of the ELS command.
4908 * @oldiocb: pointer to the original lpfc command iocb data structure.
4909 * @ndlp: pointer to a node-list data structure.
4910 *
4911 * This routine issues an Accept (ACC) Read Port List (RPL) ELS command.
4912 * It is to be called by the lpfc_els_rcv_rpl() routine to accept the RPL.
4913 *
4914 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
4915 * will be incremented by 1 for holding the ndlp and the reference to ndlp
4916 * will be stored into the context1 field of the IOCB for the completion
4917 * callback function to the RPL Accept Response ELS command.
4918 *
4919 * Return code
4920 *   0 - Successfully issued ACC RPL ELS command
4921 *   1 - Failed to issue ACC RPL ELS command
4922 **/
4923static int
4924lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
4925		     struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
4926{
4927	struct lpfc_hba *phba = vport->phba;
4928	IOCB_t *icmd, *oldcmd;
4929	RPL_RSP rpl_rsp;
4930	struct lpfc_iocbq *elsiocb;
4931	uint8_t *pcmd;
4932
4933	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
4934				     ndlp->nlp_DID, ELS_CMD_ACC);
4935
4936	if (!elsiocb)
4937		return 1;
4938
4939	icmd = &elsiocb->iocb;
4940	oldcmd = &oldiocb->iocb;
4941	icmd->ulpContext = oldcmd->ulpContext;	/* Xri */
4942
4943	pcmd = (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
4944	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
4945	pcmd += sizeof(uint16_t);
4946	*((uint16_t *)(pcmd)) = be16_to_cpu(cmdsize);
4947	pcmd += sizeof(uint16_t);
4948
4949	/* Setup the RPL ACC payload */
4950	rpl_rsp.listLen = be32_to_cpu(1);
4951	rpl_rsp.index = 0;
4952	rpl_rsp.port_num_blk.portNum = 0;
4953	rpl_rsp.port_num_blk.portID = be32_to_cpu(vport->fc_myDID);
4954	memcpy(&rpl_rsp.port_num_blk.portName, &vport->fc_portname,
4955	    sizeof(struct lpfc_name));
4956	memcpy(pcmd, &rpl_rsp, cmdsize - sizeof(uint32_t));
4957	/* Xmit ELS RPL ACC response tag <ulpIoTag> */
4958	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
4959			 "0120 Xmit ELS RPL ACC response tag x%x "
4960			 "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, "
4961			 "rpi x%x\n",
4962			 elsiocb->iotag, elsiocb->iocb.ulpContext,
4963			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
4964			 ndlp->nlp_rpi);
4965	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
4966	phba->fc_stat.elsXmitACC++;
4967	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
4968	    IOCB_ERROR) {
4969		lpfc_els_free_iocb(phba, elsiocb);
4970		return 1;
4971	}
4972	return 0;
4973}
4974
4975/**
4976 * lpfc_els_rcv_rpl - Process an unsolicited rpl iocb
4977 * @vport: pointer to a host virtual N_Port data structure.
4978 * @cmdiocb: pointer to lpfc command iocb data structure.
4979 * @ndlp: pointer to a node-list data structure.
4980 *
4981 * This routine processes Read Port List (RPL) IOCB received as an ELS
4982 * unsolicited event. It first checks the remote port state. If the remote
4983 * port is in neither NLP_STE_UNMAPPED_NODE nor NLP_STE_MAPPED_NODE state, it
4984 * invokes the lpfc_els_rsp_reject() routine to send reject response.
4985 * Otherwise, this routine then invokes the lpfc_els_rsp_rpl_acc() routine
4986 * to accept the RPL.
4987 *
4988 * Return code
4989 *   0 - Successfully processed rpl iocb (currently always return 0)
4990 **/
4991static int
4992lpfc_els_rcv_rpl(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
4993		 struct lpfc_nodelist *ndlp)
4994{
4995	struct lpfc_dmabuf *pcmd;
4996	uint32_t *lp;
4997	uint32_t maxsize;
4998	uint16_t cmdsize;
4999	RPL *rpl;
5000	struct ls_rjt stat;
5001
5002	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
5003	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
5004		/* issue rejection response */
5005		stat.un.b.lsRjtRsvd0 = 0;
5006		stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
5007		stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
5008		stat.un.b.vendorUnique = 0;
5009		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp,
5010			NULL);
5011		/* rejected the unsolicited RPL request and done with it */
5012		return 0;
5013	}
5014
5015	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5016	lp = (uint32_t *) pcmd->virt;
5017	rpl = (RPL *) (lp + 1);
5018
5019	maxsize = be32_to_cpu(rpl->maxsize);
5020
5021	/* We support only one port */
5022	if ((rpl->index == 0) &&
5023	    ((maxsize == 0) ||
5024	     ((maxsize * sizeof(uint32_t)) >= sizeof(RPL_RSP)))) {
5025		cmdsize = sizeof(uint32_t) + sizeof(RPL_RSP);
5026	} else {
5027		cmdsize = sizeof(uint32_t) + maxsize * sizeof(uint32_t);
5028	}
5029	lpfc_els_rsp_rpl_acc(vport, cmdsize, cmdiocb, ndlp);
5030
5031	return 0;
5032}
5033
5034/**
5035 * lpfc_els_rcv_farp - Process an unsolicited farp request els command
5036 * @vport: pointer to a virtual N_Port data structure.
5037 * @cmdiocb: pointer to lpfc command iocb data structure.
5038 * @ndlp: pointer to a node-list data structure.
5039 *
5040 * This routine processes Fibre Channel Address Resolution Protocol
5041 * (FARP) Request IOCB received as an ELS unsolicited event. Currently,
5042 * the lpfc driver only supports matching on WWPN or WWNN for FARP. As such,
5043 * FARP_MATCH_PORT flag and FARP_MATCH_NODE flag are checked against the
5044 * Match Flag in the FARP request IOCB: if FARP_MATCH_PORT flag is set, the
5045 * remote PortName is compared against the FC PortName stored in the @vport
5046 * data structure; if FARP_MATCH_NODE flag is set, the remote NodeName is
5047 * compared against the FC NodeName stored in the @vport data structure.
5048 * If any of these matches and the FARP_REQUEST_FARPR flag is set in the
5049 * FARP request IOCB Response Flag, the lpfc_issue_els_farpr() routine is
5050 * invoked to send out FARP Response to the remote node. Before sending the
5051 * FARP Response, however, the FARP_REQUEST_PLOGI flag is checked in the FARP
5052 * request IOCB Response Flag and, if it is set, the lpfc_issue_els_plogi()
5053 * routine is invoked to log into the remote port first.
5054 *
5055 * Return code
5056 *   0 - Either the FARP Match Mode is not supported or the FARP was processed
5057 **/
5058static int
5059lpfc_els_rcv_farp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5060		  struct lpfc_nodelist *ndlp)
5061{
5062	struct lpfc_dmabuf *pcmd;
5063	uint32_t *lp;
5064	IOCB_t *icmd;
5065	FARP *fp;
5066	uint32_t cmd, cnt, did;
5067
5068	icmd = &cmdiocb->iocb;
5069	did = icmd->un.elsreq64.remoteID;
5070	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5071	lp = (uint32_t *) pcmd->virt;
5072
5073	cmd = *lp++;
5074	fp = (FARP *) lp;
5075	/* FARP-REQ received from DID <did> */
5076	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5077			 "0601 FARP-REQ received from DID x%x\n", did);
5078	/* We will only support match on WWPN or WWNN */
5079	if (fp->Mflags & ~(FARP_MATCH_NODE | FARP_MATCH_PORT)) {
5080		return 0;
5081	}
5082
5083	cnt = 0;
5084	/* If this FARP command is searching for my portname */
5085	if (fp->Mflags & FARP_MATCH_PORT) {
5086		if (memcmp(&fp->RportName, &vport->fc_portname,
5087			   sizeof(struct lpfc_name)) == 0)
5088			cnt = 1;
5089	}
5090
5091	/* If this FARP command is searching for my nodename */
5092	if (fp->Mflags & FARP_MATCH_NODE) {
5093		if (memcmp(&fp->RnodeName, &vport->fc_nodename,
5094			   sizeof(struct lpfc_name)) == 0)
5095			cnt = 1;
5096	}
5097
5098	if (cnt) {
5099		if ((ndlp->nlp_state == NLP_STE_UNMAPPED_NODE) ||
5100		   (ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
5101			/* Log back into the node before sending the FARP. */
5102			if (fp->Rflags & FARP_REQUEST_PLOGI) {
5103				ndlp->nlp_prev_state = ndlp->nlp_state;
5104				lpfc_nlp_set_state(vport, ndlp,
5105						   NLP_STE_PLOGI_ISSUE);
5106				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
5107			}
5108
5109			/* Send a FARP response to that node */
5110			if (fp->Rflags & FARP_REQUEST_FARPR)
5111				lpfc_issue_els_farpr(vport, did, 0);
5112		}
5113	}
5114	return 0;
5115}
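
/*
 * Illustrative sketch only (not part of the driver, kept out of the build
 * with #if 0): the name-matching rule applied by lpfc_els_rcv_farp()
 * above. A FARP request targets this port when FARP_MATCH_PORT is set and
 * the requested PortName equals the vport's, or when FARP_MATCH_NODE is
 * set and the requested NodeName equals the vport's. The helper name is
 * hypothetical.
 */
#if 0
static int
example_farp_matches_vport(struct lpfc_vport *vport, FARP *fp)
{
	if ((fp->Mflags & FARP_MATCH_PORT) &&
	    !memcmp(&fp->RportName, &vport->fc_portname,
		    sizeof(struct lpfc_name)))
		return 1;
	if ((fp->Mflags & FARP_MATCH_NODE) &&
	    !memcmp(&fp->RnodeName, &vport->fc_nodename,
		    sizeof(struct lpfc_name)))
		return 1;
	return 0;
}
#endif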
5116
5117/**
5118 * lpfc_els_rcv_farpr - Process an unsolicited farp response iocb
5119 * @vport: pointer to a host virtual N_Port data structure.
5120 * @cmdiocb: pointer to lpfc command iocb data structure.
5121 * @ndlp: pointer to a node-list data structure.
5122 *
5123 * This routine processes Fibre Channel Address Resolution Protocol
5124 * Response (FARPR) IOCB received as an ELS unsolicited event. It simply
5125 * invokes the lpfc_els_rsp_acc() routine to the remote node to accept
5126 * the FARP response request.
5127 *
5128 * Return code
5129 *   0 - Successfully processed FARPR IOCB (currently always return 0)
5130 **/
5131static int
5132lpfc_els_rcv_farpr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5133		   struct lpfc_nodelist  *ndlp)
5134{
5135	struct lpfc_dmabuf *pcmd;
5136	uint32_t *lp;
5137	IOCB_t *icmd;
5138	uint32_t cmd, did;
5139
5140	icmd = &cmdiocb->iocb;
5141	did = icmd->un.elsreq64.remoteID;
5142	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
5143	lp = (uint32_t *) pcmd->virt;
5144
5145	cmd = *lp++;
5146	/* FARP-RSP received from DID <did> */
5147	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5148			 "0600 FARP-RSP received from DID x%x\n", did);
5149	/* ACCEPT the Farp resp request */
5150	lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
5151
5152	return 0;
5153}
5154
5155/**
5156 * lpfc_els_rcv_fan - Process an unsolicited fan iocb command
5157 * @vport: pointer to a host virtual N_Port data structure.
5158 * @cmdiocb: pointer to lpfc command iocb data structure.
5159 * @fan_ndlp: pointer to a node-list data structure.
5160 *
5161 * This routine processes a Fabric Address Notification (FAN) IOCB
5162 * command received as an ELS unsolicited event. The FAN ELS command will
5163 * only be processed on a physical port (i.e., the @vport represents the
5164 * physical port). The fabric NodeName and PortName from the FAN IOCB are
5165 * compared against those in the phba data structure. If any of those is
5166 * different, the lpfc_initial_flogi() routine is invoked to initiate
5167 * Fabric Login (FLOGI) to the fabric and start the discovery over. Otherwise,
5168 * if both of those are identical, the lpfc_issue_fabric_reglogin() routine
5169 * is invoked to register login to the fabric.
5170 *
5171 * Return code
5172 *   0 - Successfully processed fan iocb (currently always return 0).
5173 **/
5174static int
5175lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
5176		 struct lpfc_nodelist *fan_ndlp)
5177{
5178	struct lpfc_hba *phba = vport->phba;
5179	uint32_t *lp;
5180	FAN *fp;
5181
5182	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0265 FAN received\n");
5183	lp = (uint32_t *)((struct lpfc_dmabuf *)cmdiocb->context2)->virt;
5184	fp = (FAN *) ++lp;
5185	/* FAN received; Fan does not have a reply sequence */
5186	if ((vport == phba->pport) &&
5187	    (vport->port_state == LPFC_LOCAL_CFG_LINK)) {
5188		if ((memcmp(&phba->fc_fabparam.nodeName, &fp->FnodeName,
5189			    sizeof(struct lpfc_name))) ||
5190		    (memcmp(&phba->fc_fabparam.portName, &fp->FportName,
5191			    sizeof(struct lpfc_name)))) {
5192			/* This port has switched fabrics. FLOGI is required */
5193			lpfc_initial_flogi(vport);
5194		} else {
5195			/* FAN verified - skip FLOGI */
5196			vport->fc_myDID = vport->fc_prevDID;
5197			if (phba->sli_rev < LPFC_SLI_REV4)
5198				lpfc_issue_fabric_reglogin(vport);
5199			else
5200				lpfc_issue_reg_vfi(vport);
5201		}
5202	}
5203	return 0;
5204}
5205
5206/**
5207 * lpfc_els_timeout - Handler function for the els timer
5208 * @ptr: holder for the timer function associated data.
5209 *
5210 * This routine is invoked by the ELS timer after timeout. It posts the ELS
5211 * timer timeout event by setting the WORKER_ELS_TMO bit in the work port
5212 * event bitmap and then invokes the lpfc_worker_wake_up() routine to wake
5213 * up the worker thread. It is for the worker thread to invoke the routine
5214 * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO.
5215 **/
5216void
5217lpfc_els_timeout(unsigned long ptr)
5218{
5219	struct lpfc_vport *vport = (struct lpfc_vport *) ptr;
5220	struct lpfc_hba   *phba = vport->phba;
5221	uint32_t tmo_posted;
5222	unsigned long iflag;
5223
5224	spin_lock_irqsave(&vport->work_port_lock, iflag);
5225	tmo_posted = vport->work_port_events & WORKER_ELS_TMO;
5226	if (!tmo_posted)
5227		vport->work_port_events |= WORKER_ELS_TMO;
5228	spin_unlock_irqrestore(&vport->work_port_lock, iflag);
5229
5230	if (!tmo_posted)
5231		lpfc_worker_wake_up(phba);
5232	return;
5233}
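
/*
 * Illustrative sketch only (not part of the driver, kept out of the build
 * with #if 0): how a handler such as lpfc_els_timeout() is typically wired
 * to vport->els_tmofunc. In the driver the timer is actually initialized
 * during vport setup (outside this file) and re-armed by
 * lpfc_els_timeout_handler() below; this only shows the generic
 * setup_timer()/mod_timer() pattern with the vport as the callback data.
 */
#if 0
static void
example_arm_els_timer(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	uint32_t timeout = phba->fc_ratov << 1;	/* 2 * R_A_TOV, in seconds */

	setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
		    (unsigned long) vport);
	mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
}
#endif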
5234
5235
5236/**
5237 * lpfc_els_timeout_handler - Process an els timeout event
5238 * @vport: pointer to a virtual N_Port data structure.
5239 *
5240 * This routine is the actual handler function that processes an ELS timeout
5241 * event. It walks the ELS ring and aborts all the timed-out IOCBs
5242 * associated with the @vport (except the ABORT/CLOSE/FARP/FARPR/FDISC
5243 * IOCBs) by invoking the lpfc_sli_issue_abort_iotag() routine.
5244 **/
5245void
5246lpfc_els_timeout_handler(struct lpfc_vport *vport)
5247{
5248	struct lpfc_hba  *phba = vport->phba;
5249	struct lpfc_sli_ring *pring;
5250	struct lpfc_iocbq *tmp_iocb, *piocb;
5251	IOCB_t *cmd = NULL;
5252	struct lpfc_dmabuf *pcmd;
5253	uint32_t els_command = 0;
5254	uint32_t timeout;
5255	uint32_t remote_ID = 0xffffffff;
5256	LIST_HEAD(txcmplq_completions);
5257	LIST_HEAD(abort_list);
5258
5259
5260	timeout = (uint32_t)(phba->fc_ratov << 1);
5261
5262	pring = &phba->sli.ring[LPFC_ELS_RING];
5263
5264	spin_lock_irq(&phba->hbalock);
5265	list_splice_init(&pring->txcmplq, &txcmplq_completions);
5266	spin_unlock_irq(&phba->hbalock);
5267
5268	list_for_each_entry_safe(piocb, tmp_iocb, &txcmplq_completions, list) {
5269		cmd = &piocb->iocb;
5270
5271		if ((piocb->iocb_flag & LPFC_IO_LIBDFC) != 0 ||
5272		    piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
5273		    piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
5274			continue;
5275
5276		if (piocb->vport != vport)
5277			continue;
5278
5279		pcmd = (struct lpfc_dmabuf *) piocb->context2;
5280		if (pcmd)
5281			els_command = *(uint32_t *) (pcmd->virt);
5282
5283		if (els_command == ELS_CMD_FARP ||
5284		    els_command == ELS_CMD_FARPR ||
5285		    els_command == ELS_CMD_FDISC)
5286			continue;
5287
5288		if (piocb->drvrTimeout > 0) {
5289			if (piocb->drvrTimeout >= timeout)
5290				piocb->drvrTimeout -= timeout;
5291			else
5292				piocb->drvrTimeout = 0;
5293			continue;
5294		}
5295
5296		remote_ID = 0xffffffff;
5297		if (cmd->ulpCommand != CMD_GEN_REQUEST64_CR)
5298			remote_ID = cmd->un.elsreq64.remoteID;
5299		else {
5300			struct lpfc_nodelist *ndlp;
5301			ndlp = __lpfc_findnode_rpi(vport, cmd->ulpContext);
5302			if (ndlp && NLP_CHK_NODE_ACT(ndlp))
5303				remote_ID = ndlp->nlp_DID;
5304		}
5305		list_add_tail(&piocb->dlist, &abort_list);
5306	}
5307	spin_lock_irq(&phba->hbalock);
5308	list_splice(&txcmplq_completions, &pring->txcmplq);
5309	spin_unlock_irq(&phba->hbalock);
5310
5311	list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) {
5312		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5313			 "0127 ELS timeout Data: x%x x%x x%x "
5314			 "x%x\n", els_command,
5315			 remote_ID, cmd->ulpCommand, cmd->ulpIoTag);
5316		spin_lock_irq(&phba->hbalock);
5317		list_del_init(&piocb->dlist);
5318		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5319		spin_unlock_irq(&phba->hbalock);
5320	}
5321
5322	if (phba->sli.ring[LPFC_ELS_RING].txcmplq_cnt)
5323		mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout);
5324}
5325
5326/**
5327 * lpfc_els_flush_cmd - Clean up the outstanding els commands to a vport
5328 * @vport: pointer to a host virtual N_Port data structure.
5329 *
5330 * This routine is used to clean up all the outstanding ELS commands on a
5331 * @vport. It first aborts the @vport by invoking lpfc_fabric_abort_vport()
5332 * routine. After that, it walks the ELS transmit queue to remove all the
5333 * IOCBs with the @vport other than the QUE_RING and ABORT/CLOSE IOCBs. For
5334 * the IOCBs with a non-NULL completion callback function, the callback
5335 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
5336 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs with a NULL completion
5337 * callback function, the IOCB will simply be released. Finally, it walks
5338 * the ELS transmit completion queue to issue an abort IOCB to any transmit
5339 * completion queue IOCB that is associated with the @vport and is not
5340 * an IOCB from libdfc (i.e., the management plane IOCBs that are not
5341 * part of the discovery state machine) out to HBA by invoking the
5342 * lpfc_sli_issue_abort_iotag() routine. Note that, since this function only
5343 * issues abort IOCBs for the transmit completion queued IOCBs, it does not
5344 * guarantee that the IOCBs are aborted when this function returns.
5345 **/
5346void
5347lpfc_els_flush_cmd(struct lpfc_vport *vport)
5348{
5349	LIST_HEAD(completions);
5350	struct lpfc_hba  *phba = vport->phba;
5351	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5352	struct lpfc_iocbq *tmp_iocb, *piocb;
5353	IOCB_t *cmd = NULL;
5354
5355	lpfc_fabric_abort_vport(vport);
5356
5357	spin_lock_irq(&phba->hbalock);
5358	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
5359		cmd = &piocb->iocb;
5360
5361		if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
5362			continue;
5363		}
5364
5365		/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
5366		if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
5367		    cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
5368		    cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
5369		    cmd->ulpCommand == CMD_ABORT_XRI_CN)
5370			continue;
5371
5372		if (piocb->vport != vport)
5373			continue;
5374
5375		list_move_tail(&piocb->list, &completions);
5376		pring->txq_cnt--;
5377	}
5378
5379	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
5380		if (piocb->iocb_flag & LPFC_IO_LIBDFC) {
5381			continue;
5382		}
5383
5384		if (piocb->vport != vport)
5385			continue;
5386
5387		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5388	}
5389	spin_unlock_irq(&phba->hbalock);
5390
5391	/* Cancel all the IOCBs from the completions list */
5392	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5393			      IOERR_SLI_ABORTED);
5394
5395	return;
5396}
5397
5398/**
5399 * lpfc_els_flush_all_cmd - Clean up all the outstanding els commands to a HBA
5400 * @phba: pointer to lpfc hba data structure.
5401 *
5402 * This routine is used to clean up all the outstanding ELS commands on a
5403 * @phba. It first aborts the @phba by invoking the lpfc_fabric_abort_hba()
5404 * routine. After that, it walks the ELS transmit queue to remove all the
5405 * IOCBs to the @phba other than the QUE_RING and ABORT/CLOSE IOCBs. For
5406 * the IOCBs with the completion callback function associated, the callback
5407 * function will be invoked with the status set to IOSTAT_LOCAL_REJECT and
5408 * un.ulpWord[4] set to IOERR_SLI_ABORTED. For IOCBs without the completion
5409 * callback function associated, the IOCB will simply be released. Finally,
5410 * it walks the ELS transmit completion queue to issue an abort IOCB to any
5411 * transmit completion queue IOCB that is not an IOCB from libdfc (i.e., the
5412 * management plane IOCBs that are not part of the discovery state machine)
5413 * out to HBA by invoking the lpfc_sli_issue_abort_iotag() routine.
5414 **/
5415void
5416lpfc_els_flush_all_cmd(struct lpfc_hba  *phba)
5417{
5418	LIST_HEAD(completions);
5419	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5420	struct lpfc_iocbq *tmp_iocb, *piocb;
5421	IOCB_t *cmd = NULL;
5422
5423	lpfc_fabric_abort_hba(phba);
5424	spin_lock_irq(&phba->hbalock);
5425	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) {
5426		cmd = &piocb->iocb;
5427		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
5428			continue;
5429		/* Do not flush out the QUE_RING and ABORT/CLOSE iocbs */
5430		if (cmd->ulpCommand == CMD_QUE_RING_BUF_CN ||
5431		    cmd->ulpCommand == CMD_QUE_RING_BUF64_CN ||
5432		    cmd->ulpCommand == CMD_CLOSE_XRI_CN ||
5433		    cmd->ulpCommand == CMD_ABORT_XRI_CN)
5434			continue;
5435		list_move_tail(&piocb->list, &completions);
5436		pring->txq_cnt--;
5437	}
5438	list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) {
5439		if (piocb->iocb_flag & LPFC_IO_LIBDFC)
5440			continue;
5441		lpfc_sli_issue_abort_iotag(phba, pring, piocb);
5442	}
5443	spin_unlock_irq(&phba->hbalock);
5444
5445	/* Cancel all the IOCBs from the completions list */
5446	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
5447			      IOERR_SLI_ABORTED);
5448
5449	return;
5450}
5451
5452/**
5453 * lpfc_send_els_failure_event - Posts an ELS command failure event
5454 * @phba: Pointer to hba context object.
5455 * @cmdiocbp: Pointer to command iocb which reported error.
5456 * @rspiocbp: Pointer to response iocb which reported error.
5457 *
5458 * This function sends an event when there is an ELS command
5459 * failure.
5460 **/
5461void
5462lpfc_send_els_failure_event(struct lpfc_hba *phba,
5463			struct lpfc_iocbq *cmdiocbp,
5464			struct lpfc_iocbq *rspiocbp)
5465{
5466	struct lpfc_vport *vport = cmdiocbp->vport;
5467	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5468	struct lpfc_lsrjt_event lsrjt_event;
5469	struct lpfc_fabric_event_header fabric_event;
5470	struct ls_rjt stat;
5471	struct lpfc_nodelist *ndlp;
5472	uint32_t *pcmd;
5473
5474	ndlp = cmdiocbp->context1;
5475	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
5476		return;
5477
5478	if (rspiocbp->iocb.ulpStatus == IOSTAT_LS_RJT) {
5479		lsrjt_event.header.event_type = FC_REG_ELS_EVENT;
5480		lsrjt_event.header.subcategory = LPFC_EVENT_LSRJT_RCV;
5481		memcpy(lsrjt_event.header.wwpn, &ndlp->nlp_portname,
5482			sizeof(struct lpfc_name));
5483		memcpy(lsrjt_event.header.wwnn, &ndlp->nlp_nodename,
5484			sizeof(struct lpfc_name));
5485		pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
5486			cmdiocbp->context2)->virt);
5487		lsrjt_event.command = (pcmd != NULL) ? *pcmd : 0;
5488		stat.un.lsRjtError = be32_to_cpu(rspiocbp->iocb.un.ulpWord[4]);
5489		lsrjt_event.reason_code = stat.un.b.lsRjtRsnCode;
5490		lsrjt_event.explanation = stat.un.b.lsRjtRsnCodeExp;
5491		fc_host_post_vendor_event(shost,
5492			fc_get_event_number(),
5493			sizeof(lsrjt_event),
5494			(char *)&lsrjt_event,
5495			LPFC_NL_VENDOR_ID);
5496		return;
5497	}
5498	if ((rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY) ||
5499		(rspiocbp->iocb.ulpStatus == IOSTAT_FABRIC_BSY)) {
5500		fabric_event.event_type = FC_REG_FABRIC_EVENT;
5501		if (rspiocbp->iocb.ulpStatus == IOSTAT_NPORT_BSY)
5502			fabric_event.subcategory = LPFC_EVENT_PORT_BUSY;
5503		else
5504			fabric_event.subcategory = LPFC_EVENT_FABRIC_BUSY;
5505		memcpy(fabric_event.wwpn, &ndlp->nlp_portname,
5506			sizeof(struct lpfc_name));
5507		memcpy(fabric_event.wwnn, &ndlp->nlp_nodename,
5508			sizeof(struct lpfc_name));
5509		fc_host_post_vendor_event(shost,
5510			fc_get_event_number(),
5511			sizeof(fabric_event),
5512			(char *)&fabric_event,
5513			LPFC_NL_VENDOR_ID);
5514		return;
5515	}
5516
5517}
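
/*
 * Illustrative sketch only (not part of the driver, kept out of the build
 * with #if 0): decoding an LS_RJT payload the way
 * lpfc_send_els_failure_event() above does it. Word 4 of the response IOCB
 * carries the big-endian LS_RJT word; the ls_rjt union splits it back into
 * reason code and explanation. The helper name is hypothetical.
 */
#if 0
static void
example_decode_ls_rjt(struct lpfc_iocbq *rspiocb,
		      uint8_t *reason, uint8_t *explanation)
{
	struct ls_rjt stat;

	stat.un.lsRjtError = be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
	*reason = stat.un.b.lsRjtRsnCode;
	*explanation = stat.un.b.lsRjtRsnCodeExp;
}
#endif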
5518
5519/**
5520 * lpfc_send_els_event - Posts unsolicited els event
5521 * @vport: Pointer to vport object.
5522 * @ndlp: Pointer to FC node object.
5523 * @payload: pointer to the ELS command payload.
5524 *
5525 * This function posts an event when there is an incoming
5526 * unsolicited ELS command.
5527 **/
5528static void
5529lpfc_send_els_event(struct lpfc_vport *vport,
5530		    struct lpfc_nodelist *ndlp,
5531		    uint32_t *payload)
5532{
5533	struct lpfc_els_event_header *els_data = NULL;
5534	struct lpfc_logo_event *logo_data = NULL;
5535	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
5536
5537	if (*payload == ELS_CMD_LOGO) {
5538		logo_data = kmalloc(sizeof(struct lpfc_logo_event), GFP_KERNEL);
5539		if (!logo_data) {
5540			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5541				"0148 Failed to allocate memory "
5542				"for LOGO event\n");
5543			return;
5544		}
5545		els_data = &logo_data->header;
5546	} else {
5547		els_data = kmalloc(sizeof(struct lpfc_els_event_header),
5548			GFP_KERNEL);
5549		if (!els_data) {
5550			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5551				"0149 Failed to allocate memory "
5552				"for ELS event\n");
5553			return;
5554		}
5555	}
5556	els_data->event_type = FC_REG_ELS_EVENT;
5557	switch (*payload) {
5558	case ELS_CMD_PLOGI:
5559		els_data->subcategory = LPFC_EVENT_PLOGI_RCV;
5560		break;
5561	case ELS_CMD_PRLO:
5562		els_data->subcategory = LPFC_EVENT_PRLO_RCV;
5563		break;
5564	case ELS_CMD_ADISC:
5565		els_data->subcategory = LPFC_EVENT_ADISC_RCV;
5566		break;
5567	case ELS_CMD_LOGO:
5568		els_data->subcategory = LPFC_EVENT_LOGO_RCV;
5569		/* Copy the WWPN in the LOGO payload */
5570		memcpy(logo_data->logo_wwpn, &payload[2],
5571			sizeof(struct lpfc_name));
5572		break;
5573	default:
5574		kfree(els_data);
5575		return;
5576	}
5577	memcpy(els_data->wwpn, &ndlp->nlp_portname, sizeof(struct lpfc_name));
5578	memcpy(els_data->wwnn, &ndlp->nlp_nodename, sizeof(struct lpfc_name));
5579	if (*payload == ELS_CMD_LOGO) {
5580		fc_host_post_vendor_event(shost,
5581			fc_get_event_number(),
5582			sizeof(struct lpfc_logo_event),
5583			(char *)logo_data,
5584			LPFC_NL_VENDOR_ID);
5585		kfree(logo_data);
5586	} else {
5587		fc_host_post_vendor_event(shost,
5588			fc_get_event_number(),
5589			sizeof(struct lpfc_els_event_header),
5590			(char *)els_data,
5591			LPFC_NL_VENDOR_ID);
5592		kfree(els_data);
5593	}
5594
5595	return;
5596}
5597
5598
5599/**
5600 * lpfc_els_unsol_buffer - Process an unsolicited event data buffer
5601 * @phba: pointer to lpfc hba data structure.
5602 * @pring: pointer to a SLI ring.
5603 * @vport: pointer to a host virtual N_Port data structure.
5604 * @elsiocb: pointer to lpfc els command iocb data structure.
5605 *
5606 * This routine is used for processing the IOCB associated with an unsolicited
5607 * event. It first determines whether there is an existing ndlp that matches
5608 * the DID from the unsolicited IOCB. If not, it will create a new one with
5609 * the DID from the unsolicited IOCB. The ELS command from the unsolicited
5610 * IOCB is then used to invoke the proper routine and to set up proper state
5611 * of the discovery state machine.
5612 **/
5613static void
5614lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5615		      struct lpfc_vport *vport, struct lpfc_iocbq *elsiocb)
5616{
5617	struct Scsi_Host  *shost;
5618	struct lpfc_nodelist *ndlp;
5619	struct ls_rjt stat;
5620	uint32_t *payload;
5621	uint32_t cmd, did, newnode, rjt_err = 0;
5622	IOCB_t *icmd = &elsiocb->iocb;
5623
5624	if (!vport || !(elsiocb->context2))
5625		goto dropit;
5626
5627	newnode = 0;
5628	payload = ((struct lpfc_dmabuf *)elsiocb->context2)->virt;
5629	cmd = *payload;
5630	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) == 0)
5631		lpfc_post_buffer(phba, pring, 1);
5632
5633	did = icmd->un.rcvels.remoteID;
5634	if (icmd->ulpStatus) {
5635		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5636			"RCV Unsol ELS:  status:x%x/x%x did:x%x",
5637			icmd->ulpStatus, icmd->un.ulpWord[4], did);
5638		goto dropit;
5639	}
5640
5641	/* Check to see if link went down during discovery */
5642	if (lpfc_els_chk_latt(vport))
5643		goto dropit;
5644
5645	/* Ignore traffic received during vport shutdown. */
5646	if (vport->load_flag & FC_UNLOADING)
5647		goto dropit;
5648
5649	ndlp = lpfc_findnode_did(vport, did);
5650	if (!ndlp) {
5651		/* Cannot find existing Fabric ndlp, so allocate a new one */
5652		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
5653		if (!ndlp)
5654			goto dropit;
5655
5656		lpfc_nlp_init(vport, ndlp, did);
5657		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5658		newnode = 1;
5659		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
5660			ndlp->nlp_type |= NLP_FABRIC;
5661	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
5662		ndlp = lpfc_enable_node(vport, ndlp,
5663					NLP_STE_UNUSED_NODE);
5664		if (!ndlp)
5665			goto dropit;
5666		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5667		newnode = 1;
5668		if ((did & Fabric_DID_MASK) == Fabric_DID_MASK)
5669			ndlp->nlp_type |= NLP_FABRIC;
5670	} else if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
5671		/* This is similar to the new node path */
5672		ndlp = lpfc_nlp_get(ndlp);
5673		if (!ndlp)
5674			goto dropit;
5675		lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
5676		newnode = 1;
5677	}
5678
5679	phba->fc_stat.elsRcvFrame++;
5680
5681	elsiocb->context1 = lpfc_nlp_get(ndlp);
5682	elsiocb->vport = vport;
5683
5684	if ((cmd & ELS_CMD_MASK) == ELS_CMD_RSCN) {
5685		cmd &= ELS_CMD_MASK;
5686	}
5687	/* ELS command <elsCmd> received from NPORT <did> */
5688	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
5689			 "0112 ELS command x%x received from NPORT x%x "
5690			 "Data: x%x\n", cmd, did, vport->port_state);
5691	switch (cmd) {
5692	case ELS_CMD_PLOGI:
5693		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5694			"RCV PLOGI:       did:x%x/ste:x%x flg:x%x",
5695			did, vport->port_state, ndlp->nlp_flag);
5696
5697		phba->fc_stat.elsRcvPLOGI++;
5698		ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp);
5699
5700		lpfc_send_els_event(vport, ndlp, payload);
5701		if (vport->port_state < LPFC_DISC_AUTH) {
5702			if (!(phba->pport->fc_flag & FC_PT2PT) ||
5703				(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
5704				rjt_err = LSRJT_UNABLE_TPC;
5705				break;
5706			}
5707			/* We get here, and drop thru, if we are PT2PT with
5708			 * another NPort and the other side has initiated
5709			 * the PLOGI before responding to our FLOGI.
5710			 */
5711		}
5712
5713		shost = lpfc_shost_from_vport(vport);
5714		spin_lock_irq(shost->host_lock);
5715		ndlp->nlp_flag &= ~NLP_TARGET_REMOVE;
5716		spin_unlock_irq(shost->host_lock);
5717
5718		lpfc_disc_state_machine(vport, ndlp, elsiocb,
5719					NLP_EVT_RCV_PLOGI);
5720
5721		break;
5722	case ELS_CMD_FLOGI:
5723		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5724			"RCV FLOGI:       did:x%x/ste:x%x flg:x%x",
5725			did, vport->port_state, ndlp->nlp_flag);
5726
5727		phba->fc_stat.elsRcvFLOGI++;
5728		lpfc_els_rcv_flogi(vport, elsiocb, ndlp);
5729		if (newnode)
5730			lpfc_nlp_put(ndlp);
5731		break;
5732	case ELS_CMD_LOGO:
5733		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5734			"RCV LOGO:        did:x%x/ste:x%x flg:x%x",
5735			did, vport->port_state, ndlp->nlp_flag);
5736
5737		phba->fc_stat.elsRcvLOGO++;
5738		lpfc_send_els_event(vport, ndlp, payload);
5739		if (vport->port_state < LPFC_DISC_AUTH) {
5740			rjt_err = LSRJT_UNABLE_TPC;
5741			break;
5742		}
5743		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
5744		break;
5745	case ELS_CMD_PRLO:
5746		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5747			"RCV PRLO:        did:x%x/ste:x%x flg:x%x",
5748			did, vport->port_state, ndlp->nlp_flag);
5749
5750		phba->fc_stat.elsRcvPRLO++;
5751		lpfc_send_els_event(vport, ndlp, payload);
5752		if (vport->port_state < LPFC_DISC_AUTH) {
5753			rjt_err = LSRJT_UNABLE_TPC;
5754			break;
5755		}
5756		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
5757		break;
5758	case ELS_CMD_RSCN:
5759		phba->fc_stat.elsRcvRSCN++;
5760		lpfc_els_rcv_rscn(vport, elsiocb, ndlp);
5761		if (newnode)
5762			lpfc_nlp_put(ndlp);
5763		break;
5764	case ELS_CMD_ADISC:
5765		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5766			"RCV ADISC:       did:x%x/ste:x%x flg:x%x",
5767			did, vport->port_state, ndlp->nlp_flag);
5768
5769		lpfc_send_els_event(vport, ndlp, payload);
5770		phba->fc_stat.elsRcvADISC++;
5771		if (vport->port_state < LPFC_DISC_AUTH) {
5772			rjt_err = LSRJT_UNABLE_TPC;
5773			break;
5774		}
5775		lpfc_disc_state_machine(vport, ndlp, elsiocb,
5776					NLP_EVT_RCV_ADISC);
5777		break;
5778	case ELS_CMD_PDISC:
5779		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5780			"RCV PDISC:       did:x%x/ste:x%x flg:x%x",
5781			did, vport->port_state, ndlp->nlp_flag);
5782
5783		phba->fc_stat.elsRcvPDISC++;
5784		if (vport->port_state < LPFC_DISC_AUTH) {
5785			rjt_err = LSRJT_UNABLE_TPC;
5786			break;
5787		}
5788		lpfc_disc_state_machine(vport, ndlp, elsiocb,
5789					NLP_EVT_RCV_PDISC);
5790		break;
5791	case ELS_CMD_FARPR:
5792		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5793			"RCV FARPR:       did:x%x/ste:x%x flg:x%x",
5794			did, vport->port_state, ndlp->nlp_flag);
5795
5796		phba->fc_stat.elsRcvFARPR++;
5797		lpfc_els_rcv_farpr(vport, elsiocb, ndlp);
5798		break;
5799	case ELS_CMD_FARP:
5800		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5801			"RCV FARP:        did:x%x/ste:x%x flg:x%x",
5802			did, vport->port_state, ndlp->nlp_flag);
5803
5804		phba->fc_stat.elsRcvFARP++;
5805		lpfc_els_rcv_farp(vport, elsiocb, ndlp);
5806		break;
5807	case ELS_CMD_FAN:
5808		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5809			"RCV FAN:         did:x%x/ste:x%x flg:x%x",
5810			did, vport->port_state, ndlp->nlp_flag);
5811
5812		phba->fc_stat.elsRcvFAN++;
5813		lpfc_els_rcv_fan(vport, elsiocb, ndlp);
5814		break;
5815	case ELS_CMD_PRLI:
5816		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5817			"RCV PRLI:        did:x%x/ste:x%x flg:x%x",
5818			did, vport->port_state, ndlp->nlp_flag);
5819
5820		phba->fc_stat.elsRcvPRLI++;
5821		if (vport->port_state < LPFC_DISC_AUTH) {
5822			rjt_err = LSRJT_UNABLE_TPC;
5823			break;
5824		}
5825		lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
5826		break;
5827	case ELS_CMD_LIRR:
5828		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5829			"RCV LIRR:        did:x%x/ste:x%x flg:x%x",
5830			did, vport->port_state, ndlp->nlp_flag);
5831
5832		phba->fc_stat.elsRcvLIRR++;
5833		lpfc_els_rcv_lirr(vport, elsiocb, ndlp);
5834		if (newnode)
5835			lpfc_nlp_put(ndlp);
5836		break;
5837	case ELS_CMD_RPS:
5838		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5839			"RCV RPS:         did:x%x/ste:x%x flg:x%x",
5840			did, vport->port_state, ndlp->nlp_flag);
5841
5842		phba->fc_stat.elsRcvRPS++;
5843		lpfc_els_rcv_rps(vport, elsiocb, ndlp);
5844		if (newnode)
5845			lpfc_nlp_put(ndlp);
5846		break;
5847	case ELS_CMD_RPL:
5848		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5849			"RCV RPL:         did:x%x/ste:x%x flg:x%x",
5850			did, vport->port_state, ndlp->nlp_flag);
5851
5852		phba->fc_stat.elsRcvRPL++;
5853		lpfc_els_rcv_rpl(vport, elsiocb, ndlp);
5854		if (newnode)
5855			lpfc_nlp_put(ndlp);
5856		break;
5857	case ELS_CMD_RNID:
5858		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5859			"RCV RNID:        did:x%x/ste:x%x flg:x%x",
5860			did, vport->port_state, ndlp->nlp_flag);
5861
5862		phba->fc_stat.elsRcvRNID++;
5863		lpfc_els_rcv_rnid(vport, elsiocb, ndlp);
5864		if (newnode)
5865			lpfc_nlp_put(ndlp);
5866		break;
5867	case ELS_CMD_RRQ:
5868		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5869			"RCV RRQ:         did:x%x/ste:x%x flg:x%x",
5870			did, vport->port_state, ndlp->nlp_flag);
5871
5872		phba->fc_stat.elsRcvRRQ++;
5873		lpfc_els_rcv_rrq(vport, elsiocb, ndlp);
5874		if (newnode)
5875			lpfc_nlp_put(ndlp);
5876		break;
5877	default:
5878		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
5879			"RCV ELS cmd:     cmd:x%x did:x%x/ste:x%x",
5880			cmd, did, vport->port_state);
5881
5882		/* Unsupported ELS command, reject */
5883		rjt_err = LSRJT_INVALID_CMD;
5884
5885		/* Unknown ELS command <elsCmd> received from NPORT <did> */
5886		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5887				 "0115 Unknown ELS command x%x "
5888				 "received from NPORT x%x\n", cmd, did);
5889		if (newnode)
5890			lpfc_nlp_put(ndlp);
5891		break;
5892	}
5893
5894	/* Check whether we need to LS_RJT the received ELS cmd */
5895	if (rjt_err) {
5896		memset(&stat, 0, sizeof(stat));
5897		stat.un.b.lsRjtRsnCode = rjt_err;
5898		stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
5899		lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
5900			NULL);
5901	}
5902
5903	lpfc_nlp_put(elsiocb->context1);
5904	elsiocb->context1 = NULL;
5905	return;
5906
5907dropit:
5908	if (vport && !(vport->load_flag & FC_UNLOADING))
5909		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
5910			"0111 Dropping received ELS cmd "
5911			"Data: x%x x%x x%x\n",
5912			icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
5913	phba->fc_stat.elsRcvDrop++;
5914}
5915
5916/**
5917 * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
5918 * @phba: pointer to lpfc hba data structure.
5919 * @vpi: host virtual N_Port identifier.
5920 *
5921 * This routine finds a vport on a HBA (referred to by @phba) through a
5922 * @vpi. The function walks the HBA's vport list and returns the address
5923 * of the vport with the matching @vpi.
5924 *
5925 * Return code
5926 *    NULL - No vport with the matching @vpi found
5927 *    Otherwise - Address to the vport with the matching @vpi.
5928 **/
5929struct lpfc_vport *
5930lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
5931{
5932	struct lpfc_vport *vport;
5933	unsigned long flags;
5934
5935	spin_lock_irqsave(&phba->hbalock, flags);
5936	list_for_each_entry(vport, &phba->port_list, listentry) {
5937		if (vport->vpi == vpi) {
5938			spin_unlock_irqrestore(&phba->hbalock, flags);
5939			return vport;
5940		}
5941	}
5942	spin_unlock_irqrestore(&phba->hbalock, flags);
5943	return NULL;
5944}
5945
5946/**
5947 * lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
5948 * @phba: pointer to lpfc hba data structure.
5949 * @pring: pointer to a SLI ring.
5950 * @elsiocb: pointer to lpfc els iocb data structure.
5951 *
5952 * This routine is used to process an unsolicited event received from a SLI
5953 * (Service Level Interface) ring. The actual processing of the data buffer
5954 * associated with the unsolicited event is done by invoking the routine
5955 * lpfc_els_unsol_buffer() after properly setting up the iocb buffer from the
5956 * SLI ring on which the unsolicited event was received.
5957 **/
5958void
5959lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
5960		     struct lpfc_iocbq *elsiocb)
5961{
5962	struct lpfc_vport *vport = phba->pport;
5963	IOCB_t *icmd = &elsiocb->iocb;
5964	dma_addr_t paddr;
5965	struct lpfc_dmabuf *bdeBuf1 = elsiocb->context2;
5966	struct lpfc_dmabuf *bdeBuf2 = elsiocb->context3;
5967
5968	elsiocb->context1 = NULL;
5969	elsiocb->context2 = NULL;
5970	elsiocb->context3 = NULL;
5971
5972	if (icmd->ulpStatus == IOSTAT_NEED_BUFFER) {
5973		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
5974	} else if (icmd->ulpStatus == IOSTAT_LOCAL_REJECT &&
5975	    (icmd->un.ulpWord[4] & 0xff) == IOERR_RCV_BUFFER_WAITING) {
5976		phba->fc_stat.NoRcvBuf++;
5977		/* Not enough posted buffers; Try posting more buffers */
5978		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
5979			lpfc_post_buffer(phba, pring, 0);
5980		return;
5981	}
5982
5983	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
5984	    (icmd->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
5985	     icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
5986		if (icmd->unsli3.rcvsli3.vpi == 0xffff)
5987			vport = phba->pport;
5988		else
5989			vport = lpfc_find_vport_by_vpid(phba,
5990				icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
5991	}
5992	/* If there are no BDEs associated
5993	 * with this IOCB, there is nothing to do.
5994	 */
5995	if (icmd->ulpBdeCount == 0)
5996		return;
5997
5998	/* type of ELS cmd is first 32bit word
5999	 * in packet
6000	 */
6001	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
6002		elsiocb->context2 = bdeBuf1;
6003	} else {
6004		paddr = getPaddr(icmd->un.cont64[0].addrHigh,
6005				 icmd->un.cont64[0].addrLow);
6006		elsiocb->context2 = lpfc_sli_ringpostbuf_get(phba, pring,
6007							     paddr);
6008	}
6009
6010	lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
6011	/*
6012	 * The different unsolicited event handlers would tell us
6013	 * if they are done with "mp" by setting context2 to NULL.
6014	 */
6015	if (elsiocb->context2) {
6016		lpfc_in_buf_free(phba, (struct lpfc_dmabuf *)elsiocb->context2);
6017		elsiocb->context2 = NULL;
6018	}
6019
6020	/* RCV_ELS64_CX provide for 2 BDEs - process 2nd if included */
6021	if ((phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) &&
6022	    icmd->ulpBdeCount == 2) {
6023		elsiocb->context2 = bdeBuf2;
6024		lpfc_els_unsol_buffer(phba, pring, vport, elsiocb);
6025		/* free mp if we are done with it */
6026		if (elsiocb->context2) {
6027			lpfc_in_buf_free(phba, elsiocb->context2);
6028			elsiocb->context2 = NULL;
6029		}
6030	}
6031}
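
/*
 * Illustrative sketch only (not part of the driver, kept out of the build
 * with #if 0): the buffer-ownership convention described above. After
 * lpfc_els_unsol_buffer() returns, lpfc_els_unsol_event() frees
 * elsiocb->context2 unless the handler has taken the buffer and cleared
 * the pointer. A hypothetical handler that keeps the buffer for deferred
 * processing would therefore look like this.
 */
#if 0
static void
example_handler_keeps_buffer(struct lpfc_vport *vport,
			     struct lpfc_iocbq *elsiocb)
{
	struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) elsiocb->context2;

	/* ... stash mp somewhere for later processing ... */
	(void) mp;
	elsiocb->context2 = NULL;	/* tell the caller not to free it */
}
#endif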
6032
6033/**
6034 * lpfc_do_scr_ns_plogi - Issue a plogi to the name server for scr
6035 * @phba: pointer to lpfc hba data structure.
6036 * @vport: pointer to a virtual N_Port data structure.
6037 *
6038 * This routine issues a Port Login (PLOGI) to the Name Server with
6039 * State Change Request (SCR) for a @vport. This routine will create an
6040 * ndlp for the Name Server associated to the @vport if such node does
6041 * not already exist. The PLOGI to Name Server is issued by invoking the
6042 * lpfc_issue_els_plogi() routine. If Fabric-Device Management Interface
6043 * (FDMI) is configured for the @vport, an FDMI node will be created and
6044 * the PLOGI to FDMI is issued by invoking lpfc_issue_els_plogi() routine.
6045 **/
6046void
6047lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport)
6048{
6049	struct lpfc_nodelist *ndlp, *ndlp_fdmi;
6050
6051	ndlp = lpfc_findnode_did(vport, NameServer_DID);
6052	if (!ndlp) {
6053		ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
6054		if (!ndlp) {
6055			if (phba->fc_topology == TOPOLOGY_LOOP) {
6056				lpfc_disc_start(vport);
6057				return;
6058			}
6059			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6060			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6061					 "0251 NameServer login: no memory\n");
6062			return;
6063		}
6064		lpfc_nlp_init(vport, ndlp, NameServer_DID);
6065	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
6066		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
6067		if (!ndlp) {
6068			if (phba->fc_topology == TOPOLOGY_LOOP) {
6069				lpfc_disc_start(vport);
6070				return;
6071			}
6072			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6073			lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6074					"0348 NameServer login: node freed\n");
6075			return;
6076		}
6077	}
6078	ndlp->nlp_type |= NLP_FABRIC;
6079
6080	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE);
6081
6082	if (lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0)) {
6083		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6084		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6085				 "0252 Cannot issue NameServer login\n");
6086		return;
6087	}
6088
6089	if (vport->cfg_fdmi_on) {
6090		ndlp_fdmi = mempool_alloc(phba->nlp_mem_pool,
6091					  GFP_KERNEL);
6092		if (ndlp_fdmi) {
6093			lpfc_nlp_init(vport, ndlp_fdmi, FDMI_DID);
6094			ndlp_fdmi->nlp_type |= NLP_FABRIC;
6095			lpfc_nlp_set_state(vport, ndlp_fdmi,
6096				NLP_STE_PLOGI_ISSUE);
6097			lpfc_issue_els_plogi(vport, ndlp_fdmi->nlp_DID,
6098					     0);
6099		}
6100	}
6101	return;
6102}
6103
6104/**
6105 * lpfc_cmpl_reg_new_vport - Completion callback function to register new vport
6106 * @phba: pointer to lpfc hba data structure.
6107 * @pmb: pointer to the driver internal queue element for mailbox command.
6108 *
6109 * This routine is the completion callback function to register new vport
6110 * mailbox command. If the new vport mailbox command completes successfully,
6111 * the fabric registration login shall be performed on physical port (the
6112 * new vport created is actually a physical port, with VPI 0) or the port
6113 * login to Name Server for State Change Request (SCR) will be performed
6114 * on virtual port (real virtual port, with VPI greater than 0).
6115 **/
6116static void
6117lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6118{
6119	struct lpfc_vport *vport = pmb->vport;
6120	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
6121	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
6122	MAILBOX_t *mb = &pmb->u.mb;
6123	int rc;
6124
6125	spin_lock_irq(shost->host_lock);
6126	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
6127	spin_unlock_irq(shost->host_lock);
6128
6129	if (mb->mbxStatus) {
6130		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6131				"0915 Register VPI failed : Status: x%x"
6132				" upd bit: x%x \n", mb->mbxStatus,
6133				 mb->un.varRegVpi.upd);
6134		if (phba->sli_rev == LPFC_SLI_REV4 &&
6135			mb->un.varRegVpi.upd)
6136			goto mbox_err_exit;
6137
6138		switch (mb->mbxStatus) {
6139		case 0x11:	/* unsupported feature */
6140		case 0x9603:	/* max_vpi exceeded */
6141		case 0x9602:	/* Link event since CLEAR_LA */
6142			/* giving up on vport registration */
6143			lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6144			spin_lock_irq(shost->host_lock);
6145			vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
6146			spin_unlock_irq(shost->host_lock);
6147			lpfc_can_disctmo(vport);
6148			break;
6149		/* If reg_vpi fails with invalid VPI status, re-init VPI */
6150		case 0x20:
6151			spin_lock_irq(shost->host_lock);
6152			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6153			spin_unlock_irq(shost->host_lock);
6154			lpfc_init_vpi(phba, pmb, vport->vpi);
6155			pmb->vport = vport;
6156			pmb->mbox_cmpl = lpfc_init_vpi_cmpl;
6157			rc = lpfc_sli_issue_mbox(phba, pmb,
6158				MBX_NOWAIT);
6159			if (rc == MBX_NOT_FINISHED) {
6160				lpfc_printf_vlog(vport,
6161					KERN_ERR, LOG_MBOX,
6162					"2732 Failed to issue INIT_VPI"
6163					" mailbox command\n");
6164			} else {
6165				lpfc_nlp_put(ndlp);
6166				return;
6167			}
6168
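			/* fall through - recover if INIT_VPI could not be issued */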
6169		default:
6170			/* Try to recover from this error */
6171			lpfc_mbx_unreg_vpi(vport);
6172			spin_lock_irq(shost->host_lock);
6173			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6174			spin_unlock_irq(shost->host_lock);
6175			if (vport->port_type == LPFC_PHYSICAL_PORT
6176				&& !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
6177				lpfc_initial_flogi(vport);
6178			else
6179				lpfc_initial_fdisc(vport);
6180			break;
6181		}
6182	} else {
6183		spin_lock_irq(shost->host_lock);
6184		vport->vpi_state |= LPFC_VPI_REGISTERED;
6185		spin_unlock_irq(shost->host_lock);
6186		if (vport == phba->pport) {
6187			if (phba->sli_rev < LPFC_SLI_REV4)
6188				lpfc_issue_fabric_reglogin(vport);
6189			else {
6190				/*
6191				 * If the physical port is instantiated using
6192				 * FDISC, do not start vport discovery.
6193				 */
6194				if (vport->port_state != LPFC_FDISC)
6195					lpfc_start_fdiscs(phba);
6196				lpfc_do_scr_ns_plogi(phba, vport);
6197			}
6198		} else
6199			lpfc_do_scr_ns_plogi(phba, vport);
6200	}
6201mbox_err_exit:
6202	/* Now, we decrement the ndlp reference count held for this
6203	 * callback function
6204	 */
6205	lpfc_nlp_put(ndlp);
6206
6207	mempool_free(pmb, phba->mbox_mem_pool);
6208	return;
6209}
6210
6211/**
6212 * lpfc_register_new_vport - Register a new vport with a HBA
6213 * @phba: pointer to lpfc hba data structure.
6214 * @vport: pointer to a host virtual N_Port data structure.
6215 * @ndlp: pointer to a node-list data structure.
6216 *
6217 * This routine registers the @vport as a new virtual port with a HBA.
6218 * It is done through a registering vpi mailbox command.
6219 **/
6220void
6221lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
6222			struct lpfc_nodelist *ndlp)
6223{
6224	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6225	LPFC_MBOXQ_t *mbox;
6226
6227	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6228	if (mbox) {
6229		lpfc_reg_vpi(vport, mbox);
6230		mbox->vport = vport;
6231		mbox->context2 = lpfc_nlp_get(ndlp);
6232		mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
6233		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
6234		    == MBX_NOT_FINISHED) {
6235			/* mailbox command was not successful; decrement the
6236			 * ndlp reference count held for this command
6237			 */
6238			lpfc_nlp_put(ndlp);
6239			mempool_free(mbox, phba->mbox_mem_pool);
6240
6241			lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6242				"0253 Register VPI: Can't send mbox\n");
6243			goto mbox_err_exit;
6244		}
6245	} else {
6246		lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
6247				 "0254 Register VPI: no memory\n");
6248		goto mbox_err_exit;
6249	}
6250	return;
6251
6252mbox_err_exit:
6253	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6254	spin_lock_irq(shost->host_lock);
6255	vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
6256	spin_unlock_irq(shost->host_lock);
6257	return;
6258}
6259
6260/**
6261 * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
6262 * @phba: pointer to lpfc hba data structure.
6263 *
6264 * This routine cancels the retry delay timers for all the vports.
6265 **/
6266void
6267lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
6268{
6269	struct lpfc_vport **vports;
6270	struct lpfc_nodelist *ndlp;
6271	uint32_t link_state;
6272	int i;
6273
6274	/* Treat this failure as linkdown for all vports */
6275	link_state = phba->link_state;
6276	lpfc_linkdown(phba);
6277	phba->link_state = link_state;
6278
6279	vports = lpfc_create_vport_work_array(phba);
6280
6281	if (vports) {
6282		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
6283			ndlp = lpfc_findnode_did(vports[i], Fabric_DID);
6284			if (ndlp)
6285				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
6286			lpfc_els_flush_cmd(vports[i]);
6287		}
6288		lpfc_destroy_vport_work_array(phba, vports);
6289	}
6290}
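
/*
 * Editor's note: the loop above uses the driver's standard vport-array idiom.
 * A stripped-down sketch of that idiom: the array is obtained from
 * lpfc_create_vport_work_array() and must always be released with
 * lpfc_destroy_vport_work_array() when the walk is done.
 *
 *	struct lpfc_vport **vports;
 *	int i;
 *
 *	vports = lpfc_create_vport_work_array(phba);
 *	if (vports) {
 *		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
 *			lpfc_els_flush_cmd(vports[i]);
 *		lpfc_destroy_vport_work_array(phba, vports);
 *	}
 */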
6291
6292/**
6293 * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
6294 * @phba: pointer to lpfc hba data structure.
6295 *
6296 * This routine aborts all pending discovery commands and
6297 * starts a timer to retry FLOGI for the physical port
6298 * discovery.
6299 **/
6300void
6301lpfc_retry_pport_discovery(struct lpfc_hba *phba)
6302{
6303	struct lpfc_nodelist *ndlp;
6304	struct Scsi_Host  *shost;
6305
6306	/* Cancel all the vports' retry delay timers */
6307	lpfc_cancel_all_vport_retry_delay_timer(phba);
6308
6309	/* If the fabric requires FLOGI, then re-instantiate the physical login */
6310	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
6311	if (!ndlp)
6312		return;
6313
6314	shost = lpfc_shost_from_vport(phba->pport);
6315	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
6316	spin_lock_irq(shost->host_lock);
6317	ndlp->nlp_flag |= NLP_DELAY_TMO;
6318	spin_unlock_irq(shost->host_lock);
6319	ndlp->nlp_last_elscmd = ELS_CMD_FLOGI;
6320	phba->pport->port_state = LPFC_FLOGI;
6321	return;
6322}
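
/*
 * Editor's note: a sketch of the delayed-retry convention relied on above.
 * It is assumed here that, when the per-node nlp_delayfunc timer armed above
 * fires, the delayed-ELS handling path re-issues the command recorded in
 * nlp_last_elscmd, roughly as follows (names taken from this driver, but the
 * snippet itself is illustrative only):
 *
 *	switch (ndlp->nlp_last_elscmd) {
 *	case ELS_CMD_FLOGI:
 *		lpfc_issue_els_flogi(vport, ndlp, retry);
 *		break;
 *	case ELS_CMD_FDISC:
 *		lpfc_issue_els_fdisc(vport, ndlp, retry);
 *		break;
 *	}
 */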
6323
6324/**
6325 * lpfc_fabric_login_reqd - Check if FLOGI required.
6326 * @phba: pointer to lpfc hba data structure.
6327 * @cmdiocb: pointer to FDISC command iocb.
6328 * @rspiocb: pointer to FDISC response iocb.
6329 *
6330 * This routine checks if a FLOGI is required for FDISC
6331 * to succeed.
6332 **/
6333static int
6334lpfc_fabric_login_reqd(struct lpfc_hba *phba,
6335		struct lpfc_iocbq *cmdiocb,
6336		struct lpfc_iocbq *rspiocb)
6337{
6338
6339	if ((rspiocb->iocb.ulpStatus != IOSTAT_FABRIC_RJT) ||
6340		(rspiocb->iocb.un.ulpWord[4] != RJT_LOGIN_REQUIRED))
6341		return 0;
6342	else
6343		return 1;
6344}
6345
6346/**
6347 * lpfc_cmpl_els_fdisc - Completion function for fdisc iocb command
6348 * @phba: pointer to lpfc hba data structure.
6349 * @cmdiocb: pointer to lpfc command iocb data structure.
6350 * @rspiocb: pointer to lpfc response iocb data structure.
6351 *
6352 * This routine is the completion callback function to a Fabric Discover
6353 * (FDISC) ELS command. Since all the FDISC ELS commands are issued
6354 * single threaded, each FDISC completion callback function will reset
6355 * the discovery timer for all vports such that the timers will not get
6356 * unnecessary timeout. The function checks the FDISC IOCB status. If error
6357 * detected, the vport will be set to FC_VPORT_FAILED state. Otherwise,the
6358 * vport will set to FC_VPORT_ACTIVE state. It then checks whether the DID
6359 * assigned to the vport has been changed with the completion of the FDISC
6360 * command. If so, both RPI (Remote Port Index) and VPI (Virtual Port Index)
6361 * are unregistered from the HBA, and then the lpfc_register_new_vport()
6362 * routine is invoked to register the new vport with the HBA. Otherwise, the
6363 * lpfc_do_scr_ns_plogi() routine is invoked to issue a PLOGI to the Name
6364 * Server for State Change Request (SCR).
6365 **/
6366static void
6367lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6368		    struct lpfc_iocbq *rspiocb)
6369{
6370	struct lpfc_vport *vport = cmdiocb->vport;
6371	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
6372	struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
6373	struct lpfc_nodelist *np;
6374	struct lpfc_nodelist *next_np;
6375	IOCB_t *irsp = &rspiocb->iocb;
6376	struct lpfc_iocbq *piocb;
6377
6378	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
6379			 "0123 FDISC completes. x%x/x%x prevDID: x%x\n",
6380			 irsp->ulpStatus, irsp->un.ulpWord[4],
6381			 vport->fc_prevDID);
6382	/* Since all FDISCs are being single threaded, we
6383	 * must reset the discovery timer for ALL vports
6384	 * waiting to send FDISC when one completes.
6385	 */
6386	list_for_each_entry(piocb, &phba->fabric_iocb_list, list) {
6387		lpfc_set_disctmo(piocb->vport);
6388	}
6389
6390	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6391		"FDISC cmpl:      status:x%x/x%x prevdid:x%x",
6392		irsp->ulpStatus, irsp->un.ulpWord[4], vport->fc_prevDID);
6393
6394	if (irsp->ulpStatus) {
6395
6396		if (lpfc_fabric_login_reqd(phba, cmdiocb, rspiocb)) {
6397			lpfc_retry_pport_discovery(phba);
6398			goto out;
6399		}
6400
6401		/* Check for retry */
6402		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
6403			goto out;
6404		/* FDISC failed */
6405		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6406				 "0126 FDISC failed. (%d/%d)\n",
6407				 irsp->ulpStatus, irsp->un.ulpWord[4]);
6408		goto fdisc_failed;
6409	}
6410	spin_lock_irq(shost->host_lock);
6411	vport->fc_flag &= ~FC_VPORT_CVL_RCVD;
6412	vport->fc_flag &= ~FC_VPORT_LOGO_RCVD;
6413	vport->fc_flag |= FC_FABRIC;
6414	if (vport->phba->fc_topology == TOPOLOGY_LOOP)
6415		vport->fc_flag |=  FC_PUBLIC_LOOP;
6416	spin_unlock_irq(shost->host_lock);
6417
6418	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
6419	lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
6420	if ((vport->fc_prevDID != vport->fc_myDID) &&
6421		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
6422		/* If our NportID changed, we need to ensure all
6423		 * remaining NPORTs get unreg_login'ed so we can
6424		 * issue unreg_vpi.
6425		 */
6426		list_for_each_entry_safe(np, next_np,
6427			&vport->fc_nodes, nlp_listp) {
6428			if (!NLP_CHK_NODE_ACT(np) ||
6429			    (np->nlp_state != NLP_STE_NPR_NODE) ||
6430			    !(np->nlp_flag & NLP_NPR_ADISC))
6431				continue;
6432			spin_lock_irq(shost->host_lock);
6433			np->nlp_flag &= ~NLP_NPR_ADISC;
6434			spin_unlock_irq(shost->host_lock);
6435			lpfc_unreg_rpi(vport, np);
6436		}
6437		lpfc_cleanup_pending_mbox(vport);
6438		lpfc_mbx_unreg_vpi(vport);
6439		spin_lock_irq(shost->host_lock);
6440		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
6441		if (phba->sli_rev == LPFC_SLI_REV4)
6442			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
6443		else
6444			vport->fc_flag |= FC_LOGO_RCVD_DID_CHNG;
6445		spin_unlock_irq(shost->host_lock);
6446	} else if ((phba->sli_rev == LPFC_SLI_REV4) &&
6447		!(vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)) {
6448		/*
6449		 * Driver needs to re-reg VPI in order for f/w
6450		 * to update the MAC address.
6451		 */
6452		lpfc_register_new_vport(phba, vport, ndlp);
6453		return;
6454	}
6455
6456	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
6457		lpfc_issue_init_vpi(vport);
6458	else if (vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
6459		lpfc_register_new_vport(phba, vport, ndlp);
6460	else
6461		lpfc_do_scr_ns_plogi(phba, vport);
6462	goto out;
6463fdisc_failed:
6464	lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6465	/* Cancel discovery timer */
6466	lpfc_can_disctmo(vport);
6467	lpfc_nlp_put(ndlp);
6468out:
6469	lpfc_els_free_iocb(phba, cmdiocb);
6470}
6471
6472/**
6473 * lpfc_issue_els_fdisc - Issue a fdisc iocb command
6474 * @vport: pointer to a virtual N_Port data structure.
6475 * @ndlp: pointer to a node-list data structure.
6476 * @retry: number of retries for the command IOCB.
6477 *
6478 * This routine prepares and issues a Fabric Discover (FDISC) IOCB to
6479 * a remote node (@ndlp) off a @vport. It uses the lpfc_issue_fabric_iocb()
6480 * routine to issue the IOCB, which makes sure that only one fabric
6481 * IOCB is outstanding on the HBA at any given time.
6482 *
6483 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6484 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6485 * will be stored into the context1 field of the IOCB for the completion
6486 * callback function to the FDISC ELS command.
6487 *
6488 * Return code
6489 *   0 - Successfully issued fdisc iocb command
6490 *   1 - Failed to issue fdisc iocb command
6491 **/
6492static int
6493lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
6494		     uint8_t retry)
6495{
6496	struct lpfc_hba *phba = vport->phba;
6497	IOCB_t *icmd;
6498	struct lpfc_iocbq *elsiocb;
6499	struct serv_parm *sp;
6500	uint8_t *pcmd;
6501	uint16_t cmdsize;
6502	int did = ndlp->nlp_DID;
6503	int rc;
6504
6505	vport->port_state = LPFC_FDISC;
6506	cmdsize = (sizeof(uint32_t) + sizeof(struct serv_parm));
6507	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, did,
6508				     ELS_CMD_FDISC);
6509	if (!elsiocb) {
6510		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6511		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6512				 "0255 Issue FDISC: no IOCB\n");
6513		return 1;
6514	}
6515
6516	icmd = &elsiocb->iocb;
6517	icmd->un.elsreq64.myID = 0;
6518	icmd->un.elsreq64.fl = 1;
6519
6520	if (phba->sli_rev == LPFC_SLI_REV4) {
6521		/* FDISC needs to be 1 for WQE VPI */
6522		elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1;
6523		elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1;
6524		/* Set the ulpContext to the vpi */
6525		elsiocb->iocb.ulpContext = vport->vpi + phba->vpi_base;
6526	} else {
6527		/* For FDISC, let the FDISC rsp set the NPortID for this VPI */
6528		icmd->ulpCt_h = 1;
6529		icmd->ulpCt_l = 0;
6530	}
6531
6532	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6533	*((uint32_t *) (pcmd)) = ELS_CMD_FDISC;
6534	pcmd += sizeof(uint32_t); /* CSP Word 1 */
6535	memcpy(pcmd, &vport->phba->pport->fc_sparam, sizeof(struct serv_parm));
6536	sp = (struct serv_parm *) pcmd;
6537	/* Setup CSPs accordingly for Fabric */
6538	sp->cmn.e_d_tov = 0;
6539	sp->cmn.w2.r_a_tov = 0;
6540	sp->cls1.classValid = 0;
6541	sp->cls2.seqDelivery = 1;
6542	sp->cls3.seqDelivery = 1;
6543
6544	pcmd += sizeof(uint32_t); /* CSP Word 2 */
6545	pcmd += sizeof(uint32_t); /* CSP Word 3 */
6546	pcmd += sizeof(uint32_t); /* CSP Word 4 */
6547	pcmd += sizeof(uint32_t); /* Port Name */
6548	memcpy(pcmd, &vport->fc_portname, 8);
6549	pcmd += sizeof(uint32_t); /* Node Name */
6550	pcmd += sizeof(uint32_t); /* Node Name */
6551	memcpy(pcmd, &vport->fc_nodename, 8);
6552
6553	lpfc_set_disctmo(vport);
6554
6555	phba->fc_stat.elsXmitFDISC++;
6556	elsiocb->iocb_cmpl = lpfc_cmpl_els_fdisc;
6557
6558	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6559		"Issue FDISC:     did:x%x",
6560		did, 0, 0);
6561
6562	rc = lpfc_issue_fabric_iocb(phba, elsiocb);
6563	if (rc == IOCB_ERROR) {
6564		lpfc_els_free_iocb(phba, elsiocb);
6565		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
6566		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
6567				 "0256 Issue FDISC: Cannot send IOCB\n");
6568		return 1;
6569	}
6570	lpfc_vport_set_state(vport, FC_VPORT_INITIALIZING);
6571	return 0;
6572}
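
/*
 * Editor's note: a hedged sketch of a hypothetical caller of
 * lpfc_issue_els_fdisc() above, loosely modeled on the vport discovery
 * start path. It assumes the Fabric_DID node already exists for the vport
 * and reduces error handling to the essentials:
 *
 *	struct lpfc_nodelist *ndlp;
 *
 *	ndlp = lpfc_findnode_did(vport, Fabric_DID);
 *	if (ndlp && NLP_CHK_NODE_ACT(ndlp))
 *		lpfc_issue_els_fdisc(vport, ndlp, 0);
 *	else
 *		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
 */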
6573
6574/**
6575 * lpfc_cmpl_els_npiv_logo - Completion function with vport logo
6576 * @phba: pointer to lpfc hba data structure.
6577 * @cmdiocb: pointer to lpfc command iocb data structure.
6578 * @rspiocb: pointer to lpfc response iocb data structure.
6579 *
6580 * This routine is the completion callback function to the issuing of a LOGO
6581 * ELS command off a vport. It frees the command IOCB and then decrements the
6582 * reference count held on the ndlp for this completion function, indicating
6583 * that the reference to the ndlp is no longer needed. Note that the
6584 * lpfc_els_free_iocb() routine decrements the ndlp reference held for this
6585 * callback function, and the additional explicit ndlp reference decrement
6586 * will trigger the actual release of the ndlp.
6587 **/
6588static void
6589lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6590			struct lpfc_iocbq *rspiocb)
6591{
6592	struct lpfc_vport *vport = cmdiocb->vport;
6593	IOCB_t *irsp;
6594	struct lpfc_nodelist *ndlp;
6595	ndlp = (struct lpfc_nodelist *)cmdiocb->context1;
6596
6597	irsp = &rspiocb->iocb;
6598	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6599		"LOGO npiv cmpl:  status:x%x/x%x did:x%x",
6600		irsp->ulpStatus, irsp->un.ulpWord[4], irsp->un.rcvels.remoteID);
6601
6602	lpfc_els_free_iocb(phba, cmdiocb);
6603	vport->unreg_vpi_cmpl = VPORT_ERROR;
6604
6605	/* Trigger the release of the ndlp after logo */
6606	lpfc_nlp_put(ndlp);
6607}
6608
6609/**
6610 * lpfc_issue_els_npiv_logo - Issue a logo off a vport
6611 * @vport: pointer to a virtual N_Port data structure.
6612 * @ndlp: pointer to a node-list data structure.
6613 *
6614 * This routine issues a LOGO ELS command to an @ndlp off a @vport.
6615 *
6616 * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
6617 * will be incremented by 1 for holding the ndlp and the reference to ndlp
6618 * will be stored into the context1 field of the IOCB for the completion
6619 * callback function to the LOGO ELS command.
6620 *
6621 * Return codes
6622 *   0 - Successfully issued logo off the @vport
6623 *   1 - Failed to issue logo off the @vport
6624 **/
6625int
6626lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
6627{
6628	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
6629	struct lpfc_hba  *phba = vport->phba;
6630	IOCB_t *icmd;
6631	struct lpfc_iocbq *elsiocb;
6632	uint8_t *pcmd;
6633	uint16_t cmdsize;
6634
6635	cmdsize = 2 * sizeof(uint32_t) + sizeof(struct lpfc_name);
6636	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp, ndlp->nlp_DID,
6637				     ELS_CMD_LOGO);
6638	if (!elsiocb)
6639		return 1;
6640
6641	icmd = &elsiocb->iocb;
6642	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
6643	*((uint32_t *) (pcmd)) = ELS_CMD_LOGO;
6644	pcmd += sizeof(uint32_t);
6645
6646	/* Fill in LOGO payload */
6647	*((uint32_t *) (pcmd)) = be32_to_cpu(vport->fc_myDID);
6648	pcmd += sizeof(uint32_t);
6649	memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name));
6650
6651	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
6652		"Issue LOGO npiv  did:x%x flg:x%x",
6653		ndlp->nlp_DID, ndlp->nlp_flag, 0);
6654
6655	elsiocb->iocb_cmpl = lpfc_cmpl_els_npiv_logo;
6656	spin_lock_irq(shost->host_lock);
6657	ndlp->nlp_flag |= NLP_LOGO_SND;
6658	spin_unlock_irq(shost->host_lock);
6659	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
6660	    IOCB_ERROR) {
6661		spin_lock_irq(shost->host_lock);
6662		ndlp->nlp_flag &= ~NLP_LOGO_SND;
6663		spin_unlock_irq(shost->host_lock);
6664		lpfc_els_free_iocb(phba, elsiocb);
6665		return 1;
6666	}
6667	return 0;
6668}
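
/*
 * Editor's note: a sketch, as an assumption rather than a quote of the vport
 * teardown code, of how this NPIV LOGO is typically driven when a vport is
 * deleted: the fabric node is looked up and, if still logged in, the LOGO is
 * issued; its completion above then marks unreg_vpi_cmpl and drops the ndlp
 * reference.
 *
 *	ndlp = lpfc_findnode_did(vport, Fabric_DID);
 *	if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
 *	    ndlp->nlp_state == NLP_STE_UNMAPPED_NODE)
 *		lpfc_issue_els_npiv_logo(vport, ndlp);
 */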
6669
6670/**
6671 * lpfc_fabric_block_timeout - Handler function to the fabric block timer
6672 * @ptr: holder for the timer function associated data.
6673 *
6674 * This routine is invoked by the fabric iocb block timer after
6675 * timeout. It posts the fabric iocb block timeout event by setting the
6676 * WORKER_FABRIC_BLOCK_TMO bit in the work port event bitmap and then invokes
6677 * the lpfc_worker_wake_up() routine to wake up the worker thread. The worker
6678 * thread then invokes lpfc_unblock_fabric_iocbs() to handle the posted
6679 * WORKER_FABRIC_BLOCK_TMO event.
6680 **/
6681void
6682lpfc_fabric_block_timeout(unsigned long ptr)
6683{
6684	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
6685	unsigned long iflags;
6686	uint32_t tmo_posted;
6687
6688	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
6689	tmo_posted = phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO;
6690	if (!tmo_posted)
6691		phba->pport->work_port_events |= WORKER_FABRIC_BLOCK_TMO;
6692	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
6693
6694	if (!tmo_posted)
6695		lpfc_worker_wake_up(phba);
6696	return;
6697}
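
/*
 * Editor's note: lpfc_fabric_block_timeout() only posts an event; the timer
 * is armed by lpfc_block_fabric_iocbs() below. A sketch of the one-time
 * setup the driver init path is assumed to perform for this timer (the exact
 * location of that setup is outside this file):
 *
 *	init_timer(&phba->fabric_block_timer);
 *	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
 *	phba->fabric_block_timer.data = (unsigned long)phba;
 */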
6698
6699/**
6700 * lpfc_resume_fabric_iocbs - Issue a fabric iocb from driver internal list
6701 * @phba: pointer to lpfc hba data structure.
6702 *
6703 * This routine issues one fabric iocb from the driver internal list to
6704 * the HBA. It first checks whether it is ready to issue a fabric iocb to
6705 * the HBA (i.e., whether there is no outstanding fabric iocb). If so, it
6706 * removes one pending fabric iocb from the driver internal list and invokes
6707 * the lpfc_sli_issue_iocb() routine to send the fabric iocb to the HBA.
6708 **/
6709static void
6710lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
6711{
6712	struct lpfc_iocbq *iocb;
6713	unsigned long iflags;
6714	int ret;
6715	IOCB_t *cmd;
6716
6717repeat:
6718	iocb = NULL;
6719	spin_lock_irqsave(&phba->hbalock, iflags);
6720	/* Post any pending iocb to the SLI layer */
6721	if (atomic_read(&phba->fabric_iocb_count) == 0) {
6722		list_remove_head(&phba->fabric_iocb_list, iocb, typeof(*iocb),
6723				 list);
6724		if (iocb)
6725			/* Increment fabric iocb count to hold the position */
6726			atomic_inc(&phba->fabric_iocb_count);
6727	}
6728	spin_unlock_irqrestore(&phba->hbalock, iflags);
6729	if (iocb) {
6730		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
6731		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
6732		iocb->iocb_flag |= LPFC_IO_FABRIC;
6733
6734		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
6735			"Fabric sched1:   ste:x%x",
6736			iocb->vport->port_state, 0, 0);
6737
6738		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6739
6740		if (ret == IOCB_ERROR) {
6741			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
6742			iocb->fabric_iocb_cmpl = NULL;
6743			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
6744			cmd = &iocb->iocb;
6745			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
6746			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
6747			iocb->iocb_cmpl(phba, iocb, iocb);
6748
6749			atomic_dec(&phba->fabric_iocb_count);
6750			goto repeat;
6751		}
6752	}
6753
6754	return;
6755}
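
/*
 * Editor's note: when lpfc_sli_issue_iocb() fails above, the iocb is not
 * dropped; it is completed locally. A minimal sketch of that driver-wide
 * idiom of faking an aborted completion so the original callback still runs:
 *
 *	cmd = &iocb->iocb;
 *	cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
 *	cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
 *	(iocb->iocb_cmpl)(phba, iocb, iocb);
 */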
6756
6757/**
6758 * lpfc_unblock_fabric_iocbs - Unblock issuing fabric iocb command
6759 * @phba: pointer to lpfc hba data structure.
6760 *
6761 * This routine unblocks the issuing of fabric iocb commands. The function
6762 * clears the fabric iocb block bit and then invokes the routine
6763 * lpfc_resume_fabric_iocbs() to issue one of the pending fabric iocbs
6764 * from the driver internal fabric iocb list.
6765 **/
6766void
6767lpfc_unblock_fabric_iocbs(struct lpfc_hba *phba)
6768{
6769	clear_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
6770
6771	lpfc_resume_fabric_iocbs(phba);
6772	return;
6773}
6774
6775/**
6776 * lpfc_block_fabric_iocbs - Block issuing fabric iocb command
6777 * @phba: pointer to lpfc hba data structure.
6778 *
6779 * This routine blocks the issuing of fabric iocbs for a specified amount of
6780 * time (currently 100 ms). This is done by setting the fabric iocb block bit
6781 * and setting up a timeout timer for 100 ms. When the block bit is set, no
6782 * more fabric iocbs will be issued to the HBA.
6783 **/
6784static void
6785lpfc_block_fabric_iocbs(struct lpfc_hba *phba)
6786{
6787	int blocked;
6788
6789	blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
6790	/* Start a timer to unblock fabric iocbs after 100ms */
6791	if (!blocked)
6792		mod_timer(&phba->fabric_block_timer, jiffies + HZ/10);
6793
6794	return;
6795}
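
/*
 * Editor's note: a sketch of the worker-thread side of this throttle, under
 * the assumption that the worker's event loop checks the posted bit set by
 * lpfc_fabric_block_timeout() above: when WORKER_FABRIC_BLOCK_TMO is seen,
 * the block is lifted and the next queued fabric iocb is resumed.
 *
 *	if (phba->pport->work_port_events & WORKER_FABRIC_BLOCK_TMO)
 *		lpfc_unblock_fabric_iocbs(phba);
 */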
6796
6797/**
6798 * lpfc_cmpl_fabric_iocb - Completion callback function for fabric iocb
6799 * @phba: pointer to lpfc hba data structure.
6800 * @cmdiocb: pointer to lpfc command iocb data structure.
6801 * @rspiocb: pointer to lpfc response iocb data structure.
6802 *
6803 * This routine is the callback function that is assigned to the fabric iocb's
6804 * callback function pointer (iocb->iocb_cmpl). The original iocb's callback
6805 * function pointer has been stored in iocb->fabric_iocb_cmpl. This callback
6806 * function first restores and invokes the original iocb's callback function
6807 * and then invokes the lpfc_resume_fabric_iocbs() routine to issue the next
6808 * fabric bound iocb from the driver internal fabric iocb list onto the wire.
6809 **/
6810static void
6811lpfc_cmpl_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
6812	struct lpfc_iocbq *rspiocb)
6813{
6814	struct ls_rjt stat;
6815
6816	if ((cmdiocb->iocb_flag & LPFC_IO_FABRIC) != LPFC_IO_FABRIC)
6817		BUG();
6818
6819	switch (rspiocb->iocb.ulpStatus) {
6820		case IOSTAT_NPORT_RJT:
6821		case IOSTAT_FABRIC_RJT:
6822			if (rspiocb->iocb.un.ulpWord[4] & RJT_UNAVAIL_TEMP) {
6823				lpfc_block_fabric_iocbs(phba);
6824			}
6825			break;
6826
6827		case IOSTAT_NPORT_BSY:
6828		case IOSTAT_FABRIC_BSY:
6829			lpfc_block_fabric_iocbs(phba);
6830			break;
6831
6832		case IOSTAT_LS_RJT:
6833			stat.un.lsRjtError =
6834				be32_to_cpu(rspiocb->iocb.un.ulpWord[4]);
6835			if ((stat.un.b.lsRjtRsnCode == LSRJT_UNABLE_TPC) ||
6836				(stat.un.b.lsRjtRsnCode == LSRJT_LOGICAL_BSY))
6837				lpfc_block_fabric_iocbs(phba);
6838			break;
6839	}
6840
6841	if (atomic_read(&phba->fabric_iocb_count) == 0)
6842		BUG();
6843
6844	cmdiocb->iocb_cmpl = cmdiocb->fabric_iocb_cmpl;
6845	cmdiocb->fabric_iocb_cmpl = NULL;
6846	cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC;
6847	cmdiocb->iocb_cmpl(phba, cmdiocb, rspiocb);
6848
6849	atomic_dec(&phba->fabric_iocb_count);
6850	if (!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags)) {
6851		/* Post any pending iocbs to HBA */
6852		lpfc_resume_fabric_iocbs(phba);
6853	}
6854}
6855
6856/**
6857 * lpfc_issue_fabric_iocb - Issue a fabric iocb command
6858 * @phba: pointer to lpfc hba data structure.
6859 * @iocb: pointer to lpfc command iocb data structure.
6860 *
6861 * This routine is used as the top-level API for issuing a fabric iocb command
6862 * such as FLOGI and FDISC. To accommodate certain switch fabrics, this driver
6863 * function makes sure that only one fabric bound iocb will be outstanding at
6864 * any given time. As such, this function will first check to see whether there
6865 * is already an outstanding fabric iocb on the wire. If so, it will put the
6866 * newly issued iocb onto the driver internal fabric iocb list, waiting to be
6867 * issued later. Otherwise, it will issue the iocb on the wire and update the
6868 * fabric iocb count to indicate that there is one fabric iocb on the wire.
6869 *
6870 * Note that this implementation can potentially send fabric IOCBs out of
6871 * order. The problem is that the construction of the "ready" boolean does
6872 * not include the condition that the internal fabric IOCB list is empty. As
6873 * such, it is possible for a fabric IOCB issued by this routine to jump
6874 * ahead of the fabric IOCBs already on the internal list.
6875 *
6876 * Return code
6877 *   IOCB_SUCCESS - either fabric iocb put on the list or issued successfully
6878 *   IOCB_ERROR - failed to issue fabric iocb
6879 **/
6880static int
6881lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
6882{
6883	unsigned long iflags;
6884	int ready;
6885	int ret;
6886
6887	if (atomic_read(&phba->fabric_iocb_count) > 1)
6888		BUG();
6889
6890	spin_lock_irqsave(&phba->hbalock, iflags);
6891	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
6892		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
6893
6894	if (ready)
6895		/* Increment fabric iocb count to hold the position */
6896		atomic_inc(&phba->fabric_iocb_count);
6897	spin_unlock_irqrestore(&phba->hbalock, iflags);
6898	if (ready) {
6899		iocb->fabric_iocb_cmpl = iocb->iocb_cmpl;
6900		iocb->iocb_cmpl = lpfc_cmpl_fabric_iocb;
6901		iocb->iocb_flag |= LPFC_IO_FABRIC;
6902
6903		lpfc_debugfs_disc_trc(iocb->vport, LPFC_DISC_TRC_ELS_CMD,
6904			"Fabric sched2:   ste:x%x",
6905			iocb->vport->port_state, 0, 0);
6906
6907		ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
6908
6909		if (ret == IOCB_ERROR) {
6910			iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
6911			iocb->fabric_iocb_cmpl = NULL;
6912			iocb->iocb_flag &= ~LPFC_IO_FABRIC;
6913			atomic_dec(&phba->fabric_iocb_count);
6914		}
6915	} else {
6916		spin_lock_irqsave(&phba->hbalock, iflags);
6917		list_add_tail(&iocb->list, &phba->fabric_iocb_list);
6918		spin_unlock_irqrestore(&phba->hbalock, iflags);
6919		ret = IOCB_SUCCESS;
6920	}
6921	return ret;
6922}
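
/*
 * Editor's note: the out-of-order window described in the header comment
 * could be narrowed by also requiring the internal list to be empty before
 * issuing directly. This is only an illustrative sketch of such a check,
 * not a change made to the driver:
 *
 *	ready = atomic_read(&phba->fabric_iocb_count) == 0 &&
 *		list_empty(&phba->fabric_iocb_list) &&
 *		!test_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags);
 */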
6923
6924/**
6925 * lpfc_fabric_abort_vport - Abort a vport's iocbs from driver fabric iocb list
6926 * @vport: pointer to a virtual N_Port data structure.
6927 *
6928 * This routine aborts all the IOCBs associated with a @vport from the
6929 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
6930 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
6931 * list, removes each IOCB associated with the @vport off the list, sets the
6932 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
6933 * associated with the IOCB.
6934 **/
6935static void lpfc_fabric_abort_vport(struct lpfc_vport *vport)
6936{
6937	LIST_HEAD(completions);
6938	struct lpfc_hba  *phba = vport->phba;
6939	struct lpfc_iocbq *tmp_iocb, *piocb;
6940
6941	spin_lock_irq(&phba->hbalock);
6942	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
6943				 list) {
6944
6945		if (piocb->vport != vport)
6946			continue;
6947
6948		list_move_tail(&piocb->list, &completions);
6949	}
6950	spin_unlock_irq(&phba->hbalock);
6951
6952	/* Cancel all the IOCBs from the completions list */
6953	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6954			      IOERR_SLI_ABORTED);
6955}
6956
6957/**
6958 * lpfc_fabric_abort_nport - Abort a ndlp's iocbs from driver fabric iocb list
6959 * @ndlp: pointer to a node-list data structure.
6960 *
6961 * This routine aborts all the IOCBs associated with an @ndlp from the
6962 * driver internal fabric IOCB list. The list contains fabric IOCBs to be
6963 * issued to the ELS IOCB ring. This abort function walks the fabric IOCB
6964 * list, removes each IOCB associated with the @ndlp off the list, sets the
6965 * status field to IOSTAT_LOCAL_REJECT, and invokes the callback function
6966 * associated with the IOCB.
6967 **/
6968void lpfc_fabric_abort_nport(struct lpfc_nodelist *ndlp)
6969{
6970	LIST_HEAD(completions);
6971	struct lpfc_hba  *phba = ndlp->phba;
6972	struct lpfc_iocbq *tmp_iocb, *piocb;
6973	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
6974
6975	spin_lock_irq(&phba->hbalock);
6976	list_for_each_entry_safe(piocb, tmp_iocb, &phba->fabric_iocb_list,
6977				 list) {
6978		if ((lpfc_check_sli_ndlp(phba, pring, piocb, ndlp))) {
6979
6980			list_move_tail(&piocb->list, &completions);
6981		}
6982	}
6983	spin_unlock_irq(&phba->hbalock);
6984
6985	/* Cancel all the IOCBs from the completions list */
6986	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
6987			      IOERR_SLI_ABORTED);
6988}
6989
6990/**
6991 * lpfc_fabric_abort_hba - Abort all iocbs on driver fabric iocb list
6992 * @phba: pointer to lpfc hba data structure.
6993 *
6994 * This routine aborts all the IOCBs currently on the driver internal
6995 * fabric IOCB list. The list contains fabric IOCBs to be issued to the ELS
6996 * IOCB ring. This function takes the entire IOCB list off the fabric IOCB
6997 * list, removes IOCBs off the list, sets the status field to
6998 * IOSTAT_LOCAL_REJECT, and invokes the callback function associated with
6999 * the IOCB.
7000 **/
7001void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
7002{
7003	LIST_HEAD(completions);
7004
7005	spin_lock_irq(&phba->hbalock);
7006	list_splice_init(&phba->fabric_iocb_list, &completions);
7007	spin_unlock_irq(&phba->hbalock);
7008
7009	/* Cancel all the IOCBs from the completions list */
7010	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
7011			      IOERR_SLI_ABORTED);
7012}
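
/*
 * Editor's note: the three fabric-abort helpers above share one idiom:
 * collect the victim iocbs on a private list while holding hbalock, then
 * complete them with lpfc_sli_cancel_iocbs() after the lock is dropped so
 * the completion callbacks never run under hbalock. A stripped-down sketch:
 *
 *	LIST_HEAD(completions);
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_splice_init(&phba->fabric_iocb_list, &completions);
 *	spin_unlock_irq(&phba->hbalock);
 *
 *	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 *			      IOERR_SLI_ABORTED);
 */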
7013
7014/**
7015 * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
7016 * @phba: pointer to lpfc hba data structure.
7017 * @axri: pointer to the els xri abort wcqe structure.
7018 *
7019 * This routine is invoked by the worker thread to process a SLI4 slow-path
7020 * ELS aborted xri.
7021 **/
7022void
7023lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
7024			  struct sli4_wcqe_xri_aborted *axri)
7025{
7026	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
7027	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
7028	unsigned long iflag = 0;
7029	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
7030
7031	spin_lock_irqsave(&phba->hbalock, iflag);
7032	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
7033	list_for_each_entry_safe(sglq_entry, sglq_next,
7034			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
7035		if (sglq_entry->sli4_xritag == xri) {
7036			list_del(&sglq_entry->list);
7037			list_add_tail(&sglq_entry->list,
7038				&phba->sli4_hba.lpfc_sgl_list);
7039			sglq_entry->state = SGL_FREED;
7040			spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
7041			spin_unlock_irqrestore(&phba->hbalock, iflag);
7042
7043			/* Check if TXQ queue needs to be serviced */
7044			if (pring->txq_cnt)
7045				lpfc_worker_wake_up(phba);
7046			return;
7047		}
7048	}
7049	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
7050	sglq_entry = __lpfc_get_active_sglq(phba, xri);
7051	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
7052		spin_unlock_irqrestore(&phba->hbalock, iflag);
7053		return;
7054	}
7055	sglq_entry->state = SGL_XRI_ABORTED;
7056	spin_unlock_irqrestore(&phba->hbalock, iflag);
7057	return;
7058}
7059