1/*-
2 * Copyright (c) 2017 Broadcom. All rights reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 *    this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 *    this list of conditions and the following disclaimer in the documentation
13 *    and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors
16 *    may be used to endorse or promote products derived from this software
17 *    without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * $FreeBSD: stable/11/sys/dev/ocs_fc/ocs_fabric.c 331766 2018-03-30 15:28:25Z ken $
32 */
33
34/**
35 * @file
36 *
37 * This file implements remote node state machines for:
38 * - Fabric logins.
39 * - Fabric controller events.
40 * - Name/directory services interaction.
41 * - Point-to-point logins.
42 */
43
44/*!
45@defgroup fabric_sm Node State Machine: Fabric States
46@defgroup ns_sm Node State Machine: Name/Directory Services States
47@defgroup p2p_sm Node State Machine: Point-to-Point Node States
48*/
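
/*
 * All of the state handlers in the groups above share the same shape: an
 * event-driven function of (ctx, evt, arg) that returns NULL, consumes the
 * events it cares about, and defers everything else to a common handler.
 * A minimal sketch of that skeleton (illustrative only; std_node_state_decl()
 * and node_sm_trace() are macros declared elsewhere in the driver):
 *
 *	void *
 *	__example_state(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
 *	{
 *		std_node_state_decl();
 *
 *		node_sm_trace();
 *
 *		switch(evt) {
 *		case OCS_EVT_ENTER:
 *			// take the state's entry action, then move on
 *			ocs_node_transition(node, __ocs_fabric_idle, NULL);
 *			break;
 *		default:
 *			// events not handled here go to the common handler
 *			__ocs_fabric_common(__func__, ctx, evt, arg);
 *			break;
 *		}
 *		return NULL;
 *	}
 */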
49
50#include "ocs.h"
51#include "ocs_fabric.h"
52#include "ocs_els.h"
53#include "ocs_device.h"
54
55static void ocs_fabric_initiate_shutdown(ocs_node_t *node);
56static void * __ocs_fabric_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
57static int32_t ocs_start_ns_node(ocs_sport_t *sport);
58static int32_t ocs_start_fabctl_node(ocs_sport_t *sport);
59static int32_t ocs_process_gidpt_payload(ocs_node_t *node, fcct_gidpt_acc_t *gidpt, uint32_t gidpt_len);
60static void ocs_process_rscn(ocs_node_t *node, ocs_node_cb_t *cbdata);
61static uint64_t ocs_get_wwpn(fc_plogi_payload_t *sp);
62static void gidpt_delay_timer_cb(void *arg);
63
64/**
65 * @ingroup fabric_sm
66 * @brief Fabric node state machine: Initial state.
67 *
68 * @par Description
69 * Send an FLOGI to a well-known fabric.
70 *
71 * @param ctx Remote node sm context.
72 * @param evt Event to process.
73 * @param arg Per event optional argument.
74 *
75 * @return Returns NULL.
76 */
77void *
78__ocs_fabric_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
79{
80	std_node_state_decl();
81
82	node_sm_trace();
83
84	switch(evt) {
85	case OCS_EVT_REENTER:	/* not sure why we're getting these ... */
86		ocs_log_debug(node->ocs, ">>> reenter !!\n");
87		/* fall through */
88	case OCS_EVT_ENTER:
89		/* sm: / send FLOGI */
90		ocs_send_flogi(node, OCS_FC_FLOGI_TIMEOUT_SEC, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
91		ocs_node_transition(node, __ocs_fabric_flogi_wait_rsp, NULL);
92		break;
93
94	default:
95		__ocs_fabric_common(__func__, ctx, evt, arg);
96		break;
97	}
98
99	return NULL;
100}
101
102/**
103 * @ingroup fabric_sm
104 * @brief Set sport topology.
105 *
106 * @par Description
107 * Set sport topology.
108 *
109 * @param node Pointer to the node for which the topology is set.
110 * @param topology Topology to set.
111 *
 * @return None.
113 */
114void
115ocs_fabric_set_topology(ocs_node_t *node, ocs_sport_topology_e topology)
116{
117	node->sport->topology = topology;
118}
119
120/**
121 * @ingroup fabric_sm
 * @brief Notify sport topology.
 *
 * @par Description
 * Posts an OCS_EVT_SPORT_TOPOLOGY_NOTIFY event to every other node on the sport.
 *
 * @param node Pointer to the node whose sport topology is broadcast.
 *
 * @return None.
127 */
128void
129ocs_fabric_notify_topology(ocs_node_t *node)
130{
131	ocs_node_t *tmp_node;
132	ocs_node_t *next;
133	ocs_sport_topology_e topology = node->sport->topology;
134
135	/* now loop through the nodes in the sport and send topology notification */
136	ocs_sport_lock(node->sport);
137	ocs_list_foreach_safe(&node->sport->node_list, tmp_node, next) {
138		if (tmp_node != node) {
139			ocs_node_post_event(tmp_node, OCS_EVT_SPORT_TOPOLOGY_NOTIFY, (void *)topology);
140		}
141	}
142	ocs_sport_unlock(node->sport);
143}
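
/*
 * Note that the topology value is passed as the event argument itself (cast
 * to a pointer), not as a pointer to storage. A receiving handler would
 * recover it with the matching cast -- an illustrative sketch only, since the
 * actual consumer lives in the device node state machine:
 *
 *	case OCS_EVT_SPORT_TOPOLOGY_NOTIFY: {
 *		ocs_sport_topology_e topology = (ocs_sport_topology_e)(uintptr_t)arg;
 *		// react to the new topology (for example, P2P vs. fabric)
 *		break;
 *	}
 */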
144
145/**
146 * @ingroup fabric_sm
147 * @brief Fabric node state machine: Wait for an FLOGI response.
148 *
149 * @par Description
150 * Wait for an FLOGI response event.
151 *
152 * @param ctx Remote node state machine context.
153 * @param evt Event to process.
154 * @param arg Per event optional argument.
155 *
156 * @return Returns NULL.
157 */
158
159void *
160__ocs_fabric_flogi_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
161{
162	ocs_node_cb_t *cbdata = arg;
163	std_node_state_decl();
164
165	node_sm_trace();
166
167	switch(evt) {
168	case OCS_EVT_SRRS_ELS_REQ_OK: {
169
170		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FLOGI, __ocs_fabric_common, __func__)) {
171			return NULL;
172		}
173		ocs_assert(node->els_req_cnt, NULL);
174		node->els_req_cnt--;
175
176		ocs_domain_save_sparms(node->sport->domain, cbdata->els->els_rsp.virt);
177
178		ocs_display_sparams(node->display_name, "flogi rcvd resp", 0, NULL,
179			((uint8_t*)cbdata->els->els_rsp.virt) + 4);
180
		/* Check to see if the fabric is an F_PORT or an N_PORT */
182		if (ocs_rnode_is_nport(cbdata->els->els_rsp.virt)) {
183			/* sm: if nport and p2p_winner / ocs_domain_attach */
184			ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_P2P);
185			if (ocs_p2p_setup(node->sport)) {
186				node_printf(node, "p2p setup failed, shutting down node\n");
187				node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
188				ocs_fabric_initiate_shutdown(node);
189			} else {
190				if (node->sport->p2p_winner) {
191					ocs_node_transition(node, __ocs_p2p_wait_domain_attach, NULL);
192					if (!node->sport->domain->attached) {
193						node_printf(node, "p2p winner, domain not attached\n");
194						ocs_domain_attach(node->sport->domain, node->sport->p2p_port_id);
195					} else {
196						/* already attached, just send ATTACH_OK */
197						node_printf(node, "p2p winner, domain already attached\n");
198						ocs_node_post_event(node, OCS_EVT_DOMAIN_ATTACH_OK, NULL);
199					}
200				} else {
201					/* peer is p2p winner; PLOGI will be received on the
202					 * remote SID=1 node; this node has served its purpose
203					 */
204					node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
205					ocs_fabric_initiate_shutdown(node);
206				}
207			}
208		} else {
209			/* sm: if not nport / ocs_domain_attach */
210			/* ext_status has the fc_id, attach domain */
211			ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_FABRIC);
212			ocs_fabric_notify_topology(node);
213			ocs_assert(!node->sport->domain->attached, NULL);
214			ocs_domain_attach(node->sport->domain, cbdata->ext_status);
215			ocs_node_transition(node, __ocs_fabric_wait_domain_attach, NULL);
216		}
217
218		break;
219	}
220
221	case OCS_EVT_ELS_REQ_ABORTED:
222	case OCS_EVT_SRRS_ELS_REQ_RJT:
223	case OCS_EVT_SRRS_ELS_REQ_FAIL: {
224		ocs_sport_t *sport = node->sport;
		/*
		 * With these errors there is no recovery, so shut down the sport,
		 * leaving the link up and the domain ready.
		 */
229		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FLOGI, __ocs_fabric_common, __func__)) {
230			return NULL;
231		}
232		node_printf(node, "FLOGI failed evt=%s, shutting down sport [%s]\n", ocs_sm_event_name(evt),
233			sport->display_name);
234		ocs_assert(node->els_req_cnt, NULL);
235		node->els_req_cnt--;
236		ocs_sm_post_event(&sport->sm, OCS_EVT_SHUTDOWN, NULL);
237		break;
238	}
239
240	default:
241		__ocs_fabric_common(__func__, ctx, evt, arg);
242		break;
243	}
244
245	return NULL;
246}
247
248/**
249 * @ingroup fabric_sm
250 * @brief Fabric node state machine: Initial state for a virtual port.
251 *
252 * @par Description
253 * State entered when a virtual port is created. Send FDISC.
254 *
255 * @param ctx Remote node state machine context.
256 * @param evt Event to process.
257 * @param arg Per event optional argument.
258 *
259 * @return Returns NULL.
260 */
261void *
262__ocs_vport_fabric_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
263{
264	std_node_state_decl();
265
266	node_sm_trace();
267
268	switch(evt) {
269	case OCS_EVT_ENTER:
270		/* sm: send FDISC */
271		ocs_send_fdisc(node, OCS_FC_FLOGI_TIMEOUT_SEC, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
272		ocs_node_transition(node, __ocs_fabric_fdisc_wait_rsp, NULL);
273		break;
274
275	default:
276		__ocs_fabric_common(__func__, ctx, evt, arg);
277		break;
278	}
279
280	return NULL;
281}
282
283/**
284 * @ingroup fabric_sm
285 * @brief Fabric node state machine: Wait for an FDISC response
286 *
287 * @par Description
288 * Used for a virtual port. Waits for an FDISC response. If OK, issue a HW port attach.
289 *
290 * @param ctx Remote node state machine context.
291 * @param evt Event to process.
292 * @param arg Per event optional argument.
293 *
294 * @return Returns NULL.
295 */
296void *
297__ocs_fabric_fdisc_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
298{
299	ocs_node_cb_t *cbdata = arg;
300	std_node_state_decl();
301
302	node_sm_trace();
303
304	switch(evt) {
305	case OCS_EVT_SRRS_ELS_REQ_OK: {
306		/* fc_id is in ext_status */
307		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FDISC, __ocs_fabric_common, __func__)) {
308			return NULL;
309		}
310
311		ocs_display_sparams(node->display_name, "fdisc rcvd resp", 0, NULL,
312			((uint8_t*)cbdata->els->els_rsp.virt) + 4);
313
314		ocs_assert(node->els_req_cnt, NULL);
315		node->els_req_cnt--;
316		/* sm: ocs_sport_attach */
317		ocs_sport_attach(node->sport, cbdata->ext_status);
318		ocs_node_transition(node, __ocs_fabric_wait_domain_attach, NULL);
319		break;
320
321	}
322
323	case OCS_EVT_SRRS_ELS_REQ_RJT:
324	case OCS_EVT_SRRS_ELS_REQ_FAIL: {
325		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FDISC, __ocs_fabric_common, __func__)) {
326			return NULL;
327		}
328		ocs_assert(node->els_req_cnt, NULL);
329		node->els_req_cnt--;
330		ocs_log_err(ocs, "FDISC failed, shutting down sport\n");
331		/* sm: shutdown sport */
332		ocs_sm_post_event(&node->sport->sm, OCS_EVT_SHUTDOWN, NULL);
333		break;
334	}
335
336	default:
337		__ocs_fabric_common(__func__, ctx, evt, arg);
338		break;
339	}
340
341	return NULL;
342}
343
344/**
345 * @ingroup fabric_sm
346 * @brief Fabric node state machine: Wait for a domain/sport attach event.
347 *
348 * @par Description
349 * Waits for a domain/sport attach event.
350 *
351 * @param ctx Remote node state machine context.
352 * @param evt Event to process.
353 * @param arg Per event optional argument.
354 *
355 * @return Returns NULL.
356 */
357void *
358__ocs_fabric_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
359{
360	std_node_state_decl();
361
362	node_sm_trace();
363
364	switch(evt) {
365	case OCS_EVT_ENTER:
366		ocs_node_hold_frames(node);
367		break;
368
369	case OCS_EVT_EXIT:
370		ocs_node_accept_frames(node);
371		break;
372	case OCS_EVT_DOMAIN_ATTACH_OK:
373	case OCS_EVT_SPORT_ATTACH_OK: {
374		int rc;
375
376		rc = ocs_start_ns_node(node->sport);
377		if (rc)
378			return NULL;
379
		/* sm: if enable_rscn / start fabctl node
		 * Instantiate the fabric controller (sends SCR) */
382		if (node->sport->enable_rscn) {
383			rc = ocs_start_fabctl_node(node->sport);
384			if (rc)
385				return NULL;
386		}
387		ocs_node_transition(node, __ocs_fabric_idle, NULL);
388		break;
389	}
390	default:
391		__ocs_fabric_common(__func__, ctx, evt, arg);
392		return NULL;
393	}
394
395	return NULL;
396}
397
398/**
399 * @ingroup fabric_sm
400 * @brief Fabric node state machine: Fabric node is idle.
401 *
402 * @par Description
403 * Wait for fabric node events.
404 *
405 * @param ctx Remote node state machine context.
406 * @param evt Event to process.
407 * @param arg Per event optional argument.
408 *
409 * @return Returns NULL.
410 */
411void *
412__ocs_fabric_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
413{
414	std_node_state_decl();
415
416	node_sm_trace();
417
418	switch(evt) {
419	case OCS_EVT_DOMAIN_ATTACH_OK:
420		break;
421	default:
422		__ocs_fabric_common(__func__, ctx, evt, arg);
423		return NULL;
424	}
425
426	return NULL;
427}
428
429/**
430 * @ingroup ns_sm
431 * @brief Name services node state machine: Initialize.
432 *
433 * @par Description
434 * A PLOGI is sent to the well-known name/directory services node.
435 *
436 * @param ctx Remote node state machine context.
437 * @param evt Event to process.
438 * @param arg Per event optional argument.
439 *
440 * @return Returns NULL.
441 */
442void *
443__ocs_ns_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
444{
445	std_node_state_decl();
446
447	node_sm_trace();
448
449	switch(evt) {
450	case OCS_EVT_ENTER:
451		/* sm: send PLOGI */
452		ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
453		ocs_node_transition(node, __ocs_ns_plogi_wait_rsp, NULL);
454		break;
455	default:
456		__ocs_fabric_common(__func__, ctx, evt, arg);
457		break;
458	}
459
460	return NULL;
461}
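
/*
 * Taken together, the name services states below implement this request
 * sequence, with each state waiting for the response to the previous request:
 *
 *	ocs_send_plogi()      log in to the name server (FC_ADDR_NAMESERVER)
 *	ocs_ns_send_rftid()   register FC-4 types
 *	ocs_ns_send_rffid()   register FC-4 features
 *	ocs_ns_send_gidpt()   query the port list (only if enable_rscn is set)
 *
 * followed by __ocs_ns_idle, where GID_PT is re-issued whenever an RSCN is
 * forwarded from the fabric controller node.
 */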
462
463/**
464 * @ingroup ns_sm
465 * @brief Name services node state machine: Wait for a PLOGI response.
466 *
467 * @par Description
468 * Waits for a response from PLOGI to name services node, then issues a
469 * node attach request to the HW.
470 *
471 * @param ctx Remote node state machine context.
472 * @param evt Event to process.
473 * @param arg Per event optional argument.
474 *
475 * @return Returns NULL.
476 */
477void *
478__ocs_ns_plogi_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
479{
480	int32_t rc;
481	ocs_node_cb_t *cbdata = arg;
482	std_node_state_decl();
483
484	node_sm_trace();
485
486	switch(evt) {
487	case OCS_EVT_SRRS_ELS_REQ_OK: {
488		/* Save service parameters */
489		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
490			return NULL;
491		}
492		ocs_assert(node->els_req_cnt, NULL);
493		node->els_req_cnt--;
494		/* sm: save sparams, ocs_node_attach */
495		ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
496		ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
497			((uint8_t*)cbdata->els->els_rsp.virt) + 4);
498		rc = ocs_node_attach(node);
499		ocs_node_transition(node, __ocs_ns_wait_node_attach, NULL);
500		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
501			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
502		}
503		break;
504	}
505	default:
506		__ocs_fabric_common(__func__, ctx, evt, arg);
507		return NULL;
508	}
509
510	return NULL;
511}
512
513/**
514 * @ingroup ns_sm
515 * @brief Name services node state machine: Wait for a node attach completion.
516 *
517 * @par Description
518 * Waits for a node attach completion, then issues an RFTID name services
519 * request.
520 *
521 * @param ctx Remote node state machine context.
522 * @param evt Event to process.
523 * @param arg Per event optional argument.
524 *
525 * @return Returns NULL.
526 */
527void *
528__ocs_ns_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
529{
530	std_node_state_decl();
531
532	node_sm_trace();
533
534	switch(evt) {
535	case OCS_EVT_ENTER:
536		ocs_node_hold_frames(node);
537		break;
538
539	case OCS_EVT_EXIT:
540		ocs_node_accept_frames(node);
541		break;
542
543	case OCS_EVT_NODE_ATTACH_OK:
544		node->attached = TRUE;
545		/* sm: send RFTID */
546		ocs_ns_send_rftid(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
547				 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
548		ocs_node_transition(node, __ocs_ns_rftid_wait_rsp, NULL);
549		break;
550
551	case OCS_EVT_NODE_ATTACH_FAIL:
552		/* node attach failed, shutdown the node */
553		node->attached = FALSE;
554		node_printf(node, "Node attach failed\n");
555		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
556		ocs_fabric_initiate_shutdown(node);
557		break;
558
559	case OCS_EVT_SHUTDOWN:
560		node_printf(node, "Shutdown event received\n");
561		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
562		ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
563		break;
564
	/* if an RSCN is received, just ignore it;
	 * we haven't sent GID_PT yet (ACC is sent by the fabctl node) */
567	case OCS_EVT_RSCN_RCVD:
568		break;
569
570	default:
571		__ocs_fabric_common(__func__, ctx, evt, arg);
572		return NULL;
573	}
574
575	return NULL;
576}
577
578/**
579 * @ingroup ns_sm
580 * @brief Wait for a domain/sport/node attach completion, then
581 * shutdown.
582 *
583 * @par Description
584 * Waits for a domain/sport/node attach completion, then shuts
585 * node down.
586 *
587 * @param ctx Remote node state machine context.
588 * @param evt Event to process.
589 * @param arg Per event optional argument.
590 *
591 * @return Returns NULL.
592 */
593void *
594__ocs_fabric_wait_attach_evt_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
595{
596	std_node_state_decl();
597
598	node_sm_trace();
599
600	switch(evt) {
601	case OCS_EVT_ENTER:
602		ocs_node_hold_frames(node);
603		break;
604
605	case OCS_EVT_EXIT:
606		ocs_node_accept_frames(node);
607		break;
608
609	/* wait for any of these attach events and then shutdown */
610	case OCS_EVT_NODE_ATTACH_OK:
611		node->attached = TRUE;
612		node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
613		ocs_fabric_initiate_shutdown(node);
614		break;
615
616	case OCS_EVT_NODE_ATTACH_FAIL:
617		node->attached = FALSE;
618		node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
619		ocs_fabric_initiate_shutdown(node);
620		break;
621
622	/* ignore shutdown event as we're already in shutdown path */
623	case OCS_EVT_SHUTDOWN:
624		node_printf(node, "Shutdown event received\n");
625		break;
626
627	default:
628		__ocs_fabric_common(__func__, ctx, evt, arg);
629		return NULL;
630	}
631
632	return NULL;
633}
634
635/**
636 * @ingroup ns_sm
637 * @brief Name services node state machine: Wait for an RFTID response event.
638 *
639 * @par Description
 * Waits for an RFTID response event, then issues an RFFID name services
 * request.
642 *
643 * @param ctx Remote node state machine context.
644 * @param evt Event to process.
645 * @param arg Per event optional argument.
646 *
647 * @return Returns NULL.
648 */
649void *
650__ocs_ns_rftid_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
651{
652	std_node_state_decl();
653
654	node_sm_trace();
655
656	switch(evt) {
657	case OCS_EVT_SRRS_ELS_REQ_OK:
658		if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_RFT_ID, __ocs_fabric_common, __func__)) {
659			return NULL;
660		}
661		ocs_assert(node->els_req_cnt, NULL);
662		node->els_req_cnt--;
		/* sm: send RFFID */
664		ocs_ns_send_rffid(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
665				OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
666		ocs_node_transition(node, __ocs_ns_rffid_wait_rsp, NULL);
667		break;
668
	/* if an RSCN is received, just ignore it;
	 * we haven't sent GID_PT yet (ACC is sent by the fabctl node) */
671	case OCS_EVT_RSCN_RCVD:
672		break;
673
674	default:
675		__ocs_fabric_common(__func__, ctx, evt, arg);
676		return NULL;
677	}
678
679	return NULL;
680}
681
682/**
683 * @ingroup ns_sm
684 * @brief Fabric node state machine: Wait for RFFID response event.
685 *
686 * @par Description
687 * Waits for an RFFID response event; if configured for an initiator operation,
688 * a GIDPT name services request is issued.
689 *
690 * @param ctx Remote node state machine context.
691 * @param evt Event to process.
692 * @param arg Per event optional argument.
693 *
694 * @return Returns NULL.
695 */
696void *
697__ocs_ns_rffid_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
698{
699	std_node_state_decl();
700
701	node_sm_trace();
702
703	switch(evt) {
704	case OCS_EVT_SRRS_ELS_REQ_OK:	{
705		if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_RFF_ID, __ocs_fabric_common, __func__)) {
706			return NULL;
707		}
708		ocs_assert(node->els_req_cnt, NULL);
709		node->els_req_cnt--;
710		if (node->sport->enable_rscn) {
711			/* sm: if enable_rscn / send GIDPT */
712			ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
713					OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
714			ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
715		} else {
716			/* if 'T' only, we're done, go to idle */
717			ocs_node_transition(node, __ocs_ns_idle, NULL);
718		}
719		break;
720	}
	/* if an RSCN is received, just ignore it;
	 * we haven't sent GID_PT yet (ACC is sent by the fabctl node) */
723	case OCS_EVT_RSCN_RCVD:
724		break;
725
726	default:
727		__ocs_fabric_common(__func__, ctx, evt, arg);
728		return NULL;
729	}
730
731	return NULL;
732}
733
734/**
735 * @ingroup ns_sm
736 * @brief Name services node state machine: Wait for a GIDPT response.
737 *
738 * @par Description
739 * Wait for a GIDPT response from the name server. Process the FC_IDs that are
740 * reported by creating new remote ports, as needed.
741 *
742 * @param ctx Remote node state machine context.
743 * @param evt Event to process.
744 * @param arg Per event optional argument.
745 *
746 * @return Returns NULL.
747 */
748void *
749__ocs_ns_gidpt_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
750{
751	ocs_node_cb_t *cbdata = arg;
752	std_node_state_decl();
753
754	node_sm_trace();
755
756	switch(evt) {
757	case OCS_EVT_SRRS_ELS_REQ_OK:	{
758		if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_GID_PT, __ocs_fabric_common, __func__)) {
759			return NULL;
760		}
761		ocs_assert(node->els_req_cnt, NULL);
762		node->els_req_cnt--;
763		/* sm: / process GIDPT payload */
764		ocs_process_gidpt_payload(node, cbdata->els->els_rsp.virt, cbdata->els->els_rsp.len);
765		/* TODO: should we logout at this point or just go idle */
766		ocs_node_transition(node, __ocs_ns_idle, NULL);
767		break;
768	}
769
770	case OCS_EVT_SRRS_ELS_REQ_FAIL:	{
771		/* not much we can do; will retry with the next RSCN */
772		node_printf(node, "GID_PT failed to complete\n");
773		ocs_assert(node->els_req_cnt, NULL);
774		node->els_req_cnt--;
775		ocs_node_transition(node, __ocs_ns_idle, NULL);
776		break;
777	}
778
779	/* if receive RSCN here, queue up another discovery processing */
780	case OCS_EVT_RSCN_RCVD: {
781		node_printf(node, "RSCN received during GID_PT processing\n");
782		node->rscn_pending = 1;
783		break;
784	}
785
786	default:
787		__ocs_fabric_common(__func__, ctx, evt, arg);
788		return NULL;
789	}
790
791	return NULL;
792}
793
794
795/**
796 * @ingroup ns_sm
797 * @brief Name services node state machine: Idle state.
798 *
799 * @par Description
 * Idle. Waits for RSCN-received events (posted from the fabric controller),
 * then restarts the GIDPT name services query and processing.
802 *
803 * @param ctx Remote node state machine context.
804 * @param evt Event to process.
805 * @param arg Per event optional argument.
806 *
807 * @return Returns NULL.
808 */
809void *
810__ocs_ns_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
811{
812	std_node_state_decl();
813
814	node_sm_trace();
815
816	switch(evt) {
817	case OCS_EVT_ENTER:
818		if (!node->rscn_pending) {
819			break;
820		}
821		node_printf(node, "RSCN pending, restart discovery\n");
822		node->rscn_pending = 0;
823
824			/* fall through */
825
826	case OCS_EVT_RSCN_RCVD: {
827		/* sm: / send GIDPT
828		 * If target RSCN processing is enabled, and this is target only
829		 * (not initiator), and tgt_rscn_delay is non-zero,
830		 * then we delay issuing the GID_PT
831		 */
832		if ((ocs->tgt_rscn_delay_msec != 0) && !node->sport->enable_ini && node->sport->enable_tgt &&
833			enable_target_rscn(ocs)) {
834			ocs_node_transition(node, __ocs_ns_gidpt_delay, NULL);
835		} else {
836			ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
837					OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
838			ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
839		}
840		break;
841	}
842
843	default:
844		__ocs_fabric_common(__func__, ctx, evt, arg);
845		break;
846	}
847
848	return NULL;
849}
850
851/**
852 * @brief Handle GIDPT delay timer callback
853 *
854 * @par Description
 * Post an OCS_EVT_GIDPT_DELAY_EXPIRED event to the passed-in node.
856 *
857 * @param arg Pointer to node.
858 *
859 * @return None.
860 */
861static void
862gidpt_delay_timer_cb(void *arg)
863{
864	ocs_node_t *node = arg;
865	int32_t rc;
866
867	ocs_del_timer(&node->gidpt_delay_timer);
868	rc = ocs_xport_control(node->ocs->xport, OCS_XPORT_POST_NODE_EVENT, node, OCS_EVT_GIDPT_DELAY_EXPIRED, NULL);
869	if (rc) {
870		ocs_log_err(node->ocs, "ocs_xport_control(OCS_XPORT_POST_NODE_EVENT) failed: %d\n", rc);
871	}
872}
873
874/**
875 * @ingroup ns_sm
876 * @brief Name services node state machine: Delayed GIDPT.
877 *
878 * @par Description
879 * Waiting for GIDPT delay to expire before submitting GIDPT to name server.
880 *
881 * @param ctx Remote node state machine context.
882 * @param evt Event to process.
883 * @param arg Per event optional argument.
884 *
885 * @return Returns NULL.
886 */
887void *
888__ocs_ns_gidpt_delay(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
889{
890	std_node_state_decl();
891
892	node_sm_trace();
893
894	switch(evt) {
895	case OCS_EVT_ENTER: {
896		time_t delay_msec;
897
898		ocs_assert(ocs->tgt_rscn_delay_msec != 0, NULL);
899
		/*
		 * Compute the delay time. Use tgt_rscn_delay by default; if the time
		 * since the last GID_PT is less than tgt_rscn_period, use
		 * tgt_rscn_period instead.
		 */
904		delay_msec = ocs->tgt_rscn_delay_msec;
905		if ((ocs_msectime() - node->time_last_gidpt_msec) < ocs->tgt_rscn_period_msec) {
906			delay_msec = ocs->tgt_rscn_period_msec;
907		}
908
909		ocs_setup_timer(ocs, &node->gidpt_delay_timer, gidpt_delay_timer_cb, node, delay_msec);
910
911		break;
912	}
913
914	case OCS_EVT_GIDPT_DELAY_EXPIRED:
915		node->time_last_gidpt_msec = ocs_msectime();
916		ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
917				OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
918		ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
919		break;
920
921	case OCS_EVT_RSCN_RCVD: {
922		ocs_log_debug(ocs, "RSCN received while in GIDPT delay - no action\n");
923		break;
924	}
925
926	default:
927		__ocs_fabric_common(__func__, ctx, evt, arg);
928		break;
929	}
930
931	return NULL;
932}
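
/*
 * Worked example of the delay computation above, using hypothetical values:
 * with tgt_rscn_delay_msec = 2000 and tgt_rscn_period_msec = 10000, an RSCN
 * arriving 3000 ms after the last GID_PT gives
 *
 *	ocs_msectime() - node->time_last_gidpt_msec = 3000 < 10000
 *
 * so delay_msec is bumped from 2000 to 10000, rate-limiting GID_PT to at most
 * one query per tgt_rscn_period. An RSCN arriving 15000 ms after the last
 * GID_PT would use the shorter 2000 ms delay.
 */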
933
934/**
935 * @ingroup fabric_sm
936 * @brief Fabric controller node state machine: Initial state.
937 *
938 * @par Description
 * Issue an SCR to the well-known fabric controller address; no explicit
 * login to the fabric controller is required.
940 *
941 * @param ctx Remote node state machine context.
942 * @param evt Event to process.
943 * @param arg Per event optional argument.
944 *
945 * @return Returns NULL.
946 */
947void *
948__ocs_fabctl_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
949{
950	ocs_node_t *node = ctx->app;
951
952	node_sm_trace();
953
954	switch(evt) {
955	case OCS_EVT_ENTER:
956		/* no need to login to fabric controller, just send SCR */
957		ocs_send_scr(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
958		ocs_node_transition(node, __ocs_fabctl_wait_scr_rsp, NULL);
959		break;
960
961	case OCS_EVT_NODE_ATTACH_OK:
962		node->attached = TRUE;
963		break;
964
965	default:
966		__ocs_fabric_common(__func__, ctx, evt, arg);
967		return NULL;
968	}
969
970	return NULL;
971}
972
973/**
974 * @ingroup fabric_sm
975 * @brief Fabric controller node state machine: Wait for a node attach request
976 * to complete.
977 *
978 * @par Description
979 * Wait for a node attach to complete. If successful, issue an SCR
980 * to the fabric controller, subscribing to all RSCN.
981 *
982 * @param ctx Remote node state machine context.
983 * @param evt Event to process.
984 * @param arg Per event optional argument.
985 *
986 * @return Returns NULL.
987 *
988 */
989void *
990__ocs_fabctl_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
991{
992	std_node_state_decl();
993
994	node_sm_trace();
995
996	switch(evt) {
997	case OCS_EVT_ENTER:
998		ocs_node_hold_frames(node);
999		break;
1000
1001	case OCS_EVT_EXIT:
1002		ocs_node_accept_frames(node);
1003		break;
1004
1005	case OCS_EVT_NODE_ATTACH_OK:
1006		node->attached = TRUE;
1007		/* sm: / send SCR */
1008		ocs_send_scr(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
1009		ocs_node_transition(node, __ocs_fabctl_wait_scr_rsp, NULL);
1010		break;
1011
1012	case OCS_EVT_NODE_ATTACH_FAIL:
1013		/* node attach failed, shutdown the node */
1014		node->attached = FALSE;
1015		node_printf(node, "Node attach failed\n");
1016		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1017		ocs_fabric_initiate_shutdown(node);
1018		break;
1019
1020	case OCS_EVT_SHUTDOWN:
1021		node_printf(node, "Shutdown event received\n");
1022		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1023		ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
1024		break;
1025
1026	default:
1027		__ocs_fabric_common(__func__, ctx, evt, arg);
1028		return NULL;
1029	}
1030
1031	return NULL;
1032}
1033
1034/**
1035 * @ingroup fabric_sm
1036 * @brief Fabric controller node state machine: Wait for an SCR response from the
1037 * fabric controller.
1038 *
1039 * @par Description
1040 * Waits for an SCR response from the fabric controller.
1041 *
1042 * @param ctx Remote node state machine context.
1043 * @param evt Event to process.
1044 * @param arg Per event optional argument.
1045 *
1046 * @return Returns NULL.
1047 */
1048void *
1049__ocs_fabctl_wait_scr_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1050{
1051	std_node_state_decl();
1052
1053	node_sm_trace();
1054
1055	switch(evt) {
1056	case OCS_EVT_SRRS_ELS_REQ_OK:
1057		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_SCR, __ocs_fabric_common, __func__)) {
1058			return NULL;
1059		}
1060		ocs_assert(node->els_req_cnt, NULL);
1061		node->els_req_cnt--;
1062		ocs_node_transition(node, __ocs_fabctl_ready, NULL);
1063		break;
1064
1065	default:
1066		__ocs_fabric_common(__func__, ctx, evt, arg);
1067		return NULL;
1068	}
1069
1070	return NULL;
1071}
1072
1073/**
1074 * @ingroup fabric_sm
1075 * @brief Fabric controller node state machine: Ready.
1076 *
1077 * @par Description
 * In this state, the fabric controller sends an RSCN, which is received
 * by this node, forwarded to the name services node object, and
 * acknowledged with an LS_ACC.
1081 *
1082 * @param ctx Remote node state machine context.
1083 * @param evt Event to process.
1084 * @param arg Per event optional argument.
1085 *
1086 * @return Returns NULL.
1087 */
1088
1089void *
1090__ocs_fabctl_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1091{
1092	ocs_node_cb_t *cbdata = arg;
1093	std_node_state_decl();
1094
1095	node_sm_trace();
1096
1097	switch(evt) {
1098	case OCS_EVT_RSCN_RCVD: {
1099		fc_header_t *hdr = cbdata->header->dma.virt;
1100
1101		/* sm: / process RSCN (forward to name services node),
1102		 * send LS_ACC */
1103		ocs_process_rscn(node, cbdata);
1104		ocs_send_ls_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1105		ocs_node_transition(node, __ocs_fabctl_wait_ls_acc_cmpl, NULL);
1106		break;
1107	}
1108
1109	default:
1110		__ocs_fabric_common(__func__, ctx, evt, arg);
1111		return NULL;
1112	}
1113
1114	return NULL;
1115}
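
/*
 * End to end, an incoming RSCN takes this path (entirely within this file):
 *
 *	__ocs_fabctl_ready: OCS_EVT_RSCN_RCVD
 *	    -> ocs_process_rscn(node, cbdata)
 *	           -> ocs_node_post_event(ns, OCS_EVT_RSCN_RCVD, cbdata)
 *	    -> ocs_send_ls_acc(...)		acknowledge the RSCN
 *	__ocs_ns_idle: OCS_EVT_RSCN_RCVD
 *	    -> ocs_ns_send_gidpt(...)		rediscover the port list
 */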
1116
1117/**
1118 * @ingroup fabric_sm
1119 * @brief Fabric controller node state machine: Wait for LS_ACC.
1120 *
1121 * @par Description
 * Waits for the LS_ACC (sent in response to the RSCN) to complete.
1123 *
1124 * @param ctx Remote node state machine context.
1125 * @param evt Event to process.
1126 * @param arg Per event optional argument.
1127 *
1128 * @return Returns NULL.
1129 */
1130
1131void *
1132__ocs_fabctl_wait_ls_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1133{
1134	std_node_state_decl();
1135
1136	node_sm_trace();
1137
1138	switch(evt) {
1139	case OCS_EVT_ENTER:
1140		ocs_node_hold_frames(node);
1141		break;
1142
1143	case OCS_EVT_EXIT:
1144		ocs_node_accept_frames(node);
1145		break;
1146
1147	case OCS_EVT_SRRS_ELS_CMPL_OK:
1148		ocs_assert(node->els_cmpl_cnt, NULL);
1149		node->els_cmpl_cnt--;
1150		ocs_node_transition(node, __ocs_fabctl_ready, NULL);
1151		break;
1152
1153	default:
1154		__ocs_fabric_common(__func__, ctx, evt, arg);
1155		return NULL;
1156	}
1157
1158	return NULL;
1159}
1160
1161/**
1162 * @ingroup fabric_sm
1163 * @brief Initiate fabric node shutdown.
1164 *
1165 * @param node Node for which shutdown is initiated.
1166 *
1167 * @return Returns None.
1168 */
1169
1170static void
1171ocs_fabric_initiate_shutdown(ocs_node_t *node)
1172{
1173	ocs_hw_rtn_e rc;
1174	ocs_t *ocs = node->ocs;
1175	ocs_scsi_io_alloc_disable(node);
1176
1177	if (node->attached) {
1178		/* issue hw node free; don't care if succeeds right away
1179		 * or sometime later, will check node->attached later in
1180		 * shutdown process
1181		 */
1182		rc = ocs_hw_node_detach(&ocs->hw, &node->rnode);
1183		if (node->rnode.free_group) {
1184			ocs_remote_node_group_free(node->node_group);
1185			node->node_group = NULL;
1186			node->rnode.free_group = FALSE;
1187		}
1188		if (rc != OCS_HW_RTN_SUCCESS && rc != OCS_HW_RTN_SUCCESS_SYNC) {
1189			node_printf(node, "Failed freeing HW node, rc=%d\n", rc);
1190		}
1191	}
1192	/*
1193	 * node has either been detached or is in the process of being detached,
1194	 * call common node's initiate cleanup function
1195	 */
1196	ocs_node_initiate_cleanup(node);
1197}
1198
1199/**
1200 * @ingroup fabric_sm
1201 * @brief Fabric node state machine: Handle the common fabric node events.
1202 *
1203 * @param funcname Function name text.
1204 * @param ctx Remote node state machine context.
1205 * @param evt Event to process.
1206 * @param arg Per event optional argument.
1207 *
1208 * @return Returns NULL.
1209 */
1210
1211static void *
1212__ocs_fabric_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1213{
1214	ocs_node_t *node = NULL;
1215	ocs_assert(ctx, NULL);
1216	ocs_assert(ctx->app, NULL);
1217	node = ctx->app;
1218
1219	switch(evt) {
1220	case OCS_EVT_DOMAIN_ATTACH_OK:
1221		break;
1222	case OCS_EVT_SHUTDOWN:
1223		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1224		ocs_fabric_initiate_shutdown(node);
1225		break;
1226
1227	default:
1228		/* call default event handler common to all nodes */
1229		__ocs_node_common(funcname, ctx, evt, arg);
1230		break;
1231	}
1232	return NULL;
1233}
1234
1235/**
1236 * @brief Return TRUE if the remote node is an NPORT.
1237 *
1238 * @par Description
1239 * Examines the service parameters. Returns TRUE if the node reports itself as
1240 * an NPORT.
1241 *
1242 * @param remote_sparms Remote node service parameters.
1243 *
1244 * @return Returns TRUE if NPORT.
1245 */
1246
1247int32_t
1248ocs_rnode_is_nport(fc_plogi_payload_t *remote_sparms)
1249{
1250	return (ocs_be32toh(remote_sparms->common_service_parameters[1]) & (1U << 28)) == 0;
1251}
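
/*
 * The test above examines word 1 of the common service parameters from the
 * FLOGI LS_ACC; bit 28 is the F_Port bit, so a clear bit means the responder
 * is another N_Port (point-to-point) rather than a fabric. For example, with
 * a hypothetical payload whose byte-swapped word 1 is 0x90000000, bit 28 is
 * set and this returns FALSE (fabric); a value of 0x80000000 leaves bit 28
 * clear and this returns TRUE (N_Port).
 */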
1252
1253/**
1254 * @brief Return the node's WWPN as an uint64_t.
1255 *
1256 * @par Description
1257 * The WWPN is computed from service parameters, and returned as a uint64_t.
1258 *
1259 * @param sp Pointer to service parameters.
1260 *
1261 * @return Returns WWPN.
1262 *
1263 */
1264
1265static uint64_t
1266ocs_get_wwpn(fc_plogi_payload_t *sp)
1267{
1268	return (((uint64_t)ocs_be32toh(sp->port_name_hi) << 32ll) | (ocs_be32toh(sp->port_name_lo)));
1269}
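
/*
 * Worked example with hypothetical service parameters: if, after
 * byte-swapping from the wire, port_name_hi is 0x10000090 and port_name_lo is
 * 0xfa942779, ocs_get_wwpn() returns 0x10000090fa942779, i.e. the WWPN
 * 10:00:00:90:fa:94:27:79.
 */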
1270
1271/**
1272 * @brief Return TRUE if the remote node is the point-to-point winner.
1273 *
1274 * @par Description
1275 * Compares WWPNs. Returns TRUE if the remote node's WWPN is numerically
1276 * higher than the local node's WWPN.
1277 *
1278 * @param sport Pointer to the sport object.
1279 *
1280 * @return
1281 * - 0, if the remote node is the loser.
1282 * - 1, if the remote node is the winner.
1283 * - (-1), if remote node is neither the loser nor the winner
1284 *   (WWPNs match)
1285 */
1286
1287static int32_t
1288ocs_rnode_is_winner(ocs_sport_t *sport)
1289{
1290	fc_plogi_payload_t *remote_sparms = (fc_plogi_payload_t*) sport->domain->flogi_service_params;
1291	uint64_t remote_wwpn = ocs_get_wwpn(remote_sparms);
1292	uint64_t local_wwpn = sport->wwpn;
1293	char prop_buf[32];
1294	uint64_t wwn_bump = 0;
1295
1296	if (ocs_get_property("wwn_bump", prop_buf, sizeof(prop_buf)) == 0) {
1297		wwn_bump = ocs_strtoull(prop_buf, 0, 0);
1298	}
1299	local_wwpn ^= wwn_bump;
1300
1301	remote_wwpn = ocs_get_wwpn(remote_sparms);
1302
1303	ocs_log_debug(sport->ocs, "r: %08x %08x\n", ocs_be32toh(remote_sparms->port_name_hi), ocs_be32toh(remote_sparms->port_name_lo));
1304	ocs_log_debug(sport->ocs, "l: %08x %08x\n", (uint32_t) (local_wwpn >> 32ll), (uint32_t) local_wwpn);
1305
1306	if (remote_wwpn == local_wwpn) {
1307		ocs_log_warn(sport->ocs, "WWPN of remote node [%08x %08x] matches local WWPN\n",
1308			(uint32_t) (local_wwpn >> 32ll), (uint32_t) local_wwpn);
1309		return (-1);
1310	}
1311
1312	return (remote_wwpn > local_wwpn);
1313}
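
/*
 * Example outcomes with hypothetical WWPNs (ignoring any wwn_bump adjustment):
 *
 *	local 10:00:00:90:fa:94:27:79, remote 10:00:00:90:fa:94:28:00 -> 1 (remote wins)
 *	local 10:00:00:90:fa:94:28:00, remote 10:00:00:90:fa:94:27:79 -> 0 (local wins)
 *	local and remote WWPNs equal -> -1 (no winner; only usable when
 *	external loopback is enabled)
 */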
1314
1315/**
1316 * @ingroup p2p_sm
1317 * @brief Point-to-point state machine: Wait for the domain attach to complete.
1318 *
1319 * @par Description
1320 * Once the domain attach has completed, a PLOGI is sent (if we're the
1321 * winning point-to-point node).
1322 *
1323 * @param ctx Remote node state machine context.
1324 * @param evt Event to process.
1325 * @param arg Per event optional argument.
1326 *
1327 * @return Returns NULL.
1328 */
1329
1330void *
1331__ocs_p2p_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1332{
1333	std_node_state_decl();
1334
1335	node_sm_trace();
1336
1337	switch(evt) {
1338	case OCS_EVT_ENTER:
1339		ocs_node_hold_frames(node);
1340		break;
1341
1342	case OCS_EVT_EXIT:
1343		ocs_node_accept_frames(node);
1344		break;
1345
1346	case OCS_EVT_DOMAIN_ATTACH_OK: {
1347		ocs_sport_t *sport = node->sport;
1348		ocs_node_t *rnode;
1349
1350		/* this transient node (SID=0 (recv'd FLOGI) or DID=fabric (sent FLOGI))
1351		 * is the p2p winner, will use a separate node to send PLOGI to peer
1352		 */
1353		ocs_assert (node->sport->p2p_winner, NULL);
1354
1355		rnode = ocs_node_find(sport, node->sport->p2p_remote_port_id);
1356		if (rnode != NULL) {
1357			/* the "other" transient p2p node has already kicked off the
1358			 * new node from which PLOGI is sent */
1359			node_printf(node, "Node with fc_id x%x already exists\n", rnode->rnode.fc_id);
1360			ocs_assert (rnode != node, NULL);
1361		} else {
1362			/* create new node (SID=1, DID=2) from which to send PLOGI */
1363			rnode = ocs_node_alloc(sport, sport->p2p_remote_port_id, FALSE, FALSE);
1364			if (rnode == NULL) {
1365				ocs_log_err(ocs, "node alloc failed\n");
1366				return NULL;
1367			}
1368
1369			ocs_fabric_notify_topology(node);
1370			/* sm: allocate p2p remote node */
1371			ocs_node_transition(rnode, __ocs_p2p_rnode_init, NULL);
1372		}
1373
1374		/* the transient node (SID=0 or DID=fabric) has served its purpose */
1375		if (node->rnode.fc_id == 0) {
1376			/* if this is the SID=0 node, move to the init state in case peer
1377			 * has restarted FLOGI discovery and FLOGI is pending
1378			 */
1379			/* don't send PLOGI on ocs_d_init entry */
1380			ocs_node_init_device(node, FALSE);
1381		} else {
1382			/* if this is the DID=fabric node (we initiated FLOGI), shut it down */
1383			node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1384			ocs_fabric_initiate_shutdown(node);
1385		}
1386		break;
1387	}
1388
1389	default:
1390		__ocs_fabric_common(__func__, ctx, evt, arg);
1391		return NULL;
1392	}
1393
1394	return NULL;
1395}
1396
1397/**
1398 * @ingroup p2p_sm
1399 * @brief Point-to-point state machine: Remote node initialization state.
1400 *
1401 * @par Description
1402 * This state is entered after winning point-to-point, and the remote node
1403 * is instantiated.
1404 *
1405 * @param ctx Remote node state machine context.
1406 * @param evt Event to process.
1407 * @param arg Per event optional argument.
1408 *
1409 * @return Returns NULL.
1410 */
1411
1412void *
1413__ocs_p2p_rnode_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1414{
1415	ocs_node_cb_t *cbdata = arg;
1416	std_node_state_decl();
1417
1418	node_sm_trace();
1419
1420	switch(evt) {
1421	case OCS_EVT_ENTER:
1422		/* sm: / send PLOGI */
1423		ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
1424		ocs_node_transition(node, __ocs_p2p_wait_plogi_rsp, NULL);
1425		break;
1426
1427	case OCS_EVT_ABTS_RCVD:
1428		/* sm: send BA_ACC */
1429		ocs_bls_send_acc_hdr(cbdata->io, cbdata->header->dma.virt);
1430		break;
1431
1432	default:
1433		__ocs_fabric_common(__func__, ctx, evt, arg);
1434		return NULL;
1435	}
1436
1437	return NULL;
1438}
1439
1440/**
1441 * @ingroup p2p_sm
1442 * @brief Point-to-point node state machine: Wait for the FLOGI accept completion.
1443 *
1444 * @par Description
1445 * Wait for the FLOGI accept completion.
1446 *
1447 * @param ctx Remote node state machine context.
1448 * @param evt Event to process.
1449 * @param arg Per event optional argument.
1450 *
1451 * @return Returns NULL.
1452 */
1453
1454void *
1455__ocs_p2p_wait_flogi_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1456{
1457	ocs_node_cb_t *cbdata = arg;
1458	std_node_state_decl();
1459
1460	node_sm_trace();
1461
1462	switch(evt) {
1463	case OCS_EVT_ENTER:
1464		ocs_node_hold_frames(node);
1465		break;
1466
1467	case OCS_EVT_EXIT:
1468		ocs_node_accept_frames(node);
1469		break;
1470
1471	case OCS_EVT_SRRS_ELS_CMPL_OK:
1472		ocs_assert(node->els_cmpl_cnt, NULL);
1473		node->els_cmpl_cnt--;
1474
1475		/* sm: if p2p_winner / domain_attach */
1476		if (node->sport->p2p_winner) {
1477			ocs_node_transition(node, __ocs_p2p_wait_domain_attach, NULL);
1478			if (node->sport->domain->attached &&
1479			    !(node->sport->domain->domain_notify_pend)) {
1480				node_printf(node, "Domain already attached\n");
1481				ocs_node_post_event(node, OCS_EVT_DOMAIN_ATTACH_OK, NULL);
1482			}
1483		} else {
1484			/* this node has served its purpose; we'll expect a PLOGI on a separate
1485			 * node (remote SID=0x1); return this node to init state in case peer
1486			 * restarts discovery -- it may already have (pending frames may exist).
1487			 */
1488			/* don't send PLOGI on ocs_d_init entry */
1489			ocs_node_init_device(node, FALSE);
1490		}
1491		break;
1492
1493	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1494		/* LS_ACC failed, possibly due to link down; shutdown node and wait
1495		 * for FLOGI discovery to restart */
1496		node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
1497		ocs_assert(node->els_cmpl_cnt, NULL);
1498		node->els_cmpl_cnt--;
1499		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1500		ocs_fabric_initiate_shutdown(node);
1501		break;
1502
1503	case OCS_EVT_ABTS_RCVD: {
1504		/* sm: / send BA_ACC */
1505		ocs_bls_send_acc_hdr(cbdata->io, cbdata->header->dma.virt);
1506		break;
1507	}
1508
1509	default:
1510		__ocs_fabric_common(__func__, ctx, evt, arg);
1511		return NULL;
1512	}
1513
1514	return NULL;
1515}
1516
1517
1518/**
1519 * @ingroup p2p_sm
1520 * @brief Point-to-point node state machine: Wait for a PLOGI response
1521 * as a point-to-point winner.
1522 *
1523 * @par Description
1524 * Wait for a PLOGI response from the remote node as a point-to-point winner.
1525 * Submit node attach request to the HW.
1526 *
1527 * @param ctx Remote node state machine context.
1528 * @param evt Event to process.
1529 * @param arg Per event optional argument.
1530 *
1531 * @return Returns NULL.
1532 */
1533
1534void *
1535__ocs_p2p_wait_plogi_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1536{
1537	int32_t rc;
1538	ocs_node_cb_t *cbdata = arg;
1539	std_node_state_decl();
1540
1541	node_sm_trace();
1542
1543	switch(evt) {
1544	case OCS_EVT_SRRS_ELS_REQ_OK: {
1545		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1546			return NULL;
1547		}
1548		ocs_assert(node->els_req_cnt, NULL);
1549		node->els_req_cnt--;
1550		/* sm: / save sparams, ocs_node_attach */
1551		ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
1552		rc = ocs_node_attach(node);
1553		ocs_node_transition(node, __ocs_p2p_wait_node_attach, NULL);
1554		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1555			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1556		}
1557		break;
1558	}
1559	case OCS_EVT_SRRS_ELS_REQ_FAIL: {
1560		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1561			return NULL;
1562		}
1563		node_printf(node, "PLOGI failed, shutting down\n");
1564		ocs_assert(node->els_req_cnt, NULL);
1565		node->els_req_cnt--;
1566		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1567		ocs_fabric_initiate_shutdown(node);
1568		break;
1569	}
1570
1571	case OCS_EVT_PLOGI_RCVD: {
1572		fc_header_t *hdr = cbdata->header->dma.virt;
1573		/* if we're in external loopback mode, just send LS_ACC */
1574		if (node->ocs->external_loopback) {
1575			ocs_send_plogi_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1576			break;
1577		} else{
1578			/* if this isn't external loopback, pass to default handler */
1579			__ocs_fabric_common(__func__, ctx, evt, arg);
1580		}
1581		break;
1582	}
1583	case OCS_EVT_PRLI_RCVD:
1584		/* I, or I+T */
1585		/* sent PLOGI and before completion was seen, received the
1586		 * PRLI from the remote node (WCQEs and RCQEs come in on
1587		 * different queues and order of processing cannot be assumed)
1588		 * Save OXID so PRLI can be sent after the attach and continue
1589		 * to wait for PLOGI response
1590		 */
1591		ocs_process_prli_payload(node, cbdata->payload->dma.virt);
1592		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
1593		ocs_node_transition(node, __ocs_p2p_wait_plogi_rsp_recvd_prli, NULL);
1594		break;
1595	default:
1596		__ocs_fabric_common(__func__, ctx, evt, arg);
1597		return NULL;
1598	}
1599
1600	return NULL;
1601}
1602
1603/**
1604 * @ingroup p2p_sm
1605 * @brief Point-to-point node state machine: Waiting on a response for a
1606 *	sent PLOGI.
1607 *
1608 * @par Description
1609 * State is entered when the point-to-point winner has sent
1610 * a PLOGI and is waiting for a response. Before receiving the
1611 * response, a PRLI was received, implying that the PLOGI was
1612 * successful.
1613 *
1614 * @param ctx Remote node state machine context.
1615 * @param evt Event to process.
1616 * @param arg Per event optional argument.
1617 *
1618 * @return Returns NULL.
1619 */
1620
1621void *
1622__ocs_p2p_wait_plogi_rsp_recvd_prli(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1623{
1624	int32_t rc;
1625	ocs_node_cb_t *cbdata = arg;
1626	std_node_state_decl();
1627
1628	node_sm_trace();
1629
1630	switch(evt) {
1631	case OCS_EVT_ENTER:
		/*
		 * Since we've received a PRLI, we have a port login and just
		 * need to wait for the PLOGI response to do the node attach;
		 * then we can send the LS_ACC for the PRLI. During this time we
		 * may receive FCP_CMNDs (possible since we've already sent a
		 * PRLI and our peer may have accepted it). We are not waiting
		 * on any other unsolicited frames to continue the login
		 * process, so it does no harm to hold frames here.
		 */
1642		ocs_node_hold_frames(node);
1643		break;
1644
1645	case OCS_EVT_EXIT:
1646		ocs_node_accept_frames(node);
1647		break;
1648
1649	case OCS_EVT_SRRS_ELS_REQ_OK:	/* PLOGI response received */
1650		/* Completion from PLOGI sent */
1651		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1652			return NULL;
1653		}
1654		ocs_assert(node->els_req_cnt, NULL);
1655		node->els_req_cnt--;
1656		/* sm: / save sparams, ocs_node_attach */
1657		ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
1658		ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
1659			((uint8_t*)cbdata->els->els_rsp.virt) + 4);
1660		rc = ocs_node_attach(node);
1661		ocs_node_transition(node, __ocs_p2p_wait_node_attach, NULL);
1662		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1663			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1664		}
1665		break;
1666
1667	case OCS_EVT_SRRS_ELS_REQ_FAIL:	/* PLOGI response received */
1668	case OCS_EVT_SRRS_ELS_REQ_RJT:
1669		/* PLOGI failed, shutdown the node */
1670		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1671			return NULL;
1672		}
1673		ocs_assert(node->els_req_cnt, NULL);
1674		node->els_req_cnt--;
1675		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1676		ocs_fabric_initiate_shutdown(node);
1677		break;
1678
1679	default:
1680		__ocs_fabric_common(__func__, ctx, evt, arg);
1681		return NULL;
1682	}
1683
1684	return NULL;
1685}
1686
1687/**
1688 * @ingroup p2p_sm
1689 * @brief Point-to-point node state machine: Wait for a point-to-point node attach
1690 * to complete.
1691 *
1692 * @par Description
1693 * Waits for the point-to-point node attach to complete.
1694 *
1695 * @param ctx Remote node state machine context.
1696 * @param evt Event to process.
1697 * @param arg Per event optional argument.
1698 *
1699 * @return Returns NULL.
1700 */
1701
1702void *
1703__ocs_p2p_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1704{
1705	ocs_node_cb_t *cbdata = arg;
1706	std_node_state_decl();
1707
1708	node_sm_trace();
1709
1710	switch(evt) {
1711	case OCS_EVT_ENTER:
1712		ocs_node_hold_frames(node);
1713		break;
1714
1715	case OCS_EVT_EXIT:
1716		ocs_node_accept_frames(node);
1717		break;
1718
1719	case OCS_EVT_NODE_ATTACH_OK:
1720		node->attached = TRUE;
1721		switch (node->send_ls_acc) {
1722		case OCS_NODE_SEND_LS_ACC_PRLI: {
1723			ocs_d_send_prli_rsp(node->ls_acc_io, node->ls_acc_oxid);
1724			node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
1725			node->ls_acc_io = NULL;
1726			break;
1727		}
1728		case OCS_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
1729		case OCS_NODE_SEND_LS_ACC_NONE:
1730		default:
1731			/* Normal case for I */
1732			/* sm: send_plogi_acc is not set / send PLOGI acc */
1733			ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
1734			break;
1735		}
1736		break;
1737
1738	case OCS_EVT_NODE_ATTACH_FAIL:
1739		/* node attach failed, shutdown the node */
1740		node->attached = FALSE;
1741		node_printf(node, "Node attach failed\n");
1742		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1743		ocs_fabric_initiate_shutdown(node);
1744		break;
1745
1746	case OCS_EVT_SHUTDOWN:
1747		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1748		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1749		ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
1750		break;
1751	case OCS_EVT_PRLI_RCVD:
1752		node_printf(node, "%s: PRLI received before node is attached\n", ocs_sm_event_name(evt));
1753		ocs_process_prli_payload(node, cbdata->payload->dma.virt);
1754		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
1755		break;
1756	default:
1757		__ocs_fabric_common(__func__, ctx, evt, arg);
1758		return NULL;
1759	}
1760
1761	return NULL;
1762}
1763
1764/**
1765 * @brief Start up the name services node.
1766 *
1767 * @par Description
1768 * Allocates and starts up the name services node.
1769 *
1770 * @param sport Pointer to the sport structure.
1771 *
1772 * @return Returns 0 on success, or a negative error value on failure.
1773 */
1774
1775static int32_t
1776ocs_start_ns_node(ocs_sport_t *sport)
1777{
1778	ocs_node_t *ns;
1779
1780	/* Instantiate a name services node */
1781	ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
1782	if (ns == NULL) {
1783		ns = ocs_node_alloc(sport, FC_ADDR_NAMESERVER, FALSE, FALSE);
1784		if (ns == NULL) {
1785			return -1;
1786		}
1787	}
1788	/* TODO: for found ns, should we be transitioning from here?
1789	 * breaks transition only 1. from within state machine or
1790	 * 2. if after alloc
1791	 */
1792	if (ns->ocs->nodedb_mask & OCS_NODEDB_PAUSE_NAMESERVER) {
1793		ocs_node_pause(ns, __ocs_ns_init);
1794	} else {
1795		ocs_node_transition(ns, __ocs_ns_init, NULL);
1796	}
1797	return 0;
1798}
1799
1800/**
1801 * @brief Start up the fabric controller node.
1802 *
1803 * @par Description
1804 * Allocates and starts up the fabric controller node.
1805 *
1806 * @param sport Pointer to the sport structure.
1807 *
1808 * @return Returns 0 on success, or a negative error value on failure.
1809 */
1810
1811static int32_t
1812ocs_start_fabctl_node(ocs_sport_t *sport)
1813{
1814	ocs_node_t *fabctl;
1815
1816	fabctl = ocs_node_find(sport, FC_ADDR_CONTROLLER);
1817	if (fabctl == NULL) {
1818		fabctl = ocs_node_alloc(sport, FC_ADDR_CONTROLLER, FALSE, FALSE);
1819		if (fabctl == NULL) {
1820			return -1;
1821		}
1822	}
1823	/* TODO: for found ns, should we be transitioning from here?
1824	 * breaks transition only 1. from within state machine or
1825	 * 2. if after alloc
1826	 */
1827	ocs_node_transition(fabctl, __ocs_fabctl_init, NULL);
1828	return 0;
1829}
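
/*
 * Both start functions are driven from __ocs_fabric_wait_domain_attach()
 * above: once the domain/sport attach completes, the name services node is
 * always started, and the fabric controller node is started only when RSCN
 * handling is enabled. Roughly:
 *
 *	rc = ocs_start_ns_node(node->sport);
 *	if (rc == 0 && node->sport->enable_rscn)
 *		rc = ocs_start_fabctl_node(node->sport);
 */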
1830
1831/**
1832 * @brief Process the GIDPT payload.
1833 *
1834 * @par Description
1835 * The GIDPT payload is parsed, and new nodes are created, as needed.
1836 *
1837 * @param node Pointer to the node structure.
1838 * @param gidpt Pointer to the GIDPT payload.
1839 * @param gidpt_len Payload length
1840 *
1841 * @return Returns 0 on success, or a negative error value on failure.
1842 */
1843
1844static int32_t
1845ocs_process_gidpt_payload(ocs_node_t *node, fcct_gidpt_acc_t *gidpt, uint32_t gidpt_len)
1846{
1847	uint32_t i;
1848	uint32_t j;
1849	ocs_node_t *newnode;
1850	ocs_sport_t *sport = node->sport;
1851	ocs_t *ocs = node->ocs;
1852	uint32_t port_id;
1853	uint32_t port_count;
1854	ocs_node_t *n;
1855	ocs_node_t **active_nodes;
1856	uint32_t portlist_count;
1857	uint16_t residual;
1858
1859	residual = ocs_be16toh(gidpt->hdr.max_residual_size);
1860
1861	if (residual != 0) {
1862		ocs_log_debug(node->ocs, "residual is %u words\n", residual);
1863	}
1864
1865	if (ocs_be16toh(gidpt->hdr.cmd_rsp_code) == FCCT_HDR_CMDRSP_REJECT) {
1866		node_printf(node, "GIDPT request failed: rsn x%x rsn_expl x%x\n",
1867			gidpt->hdr.reason_code, gidpt->hdr.reason_code_explanation);
1868		return -1;
1869	}
1870
1871	portlist_count = (gidpt_len - sizeof(fcct_iu_header_t)) / sizeof(gidpt->port_list);
1872
1873	/* Count the number of nodes */
1874	port_count = 0;
1875	ocs_sport_lock(sport);
1876		ocs_list_foreach(&sport->node_list, n) {
1877			port_count ++;
1878		}
1879
1880		/* Allocate a buffer for all nodes */
1881		active_nodes = ocs_malloc(node->ocs, port_count * sizeof(*active_nodes), OCS_M_NOWAIT | OCS_M_ZERO);
1882		if (active_nodes == NULL) {
1883			node_printf(node, "ocs_malloc failed\n");
1884			ocs_sport_unlock(sport);
1885			return -1;
1886		}
1887
1888		/* Fill buffer with fc_id of active nodes */
1889		i = 0;
1890		ocs_list_foreach(&sport->node_list, n) {
1891			port_id = n->rnode.fc_id;
1892			switch (port_id) {
1893			case FC_ADDR_FABRIC:
1894			case FC_ADDR_CONTROLLER:
1895			case FC_ADDR_NAMESERVER:
1896				break;
1897			default:
1898				if (!FC_ADDR_IS_DOMAIN_CTRL(port_id)) {
1899					active_nodes[i++] = n;
1900				}
1901				break;
1902			}
1903		}
1904
1905		/* update the active nodes buffer */
1906		for (i = 0; i < portlist_count; i ++) {
1907			port_id = fc_be24toh(gidpt->port_list[i].port_id);
1908
1909			for (j = 0; j < port_count; j ++) {
1910				if ((active_nodes[j] != NULL) && (port_id == active_nodes[j]->rnode.fc_id)) {
1911					active_nodes[j] = NULL;
1912				}
1913			}
1914
1915			if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID)
1916				break;
1917		}
1918
1919		/* Those remaining in the active_nodes[] are now gone ! */
1920		for (i = 0; i < port_count; i ++) {
1921			/* if we're an initiator and the remote node is a target, then
1922			 * post the node missing event.   if we're target and we have enabled
1923			 * target RSCN, then post the node missing event.
1924			 */
1925			if (active_nodes[i] != NULL) {
1926				if ((node->sport->enable_ini && active_nodes[i]->targ) ||
1927				    (node->sport->enable_tgt && enable_target_rscn(ocs))) {
1928					ocs_node_post_event(active_nodes[i], OCS_EVT_NODE_MISSING, NULL);
1929				} else {
1930					node_printf(node, "GID_PT: skipping non-tgt port_id x%06x\n",
1931						active_nodes[i]->rnode.fc_id);
1932				}
1933			}
1934		}
1935		ocs_free(ocs, active_nodes, port_count * sizeof(*active_nodes));
1936
1937		for(i = 0; i < portlist_count; i ++) {
1938			uint32_t port_id = fc_be24toh(gidpt->port_list[i].port_id);
1939
1940			/* node_printf(node, "GID_PT: port_id x%06x\n", port_id); */
1941
1942			/* Don't create node for ourselves or the associated NPIV ports */
1943			if (port_id != node->rnode.sport->fc_id && !ocs_sport_find(sport->domain, port_id)) {
1944				newnode = ocs_node_find(sport, port_id);
1945				if (newnode) {
1946					/* TODO: what if node deleted here?? */
1947					if (node->sport->enable_ini && newnode->targ) {
1948						ocs_node_post_event(newnode, OCS_EVT_NODE_REFOUND, NULL);
1949					}
1950					/* original code sends ADISC, has notion of "refound" */
1951				} else {
1952					if (node->sport->enable_ini) {
1953						newnode = ocs_node_alloc(sport, port_id, 0, 0);
1954						if (newnode == NULL) {
1955							ocs_log_err(ocs, "ocs_node_alloc() failed\n");
1956							ocs_sport_unlock(sport);
1957							return -1;
1958						}
1959						/* send PLOGI automatically if initiator */
1960						ocs_node_init_device(newnode, TRUE);
1961					}
1962				}
1963			}
1964
1965			if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID) {
1966				break;
1967			}
1968		}
1969	ocs_sport_unlock(sport);
1970	return 0;
1971}
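
/*
 * For reference, the GID_PT accept payload walked above is a CT header
 * followed by an array of 4-byte entries, each a control byte plus a 3-byte
 * port ID; the entry with FCCT_GID_PT_LAST_ID set in its control byte ends
 * the list. A minimal sketch of the per-entry decode, mirroring the loops
 * above:
 *
 *	for (i = 0; i < portlist_count; i++) {
 *		uint32_t port_id = fc_be24toh(gidpt->port_list[i].port_id);
 *		// compare against known nodes / allocate a new node for port_id
 *		if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID)
 *			break;
 *	}
 */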
1972
1973/**
1974 * @brief Set up the domain point-to-point parameters.
1975 *
1976 * @par Description
1977 * The remote node service parameters are examined, and various point-to-point
1978 * variables are set.
1979 *
1980 * @param sport Pointer to the sport object.
1981 *
1982 * @return Returns 0 on success, or a negative error value on failure.
1983 */
1984
1985int32_t
1986ocs_p2p_setup(ocs_sport_t *sport)
1987{
1988	ocs_t *ocs = sport->ocs;
1989	int32_t rnode_winner;
1990	rnode_winner = ocs_rnode_is_winner(sport);
1991
1992	/* set sport flags to indicate p2p "winner" */
1993	if (rnode_winner == 1) {
1994		sport->p2p_remote_port_id = 0;
1995		sport->p2p_port_id = 0;
1996		sport->p2p_winner = FALSE;
1997	} else if (rnode_winner == 0) {
1998		sport->p2p_remote_port_id = 2;
1999		sport->p2p_port_id = 1;
2000		sport->p2p_winner = TRUE;
2001	} else {
2002		/* no winner; only okay if external loopback enabled */
2003		if (sport->ocs->external_loopback) {
2004			/*
2005			 * External loopback mode enabled; local sport and remote node
2006			 * will be registered with an NPortID = 1;
2007			 */
2008			ocs_log_debug(ocs, "External loopback mode enabled\n");
2009			sport->p2p_remote_port_id = 1;
2010			sport->p2p_port_id = 1;
2011			sport->p2p_winner = TRUE;
2012		} else {
2013			ocs_log_warn(ocs, "failed to determine p2p winner\n");
2014			return rnode_winner;
2015		}
2016	}
2017	return 0;
2018}
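
/*
 * Summary of the address assignments made above: if the remote node wins,
 * both p2p port IDs are left at 0 (the winning peer drives the login); if the
 * local node wins, it takes N_Port ID 1 and assigns the peer N_Port ID 2; if
 * the WWPNs match, external loopback mode is required and both sides use
 * N_Port ID 1.
 */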
2019
2020/**
2021 * @brief Process the FABCTL node RSCN.
2022 *
 * @par Description
2024 * Processes the FABCTL node RSCN payload, simply passes the event to the name server.
2025 *
2026 * @param node Pointer to the node structure.
2027 * @param cbdata Callback data to pass forward.
2028 *
2029 * @return None.
2030 */
2031
2032static void
2033ocs_process_rscn(ocs_node_t *node, ocs_node_cb_t *cbdata)
2034{
2035	ocs_t *ocs = node->ocs;
2036	ocs_sport_t *sport = node->sport;
2037	ocs_node_t *ns;
2038
2039	/* Forward this event to the name-services node */
2040	ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
2041	if (ns != NULL)  {
2042		ocs_node_post_event(ns, OCS_EVT_RSCN_RCVD, cbdata);
2043	} else {
2044		ocs_log_warn(ocs, "can't find name server node\n");
2045	}
2046}
2047