1/*-
2 * Copyright (c) 2017 Broadcom. All rights reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 *    this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 *    this list of conditions and the following disclaimer in the documentation
13 *    and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors
16 *    may be used to endorse or promote products derived from this software
17 *    without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 *
31 * $FreeBSD$
32 */
33
34/**
35 * @file
36 * Details SLI port (sport) functions.
37 */
38
39
40#include "ocs.h"
41#include "ocs_fabric.h"
42#include "ocs_els.h"
43#include "ocs_device.h"
44
45static void ocs_vport_update_spec(ocs_sport_t *sport);
46static void ocs_vport_link_down(ocs_sport_t *sport);
47
48void ocs_mgmt_sport_list(ocs_textbuf_t *textbuf, void *sport);
49void ocs_mgmt_sport_get_all(ocs_textbuf_t *textbuf, void *sport);
50int ocs_mgmt_sport_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *sport);
51int ocs_mgmt_sport_set(char *parent, char *name, char *value, void *sport);
52int ocs_mgmt_sport_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
53		void *arg_out, uint32_t arg_out_length, void *sport);
54static ocs_mgmt_functions_t sport_mgmt_functions = {
55	.get_list_handler = ocs_mgmt_sport_list,
56	.get_handler = ocs_mgmt_sport_get,
57	.get_all_handler = ocs_mgmt_sport_get_all,
58	.set_handler = ocs_mgmt_sport_set,
59	.exec_handler = ocs_mgmt_sport_exec,
60};
61
62/*!
63@defgroup sport_sm SLI Port (sport) State Machine: States
64*/
65
66/**
67 * @ingroup sport_sm
68 * @brief SLI port HW callback.
69 *
70 * @par Description
71 * This function is called in response to a HW sport event. This code resolves
72 * the reference to the sport object, and posts the corresponding event.
73 *
74 * @param arg Pointer to the OCS context.
75 * @param event HW sport event.
 * @param data Application-specific event data (pointer to the sport).
77 *
78 * @return Returns 0 on success, or a negative error value on failure.
79 */
80
81int32_t
82ocs_port_cb(void *arg, ocs_hw_port_event_e event, void *data)
83{
84	ocs_t *ocs = arg;
85	ocs_sli_port_t *sport = data;
86
87	switch (event) {
88	case OCS_HW_PORT_ALLOC_OK:
89		ocs_log_debug(ocs, "OCS_HW_PORT_ALLOC_OK\n");
90		ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ALLOC_OK, NULL);
91		break;
92	case OCS_HW_PORT_ALLOC_FAIL:
93		ocs_log_debug(ocs, "OCS_HW_PORT_ALLOC_FAIL\n");
94		ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ALLOC_FAIL, NULL);
95		break;
96	case OCS_HW_PORT_ATTACH_OK:
97		ocs_log_debug(ocs, "OCS_HW_PORT_ATTACH_OK\n");
98		ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ATTACH_OK, NULL);
99		break;
100	case OCS_HW_PORT_ATTACH_FAIL:
101		ocs_log_debug(ocs, "OCS_HW_PORT_ATTACH_FAIL\n");
102		ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ATTACH_FAIL, NULL);
103		break;
104	case OCS_HW_PORT_FREE_OK:
105		ocs_log_debug(ocs, "OCS_HW_PORT_FREE_OK\n");
106		ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_FREE_OK, NULL);
107		break;
108	case OCS_HW_PORT_FREE_FAIL:
109		ocs_log_debug(ocs, "OCS_HW_PORT_FREE_FAIL\n");
110		ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_FREE_FAIL, NULL);
111		break;
112	default:
113		ocs_log_test(ocs, "unknown event %#x\n", event);
114	}
115
116	return 0;
117}
118
119/**
120 * @ingroup sport_sm
121 * @brief Allocate a SLI port object.
122 *
123 * @par Description
124 * A sport object is allocated and associated with the domain. Various
125 * structure members are initialized.
126 *
127 * @param domain Pointer to the domain structure.
128 * @param wwpn World wide port name in host endian.
129 * @param wwnn World wide node name in host endian.
 * @param fc_id Requested port ID of the sport; use UINT32_MAX to let the fabric choose.
131 * @param enable_ini Enables initiator capability on this port using a non-zero value.
132 * @param enable_tgt Enables target capability on this port using a non-zero value.
133 *
134 * @return Pointer to an ocs_sport_t object; or NULL.
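 *
 * @par Example
 * A minimal allocation sketch. The @c domain pointer and the WWN values are
 * placeholders supplied by the caller, not values defined in this file:
 * @code
 * ocs_sport_t *sport;
 *
 * // UINT32_MAX lets the fabric assign the FC_ID; initiator enabled, target disabled
 * sport = ocs_sport_alloc(domain, wwpn, wwnn, UINT32_MAX, TRUE, FALSE);
 * if (sport == NULL) {
 *         // allocation failed, or a sport with this WWN pair already exists
 * }
 * @endcode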
135 */
136
137ocs_sport_t *
138ocs_sport_alloc(ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn, uint32_t fc_id, uint8_t enable_ini, uint8_t enable_tgt)
139{
140	ocs_sport_t *sport;
141
142	if (domain->ocs->ctrlmask & OCS_CTRLMASK_INHIBIT_INITIATOR) {
143		enable_ini = 0;
144	}
145
146	/* Return a failure if this sport has already been allocated */
147	if (wwpn != 0) {
148		sport = ocs_sport_find_wwn(domain, wwnn, wwpn);
149		if (sport != NULL) {
150			ocs_log_test(domain->ocs, "Failed: SPORT %016llx  %016llx already allocated\n",
151				     (unsigned long long)wwnn, (unsigned long long)wwpn);
152			return NULL;
153		}
154	}
155
156	sport = ocs_malloc(domain->ocs, sizeof(*sport), OCS_M_NOWAIT | OCS_M_ZERO);
157	if (sport) {
158		sport->ocs = domain->ocs;
159		ocs_snprintf(sport->display_name, sizeof(sport->display_name), "------");
160		sport->domain = domain;
161		sport->lookup = spv_new(domain->ocs);
162		sport->instance_index = domain->sport_instance_count++;
163		ocs_sport_lock_init(sport);
164		ocs_list_init(&sport->node_list, ocs_node_t, link);
165		sport->sm.app = sport;
166		sport->enable_ini = enable_ini;
167		sport->enable_tgt = enable_tgt;
168		sport->enable_rscn = (sport->enable_ini || (sport->enable_tgt && enable_target_rscn(sport->ocs)));
169
170		/* Copy service parameters from domain */
171		ocs_memcpy(sport->service_params, domain->service_params, sizeof(fc_plogi_payload_t));
172
173		/* Update requested fc_id */
174		sport->fc_id = fc_id;
175
		/* Update the sport's service parameters for the new WWNs */
177		sport->wwpn = wwpn;
178		sport->wwnn = wwnn;
179		ocs_snprintf(sport->wwnn_str, sizeof(sport->wwnn_str), "%016llx" , (unsigned long long)wwnn);
180
181		/* Initialize node group list */
182		ocs_lock_init(sport->ocs, &sport->node_group_lock, "node_group_lock[%d]", sport->instance_index);
183		ocs_list_init(&sport->node_group_dir_list, ocs_node_group_dir_t, link);
184
185		/* if this is the "first" sport of the domain, then make it the "phys" sport */
186		ocs_domain_lock(domain);
187			if (ocs_list_empty(&domain->sport_list)) {
188				domain->sport = sport;
189			}
190
191			ocs_list_add_tail(&domain->sport_list, sport);
192		ocs_domain_unlock(domain);
193
194		sport->mgmt_functions = &sport_mgmt_functions;
195
196		ocs_log_debug(domain->ocs, "[%s] allocate sport\n", sport->display_name);
197	}
198	return sport;
199}
200
201/**
202 * @ingroup sport_sm
203 * @brief Free a SLI port object.
204 *
205 * @par Description
206 * The sport object is freed.
207 *
208 * @param sport Pointer to the SLI port object.
209 *
210 * @return None.
211 */
212
213void
214ocs_sport_free(ocs_sport_t *sport)
215{
216	ocs_domain_t *domain;
217	ocs_node_group_dir_t *node_group_dir;
218	ocs_node_group_dir_t *node_group_dir_next;
219	int post_all_free = FALSE;
220
221	if (sport) {
222		domain = sport->domain;
223		ocs_log_debug(domain->ocs, "[%s] free sport\n", sport->display_name);
224		ocs_domain_lock(domain);
225			ocs_list_remove(&domain->sport_list, sport);
226			ocs_sport_lock(sport);
227				spv_del(sport->lookup);
228				sport->lookup = NULL;
229
230				ocs_lock(&domain->lookup_lock);
231					/* Remove the sport from the domain's sparse vector lookup table */
232					spv_set(domain->lookup, sport->fc_id, NULL);
233				ocs_unlock(&domain->lookup_lock);
234
235				/* if this is the physical sport, then clear it out of the domain */
236				if (sport == domain->sport) {
237					domain->sport = NULL;
238				}
239
240				/*
241				 * If the domain's sport_list is empty, then post the ALL_NODES_FREE event to the domain,
				 * after the lock is released. The domain may be freed as a result of the event.
243				 */
244				if (ocs_list_empty(&domain->sport_list)) {
245					post_all_free = TRUE;
246				}
247
248				/* Free any node group directories */
249				ocs_lock(&sport->node_group_lock);
250					ocs_list_foreach_safe(&sport->node_group_dir_list, node_group_dir, node_group_dir_next) {
251						ocs_unlock(&sport->node_group_lock);
252							ocs_node_group_dir_free(node_group_dir);
253						ocs_lock(&sport->node_group_lock);
254					}
255				ocs_unlock(&sport->node_group_lock);
256			ocs_sport_unlock(sport);
257		ocs_domain_unlock(domain);
258
259		if (post_all_free) {
260			ocs_domain_post_event(domain, OCS_EVT_ALL_CHILD_NODES_FREE, NULL);
261		}
262
263		ocs_sport_lock_free(sport);
264		ocs_lock_free(&sport->node_group_lock);
265		ocs_scsi_sport_deleted(sport);
266
267		ocs_free(domain->ocs, sport, sizeof(*sport));
268
269	}
270}
271
272/**
273 * @ingroup sport_sm
274 * @brief Free memory resources of a SLI port object.
275 *
276 * @par Description
 * The sport's child nodes are forcibly freed, and then the sport object itself is freed.
278 *
279 * @param sport Pointer to the SLI port object.
280 *
281 * @return None.
282 */
283
284void ocs_sport_force_free(ocs_sport_t *sport)
285{
286	ocs_node_t *node;
287	ocs_node_t *next;
288
289	/* shutdown sm processing */
290	ocs_sm_disable(&sport->sm);
291
292	ocs_scsi_notify_sport_force_free(sport);
293
294	ocs_sport_lock(sport);
295		ocs_list_foreach_safe(&sport->node_list, node, next) {
296			ocs_node_force_free(node);
297		}
298	ocs_sport_unlock(sport);
299	ocs_sport_free(sport);
300}
301
302/**
303 * @ingroup sport_sm
304 * @brief Return a SLI port object, given an instance index.
305 *
306 * @par Description
307 * A pointer to a sport object is returned, given its instance @c index.
308 *
309 * @param domain Pointer to the domain.
310 * @param index Instance index value to find.
311 *
312 * @return Returns a pointer to the ocs_sport_t object; or NULL.
313 */
314
315ocs_sport_t *
316ocs_sport_get_instance(ocs_domain_t *domain, uint32_t index)
317{
318	ocs_sport_t *sport;
319
320	ocs_domain_lock(domain);
321		ocs_list_foreach(&domain->sport_list, sport) {
322			if (sport->instance_index == index) {
323				ocs_domain_unlock(domain);
324				return sport;
325			}
326		}
327	ocs_domain_unlock(domain);
328	return NULL;
329}
330
331/**
332 * @ingroup sport_sm
333 * @brief Find a SLI port object, given an FC_ID.
334 *
335 * @par Description
336 * Returns a pointer to the sport object, given an FC_ID.
337 *
338 * @param domain Pointer to the domain.
339 * @param d_id FC_ID to find.
340 *
341 * @return Returns a pointer to the ocs_sport_t; or NULL.
342 */
343
344ocs_sport_t *
345ocs_sport_find(ocs_domain_t *domain, uint32_t d_id)
346{
347	ocs_sport_t *sport;
348
349	ocs_assert(domain, NULL);
350	ocs_lock(&domain->lookup_lock);
351		if (domain->lookup == NULL) {
352			ocs_log_test(domain->ocs, "assertion failed: domain->lookup is not valid\n");
353			ocs_unlock(&domain->lookup_lock);
354			return NULL;
355		}
356
357		sport = spv_get(domain->lookup, d_id);
358	ocs_unlock(&domain->lookup_lock);
359	return sport;
360}
361
362/**
363 * @ingroup sport_sm
364 * @brief Find a SLI port, given the WWNN and WWPN.
365 *
366 * @par Description
367 * Return a pointer to a sport, given the WWNN and WWPN.
368 *
369 * @param domain Pointer to the domain.
370 * @param wwnn World wide node name.
371 * @param wwpn World wide port name.
372 *
373 * @return Returns a pointer to a SLI port, if found; or NULL.
374 */
375
376ocs_sport_t *
377ocs_sport_find_wwn(ocs_domain_t *domain, uint64_t wwnn, uint64_t wwpn)
378{
379	ocs_sport_t *sport = NULL;
380
381	ocs_domain_lock(domain);
382		ocs_list_foreach(&domain->sport_list, sport) {
383			if ((sport->wwnn == wwnn) && (sport->wwpn == wwpn)) {
384				ocs_domain_unlock(domain);
385				return sport;
386			}
387		}
388	ocs_domain_unlock(domain);
389	return NULL;
390}
391
392/**
393 * @ingroup sport_sm
394 * @brief Request a SLI port attach.
395 *
396 * @par Description
397 * External call to request an attach for a sport, given an FC_ID.
398 *
399 * @param sport Pointer to the sport context.
 * @param fc_id FC_ID with which to attach the sport.
401 *
402 * @return Returns 0 on success, or a negative error value on failure.
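 *
 * @par Example
 * A usage sketch, assuming @c d_id holds the port ID obtained from the fabric
 * login exchange (the variable name is illustrative only):
 * @code
 * if (ocs_sport_attach(sport, d_id) != 0) {
 *         // the attach request could not be issued to the HW
 * }
 * @endcode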
403 */
404
405int32_t
406ocs_sport_attach(ocs_sport_t *sport, uint32_t fc_id)
407{
408	ocs_hw_rtn_e rc;
409	ocs_node_t *node;
410
411	/* Set our lookup */
412	ocs_lock(&sport->domain->lookup_lock);
413		spv_set(sport->domain->lookup, fc_id, sport);
414	ocs_unlock(&sport->domain->lookup_lock);
415
416	/* Update our display_name */
417	ocs_node_fcid_display(fc_id, sport->display_name, sizeof(sport->display_name));
418	ocs_sport_lock(sport);
419		ocs_list_foreach(&sport->node_list, node) {
420			ocs_node_update_display_name(node);
421		}
422	ocs_sport_unlock(sport);
423	ocs_log_debug(sport->ocs, "[%s] attach sport: fc_id x%06x\n", sport->display_name, fc_id);
424
425	rc = ocs_hw_port_attach(&sport->ocs->hw, sport, fc_id);
426	if (rc != OCS_HW_RTN_SUCCESS) {
427		ocs_log_err(sport->ocs, "ocs_hw_port_attach failed: %d\n", rc);
428		return -1;
429	}
430	return 0;
431}
432
433/**
434 * @brief Common SLI port state machine declarations and initialization.
435 */
436#define std_sport_state_decl() \
437	ocs_sport_t *sport = NULL; \
438	ocs_domain_t *domain = NULL; \
439	ocs_t *ocs = NULL; \
440	\
441	ocs_assert(ctx, NULL); \
442	sport = ctx->app; \
443	ocs_assert(sport, NULL); \
444	\
445	domain = sport->domain; \
446	ocs_assert(domain, NULL); \
447	ocs = sport->ocs; \
448	ocs_assert(ocs, NULL);
449
450/**
451 * @brief Common SLI port state machine trace logging.
452 */
453#define sport_sm_trace(sport)  \
454	do { \
455		if (OCS_LOG_ENABLE_DOMAIN_SM_TRACE(ocs)) \
456			ocs_log_debug(ocs, "[%s] %-20s\n", sport->display_name, ocs_sm_event_name(evt)); \
457	} while (0)
458
459
460/**
461 * @brief SLI port state machine: Common event handler.
462 *
463 * @par Description
464 * Handle common sport events.
465 *
466 * @param funcname Function name to display.
467 * @param ctx Sport state machine context.
468 * @param evt Event to process.
469 * @param arg Per event optional argument.
470 *
471 * @return Returns NULL.
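 *
 * @par Example
 * Each sport state handler forwards the events it does not handle to this
 * function from its default case, following the pattern used throughout this
 * file:
 * @code
 * default:
 *         __ocs_sport_common(__func__, ctx, evt, arg);
 *         return NULL;
 * @endcode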
472 */
473
474static void *
475__ocs_sport_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
476{
477	std_sport_state_decl();
478
479	switch(evt) {
480	case OCS_EVT_ENTER:
481	case OCS_EVT_REENTER:
482	case OCS_EVT_EXIT:
483	case OCS_EVT_ALL_CHILD_NODES_FREE:
484		break;
485	case OCS_EVT_SPORT_ATTACH_OK:
486			ocs_sm_transition(ctx, __ocs_sport_attached, NULL);
487		break;
488	case OCS_EVT_SHUTDOWN: {
489		ocs_node_t *node;
490		ocs_node_t *node_next;
491		int node_list_empty;
492
493		/* Flag this sport as shutting down */
494		sport->shutting_down = 1;
495
496		if (sport->is_vport) {
497			ocs_vport_link_down(sport);
498		}
499
500		ocs_sport_lock(sport);
501			node_list_empty = ocs_list_empty(&sport->node_list);
502		ocs_sport_unlock(sport);
503
504		if (node_list_empty) {
505			/* sm: node list is empty / ocs_hw_port_free
506			 * Remove the sport from the domain's sparse vector lookup table */
507			ocs_lock(&domain->lookup_lock);
508				spv_set(domain->lookup, sport->fc_id, NULL);
509			ocs_unlock(&domain->lookup_lock);
510			ocs_sm_transition(ctx, __ocs_sport_wait_port_free, NULL);
511			if (ocs_hw_port_free(&ocs->hw, sport)) {
512				ocs_log_test(sport->ocs, "ocs_hw_port_free failed\n");
513				/* Not much we can do, free the sport anyways */
514				ocs_sport_free(sport);
515			}
516		} else {
517			/* sm: node list is not empty / shutdown nodes */
518			ocs_sm_transition(ctx, __ocs_sport_wait_shutdown, NULL);
519			ocs_sport_lock(sport);
520				ocs_list_foreach_safe(&sport->node_list, node, node_next) {
521					/*
522					 * If this is a vport, logout of the fabric controller so that it
523					 * deletes the vport on the switch.
524					 */
					if ((node->rnode.fc_id == FC_ADDR_FABRIC) && (sport->is_vport)) {
526						/* if link is down, don't send logo */
527						if (sport->ocs->hw.link.status == SLI_LINK_STATUS_DOWN) {
528							ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
529						} else {
							ocs_log_debug(ocs, "[%s] sport shutdown vport, sending LOGO to node\n",
531								      node->display_name);
532
533							if (ocs_send_logo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
534								  0, NULL, NULL) == NULL) {
535								/* failed to send LOGO, go ahead and cleanup node anyways */
536								node_printf(node, "Failed to send LOGO\n");
537								ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
538							} else {
539								/* sent LOGO, wait for response */
540								ocs_node_transition(node, __ocs_d_wait_logo_rsp, NULL);
541							}
542						}
543					} else {
544						ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
545					}
546				}
547			ocs_sport_unlock(sport);
548		}
549		break;
550	}
551	default:
552		ocs_log_test(sport->ocs, "[%s] %-20s %-20s not handled\n", sport->display_name, funcname, ocs_sm_event_name(evt));
553		break;
554	}
555
556	return NULL;
557}
558
559/**
560 * @ingroup sport_sm
561 * @brief SLI port state machine: Physical sport allocated.
562 *
563 * @par Description
564 * This is the initial state for sport objects.
565 *
 * @param ctx Sport state machine context.
567 * @param evt Event to process.
568 * @param arg Per event optional argument.
569 *
570 * @return Returns NULL.
571 */
572
573void *
574__ocs_sport_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
575{
576	std_sport_state_decl();
577
578	sport_sm_trace(sport);
579
580	switch(evt) {
581	/* the physical sport is attached */
582	case OCS_EVT_SPORT_ATTACH_OK:
583		ocs_assert(sport == domain->sport, NULL);
584		ocs_sm_transition(ctx, __ocs_sport_attached, NULL);
585		break;
586
587	case OCS_EVT_SPORT_ALLOC_OK:
588		/* ignore */
589		break;
590	default:
591		__ocs_sport_common(__func__, ctx, evt, arg);
592		return NULL;
593	}
594	return NULL;
595}
596
597/**
598 * @ingroup sport_sm
599 * @brief SLI port state machine: Handle initial virtual port events.
600 *
601 * @par Description
 * This state is entered when a virtual port is instantiated.
603 *
 * @param ctx Sport state machine context.
605 * @param evt Event to process.
606 * @param arg Per event optional argument.
607 *
608 * @return Returns NULL.
609 */
610
611void *
612__ocs_sport_vport_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
613{
614	std_sport_state_decl();
615
616	sport_sm_trace(sport);
617
618	switch(evt) {
619	case OCS_EVT_ENTER: {
620		uint64_t be_wwpn = ocs_htobe64(sport->wwpn);
621
622		if (sport->wwpn == 0) {
623			ocs_log_debug(ocs, "vport: letting f/w select WWN\n");
624		}
625
626		if (sport->fc_id != UINT32_MAX) {
627			ocs_log_debug(ocs, "vport: hard coding port id: %x\n", sport->fc_id);
628		}
629
630		ocs_sm_transition(ctx, __ocs_sport_vport_wait_alloc, NULL);
		/* If wwpn is zero, then we'll let the f/w pick the WWN */
		if (ocs_hw_port_alloc(&ocs->hw, sport, sport->domain,
			(sport->wwpn == 0) ? NULL : (uint8_t *)&be_wwpn)) {
			ocs_log_err(ocs, "Can't allocate port\n");
			break;
		}

		break;
640	}
641	default:
642		__ocs_sport_common(__func__, ctx, evt, arg);
643		return NULL;
644	}
645	return NULL;
646}
647
648/**
649 * @ingroup sport_sm
650 * @brief SLI port state machine: Wait for the HW SLI port allocation to complete.
651 *
652 * @par Description
653 * Waits for the HW sport allocation request to complete.
654 *
 * @param ctx Sport state machine context.
656 * @param evt Event to process.
657 * @param arg Per event optional argument.
658 *
659 * @return Returns NULL.
660 */
661
662void *
663__ocs_sport_vport_wait_alloc(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
664{
665	std_sport_state_decl();
666
667	sport_sm_trace(sport);
668
669	switch(evt) {
670	case OCS_EVT_SPORT_ALLOC_OK: {
671		fc_plogi_payload_t *sp = (fc_plogi_payload_t*) sport->service_params;
672		ocs_node_t *fabric;
673
		/* If we let the f/w assign the WWNs, then update the sport WWNs with those returned by the HW */
675		if (sport->wwnn == 0) {
676			sport->wwnn = ocs_be64toh(sport->sli_wwnn);
677			sport->wwpn = ocs_be64toh(sport->sli_wwpn);
678			ocs_snprintf(sport->wwnn_str, sizeof(sport->wwnn_str), "%016llx", (unsigned long long) sport->wwpn);
679		}
680
681		/* Update the sport's service parameters */
682		sp->port_name_hi = ocs_htobe32((uint32_t) (sport->wwpn >> 32ll));
683		sp->port_name_lo = ocs_htobe32((uint32_t) sport->wwpn);
684		sp->node_name_hi = ocs_htobe32((uint32_t) (sport->wwnn >> 32ll));
685		sp->node_name_lo = ocs_htobe32((uint32_t) sport->wwnn);
686
687		/* if sport->fc_id is uninitialized, then request that the fabric node use FDISC
688		 * to find an fc_id.   Otherwise we're restoring vports, or we're in
689		 * fabric emulation mode, so attach the fc_id
690		 */
691		if (sport->fc_id == UINT32_MAX) {
692			fabric = ocs_node_alloc(sport, FC_ADDR_FABRIC, FALSE, FALSE);
693			if (fabric == NULL) {
694				ocs_log_err(ocs, "ocs_node_alloc() failed\n");
695				return NULL;
696			}
697			ocs_node_transition(fabric, __ocs_vport_fabric_init, NULL);
698		} else {
699			ocs_snprintf(sport->wwnn_str, sizeof(sport->wwnn_str), "%016llx", (unsigned long long)sport->wwpn);
700			ocs_sport_attach(sport, sport->fc_id);
701		}
702		ocs_sm_transition(ctx, __ocs_sport_vport_allocated, NULL);
703		break;
704	}
705	default:
706		__ocs_sport_common(__func__, ctx, evt, arg);
707		return NULL;
708	}
709	return NULL;
710}
711
712/**
713 * @ingroup sport_sm
714 * @brief SLI port state machine: virtual sport allocated.
715 *
716 * @par Description
717 * This state is entered after the sport is allocated; it then waits for a fabric node
718 * FDISC to complete, which requests a sport attach.
719 * The sport attach complete is handled in this state.
720 *
 * @param ctx Sport state machine context.
722 * @param evt Event to process.
723 * @param arg Per event optional argument.
724 *
725 * @return Returns NULL.
726 */
727
728void *
729__ocs_sport_vport_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
730{
731	std_sport_state_decl();
732
733	sport_sm_trace(sport);
734
735	switch(evt) {
736	case OCS_EVT_SPORT_ATTACH_OK: {
737		ocs_node_t *node;
738
739		if (!(domain->femul_enable)) {
740			/* Find our fabric node, and forward this event */
741			node = ocs_node_find(sport, FC_ADDR_FABRIC);
742			if (node == NULL) {
743				ocs_log_test(ocs, "can't find node %06x\n", FC_ADDR_FABRIC);
744				break;
745			}
746			/* sm: / forward sport attach to fabric node */
747			ocs_node_post_event(node, evt, NULL);
748		}
749		ocs_sm_transition(ctx, __ocs_sport_attached, NULL);
750		break;
751	}
752	default:
753		__ocs_sport_common(__func__, ctx, evt, arg);
754		return NULL;
755	}
756	return NULL;
757}
758
759/**
760 * @ingroup sport_sm
761 * @brief SLI port state machine: Attached.
762 *
763 * @par Description
764 * State entered after the sport attach has completed.
765 *
 * @param ctx Sport state machine context.
767 * @param evt Event to process.
768 * @param arg Per event optional argument.
769 *
770 * @return Returns NULL.
771 */
772
773void *
774__ocs_sport_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
775{
776	std_sport_state_decl();
777
778	sport_sm_trace(sport);
779
780	switch(evt) {
781	case OCS_EVT_ENTER: {
782		ocs_node_t *node;
783
		ocs_log_debug(ocs, "[%s] SPORT attached WWPN %016llx WWNN %016llx\n", sport->display_name,
			(unsigned long long)sport->wwpn, (unsigned long long)sport->wwnn);
786		ocs_sport_lock(sport);
787			ocs_list_foreach(&sport->node_list, node) {
788				ocs_node_update_display_name(node);
789			}
790		ocs_sport_unlock(sport);
791		sport->tgt_id = sport->fc_id;
792		if (sport->enable_ini) {
793			ocs_scsi_ini_new_sport(sport);
794		}
795		if (sport->enable_tgt) {
796			ocs_scsi_tgt_new_sport(sport);
797		}
798
		/* If this is a vport (not the physical sport), update the saved vport parameters */
800		if (sport->is_vport) {
801			ocs_vport_update_spec(sport);
802		}
803
804		break;
805	}
806
807	case OCS_EVT_EXIT:
		ocs_log_debug(ocs, "[%s] SPORT detached WWPN %016llx WWNN %016llx\n", sport->display_name,
			(unsigned long long)sport->wwpn, (unsigned long long)sport->wwnn);
810		if (sport->enable_ini) {
811			ocs_scsi_ini_del_sport(sport);
812		}
813		if (sport->enable_tgt) {
814			ocs_scsi_tgt_del_sport(sport);
815		}
816		break;
817	default:
818		__ocs_sport_common(__func__, ctx, evt, arg);
819		return NULL;
820	}
821	return NULL;
822}
823
824/**
825 * @ingroup sport_sm
826 * @brief SLI port state machine: Wait for the node shutdowns to complete.
827 *
828 * @par Description
829 * Waits for the ALL_CHILD_NODES_FREE event to be posted from the node
830 * shutdown process.
831 *
 * @param ctx Sport state machine context.
833 * @param evt Event to process.
834 * @param arg Per event optional argument.
835 *
836 * @return Returns NULL.
837 */
838
839void *
840__ocs_sport_wait_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
841{
842	std_sport_state_decl();
843
844	sport_sm_trace(sport);
845
846	switch(evt) {
847	case OCS_EVT_SPORT_ALLOC_OK:
848	case OCS_EVT_SPORT_ALLOC_FAIL:
849	case OCS_EVT_SPORT_ATTACH_OK:
850	case OCS_EVT_SPORT_ATTACH_FAIL:
851		/* ignore these events - just wait for the all free event */
852		break;
853
854	case OCS_EVT_ALL_CHILD_NODES_FREE: {
855		/* Remove the sport from the domain's sparse vector lookup table */
856		ocs_lock(&domain->lookup_lock);
857			spv_set(domain->lookup, sport->fc_id, NULL);
858		ocs_unlock(&domain->lookup_lock);
859		ocs_sm_transition(ctx, __ocs_sport_wait_port_free, NULL);
860		if (ocs_hw_port_free(&ocs->hw, sport)) {
861			ocs_log_err(sport->ocs, "ocs_hw_port_free failed\n");
862			/* Not much we can do, free the sport anyways */
863			ocs_sport_free(sport);
864		}
865		break;
866	}
867	default:
868		__ocs_sport_common(__func__, ctx, evt, arg);
869		return NULL;
870	}
871	return NULL;
872}
873
874/**
875 * @ingroup sport_sm
876 * @brief SLI port state machine: Wait for the HW's port free to complete.
877 *
878 * @par Description
879 * Waits for the HW's port free to complete.
880 *
 * @param ctx Sport state machine context.
882 * @param evt Event to process.
883 * @param arg Per event optional argument.
884 *
885 * @return Returns NULL.
886 */
887
888void *
889__ocs_sport_wait_port_free(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
890{
891	std_sport_state_decl();
892
893	sport_sm_trace(sport);
894
895	switch(evt) {
896	case OCS_EVT_SPORT_ATTACH_OK:
897		/* Ignore as we are waiting for the free CB */
898		break;
899	case OCS_EVT_SPORT_FREE_OK: {
900		/* All done, free myself */
901		ocs_sport_free(sport);
902		break;
903	}
904	default:
905		__ocs_sport_common(__func__, ctx, evt, arg);
906		return NULL;
907	}
908	return NULL;
909}
910
911/**
912 * @ingroup sport_sm
 * @brief Start the vports on a domain.
914 *
915 * @par Description
916 * Use the vport specification to find the associated vports and start them.
917 *
918 * @param domain Pointer to the domain context.
919 *
920 * @return Returns 0 on success, or a negative error value on failure.
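 *
 * @par Example
 * A usage sketch; within this file the function is invoked from
 * ocs_sport_vport_new() when a restorable vport is requested:
 * @code
 * if (ocs_vport_start(domain) != 0) {
 *         // one or more saved vports could not be allocated
 * }
 * @endcode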
921 */
922int32_t
923ocs_vport_start(ocs_domain_t *domain)
924{
925	ocs_t *ocs = domain->ocs;
926	ocs_xport_t *xport = ocs->xport;
927	ocs_vport_spec_t *vport;
928	ocs_vport_spec_t *next;
929	ocs_sport_t *sport;
930	int32_t rc = 0;
931
932	ocs_device_lock(ocs);
933	ocs_list_foreach_safe(&xport->vport_list, vport, next) {
934		if (vport->domain_instance == domain->instance_index &&
935		    vport->sport == NULL) {
936			/* If role not set, skip this vport */
937			if (!(vport->enable_ini || vport->enable_tgt)) {
938				continue;
939			}
940
941			/* Allocate a sport */
942			vport->sport = sport = ocs_sport_alloc(domain, vport->wwpn, vport->wwnn, vport->fc_id,
943							       vport->enable_ini, vport->enable_tgt);
944			if (sport == NULL) {
945				rc = -1;
946			} else {
947				sport->is_vport = 1;
948				sport->tgt_data = vport->tgt_data;
949				sport->ini_data = vport->ini_data;
950
951				/* Transition to vport_init */
952				ocs_sm_transition(&sport->sm, __ocs_sport_vport_init, NULL);
953			}
954		}
955	}
956	ocs_device_unlock(ocs);
957	return rc;
958}
959
960/**
961 * @ingroup sport_sm
962 * @brief Clear the sport reference in the vport specification.
963 *
964 * @par Description
965 * Clear the sport pointer on the vport specification when the vport is torn down. This allows it to be
966 * re-created when the link is re-established.
967 *
968 * @param sport Pointer to the sport context.
969 */
970static void
971ocs_vport_link_down(ocs_sport_t *sport)
972{
973	ocs_t *ocs = sport->ocs;
974	ocs_xport_t *xport = ocs->xport;
975	ocs_vport_spec_t *vport;
976
977	ocs_device_lock(ocs);
978	ocs_list_foreach(&xport->vport_list, vport) {
979		if (vport->sport == sport) {
980			vport->sport = NULL;
981			break;
982		}
983	}
984	ocs_device_unlock(ocs);
985}
986
987/**
988 * @ingroup sport_sm
989 * @brief Allocate a new virtual SLI port.
990 *
991 * @par Description
992 * A new sport is created, in response to an external management request.
993 *
994 * @n @b Note: If the WWPN is zero, the firmware will assign the WWNs.
995 *
996 * @param domain Pointer to the domain context.
997 * @param wwpn World wide port name.
 * @param wwnn World wide node name.
999 * @param fc_id Requested port ID (used in fabric emulation mode).
1000 * @param ini TRUE, if port is created as an initiator node.
1001 * @param tgt TRUE, if port is created as a target node.
1002 * @param tgt_data Pointer to target specific data
1003 * @param ini_data Pointer to initiator specific data
1004 * @param restore_vport If TRUE, then the vport will be re-created automatically
1005 *                      on link disruption.
1006 *
1007 * @return Returns 0 on success; or a negative error value on failure.
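 *
 * @par Example
 * A sketch of creating a restorable initiator-only vport; the @c domain
 * pointer and WWN values are caller-supplied placeholders:
 * @code
 * // UINT32_MAX requests a fabric-assigned port ID
 * if (ocs_sport_vport_new(domain, wwpn, wwnn, UINT32_MAX, TRUE, FALSE,
 *                         NULL, NULL, TRUE) != 0) {
 *         // vport creation failed
 * }
 * @endcode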
1008 */
1009
1010int32_t
1011ocs_sport_vport_new(ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn,
1012		    uint32_t fc_id, uint8_t ini, uint8_t tgt, void *tgt_data,
1013		    void *ini_data, uint8_t restore_vport)
1014{
1015	ocs_sport_t *sport;
1016
1017	if (ini && (domain->ocs->enable_ini == 0)) {
1018		ocs_log_test(domain->ocs, "driver initiator functionality not enabled\n");
1019		return -1;
1020	}
1021
1022	if (tgt && (domain->ocs->enable_tgt == 0)) {
1023		ocs_log_test(domain->ocs, "driver target functionality not enabled\n");
1024		return -1;
1025	}
1026
1027	/* Create a vport spec if we need to recreate this vport after a link up event */
1028	if (restore_vport) {
1029		if (ocs_vport_create_spec(domain->ocs, wwnn, wwpn, fc_id, ini, tgt, tgt_data, ini_data)) {
1030			ocs_log_test(domain->ocs, "failed to create vport object entry\n");
1031			return -1;
1032		}
1033		return ocs_vport_start(domain);
1034	}
1035
1036	/* Allocate a sport */
1037	sport = ocs_sport_alloc(domain, wwpn, wwnn, fc_id, ini, tgt);
1038
1039	if (sport == NULL) {
1040		return -1;
1041	}
1042
1043	sport->is_vport = 1;
1044	sport->tgt_data = tgt_data;
1045	sport->ini_data = ini_data;
1046
1047	/* Transition to vport_init */
1048	ocs_sm_transition(&sport->sm, __ocs_sport_vport_init, NULL);
1049
1050	return 0;
1051}
1052
1053int32_t
1054ocs_sport_vport_alloc(ocs_domain_t *domain, ocs_vport_spec_t *vport)
1055{
1056	ocs_sport_t *sport = NULL;
1057
1058	if (domain == NULL) {
1059		return (0);
1060	}
1061
1062	ocs_assert((vport->sport == NULL), -1);
1063
1064	/* Allocate a sport */
1065	vport->sport = sport = ocs_sport_alloc(domain, vport->wwpn, vport->wwnn, UINT32_MAX, vport->enable_ini, vport->enable_tgt);
1066
1067	if (sport == NULL) {
1068		return -1;
1069	}
1070
1071	sport->is_vport = 1;
1072	sport->tgt_data = vport->tgt_data;
	sport->ini_data = vport->ini_data;
1074
1075	/* Transition to vport_init */
1076	ocs_sm_transition(&sport->sm, __ocs_sport_vport_init, NULL);
1077
1078	return (0);
1079}
1080
1081/**
1082 * @ingroup sport_sm
1083 * @brief Remove a previously-allocated virtual port.
1084 *
1085 * @par Description
1086 * A previously-allocated virtual port is removed by posting the shutdown event to the
1087 * sport with a matching WWN.
1088 *
1089 * @param ocs Pointer to the device object.
1090 * @param domain Pointer to the domain structure (may be NULL).
1091 * @param wwpn World wide port name of the port to delete (host endian).
1092 * @param wwnn World wide node name of the port to delete (host endian).
1093 *
1094 * @return Returns 0 on success, or a negative error value on failure.
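 *
 * @par Example
 * A sketch of removing a previously created vport; the WWNs must match the
 * values used when the vport was created:
 * @code
 * if (ocs_sport_vport_del(ocs, domain, wwpn, wwnn) != 0) {
 *         // request failed
 * }
 * @endcode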
1095 */
1096
1097int32_t ocs_sport_vport_del(ocs_t *ocs, ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn)
1098{
1099	ocs_xport_t *xport = ocs->xport;
1100	ocs_sport_t *sport;
1101	int found = 0;
1102	ocs_vport_spec_t *vport;
1103	ocs_vport_spec_t *next;
1104	uint32_t instance;
1105
1106	/* If no domain is given, use instance 0, otherwise use domain instance */
1107	if (domain == NULL) {
1108		instance = 0;
1109	} else {
1110		instance = domain->instance_index;
1111	}
1112
1113	/* walk the ocs_vport_list and remove from there */
1114
1115	ocs_device_lock(ocs);
1116		ocs_list_foreach_safe(&xport->vport_list, vport, next) {
1117			if ((vport->domain_instance == instance) &&
1118				(vport->wwpn == wwpn) && (vport->wwnn == wwnn)) {
1119				vport->sport = NULL;
1120				break;
1121			}
1122		}
1123	ocs_device_unlock(ocs);
1124
1125	if (domain == NULL) {
1126		/* No domain means no sport to look for */
1127		return 0;
1128	}
1129
1130	ocs_domain_lock(domain);
1131		ocs_list_foreach(&domain->sport_list, sport) {
1132			if ((sport->wwpn == wwpn) && (sport->wwnn == wwnn)) {
1133				found = 1;
1134				break;
1135			}
1136		}
1137		if (found) {
1138			/* Shutdown this SPORT */
1139			ocs_sm_post_event(&sport->sm, OCS_EVT_SHUTDOWN, NULL);
1140		}
1141	ocs_domain_unlock(domain);
1142	return 0;
1143}
1144
1145/**
1146 * @brief Force free all saved vports.
1147 *
1148 * @par Description
1149 * Delete all device vports.
1150 *
1151 * @param ocs Pointer to the device object.
1152 *
1153 * @return None.
1154 */
1155
1156void
1157ocs_vport_del_all(ocs_t *ocs)
1158{
1159	ocs_xport_t *xport = ocs->xport;
1160	ocs_vport_spec_t *vport;
1161	ocs_vport_spec_t *next;
1162
1163	ocs_device_lock(ocs);
1164		ocs_list_foreach_safe(&xport->vport_list, vport, next) {
1165			ocs_list_remove(&xport->vport_list, vport);
1166			ocs_free(ocs, vport, sizeof(*vport));
1167		}
1168	ocs_device_unlock(ocs);
1169}
1170
1171/**
1172 * @ingroup sport_sm
1173 * @brief Generate a SLI port ddump.
1174 *
1175 * @par Description
1176 * Generates the SLI port ddump data.
1177 *
1178 * @param textbuf Pointer to the text buffer.
1179 * @param sport Pointer to the SLI-4 port.
1180 *
1181 * @return Returns 0 on success, or a negative value on failure.
1182 */
1183
1184int
1185ocs_ddump_sport(ocs_textbuf_t *textbuf, ocs_sli_port_t *sport)
1186{
1187	ocs_node_t *node;
1188	ocs_node_group_dir_t *node_group_dir;
1189	int retval = 0;
1190
1191	ocs_ddump_section(textbuf, "sport", sport->instance_index);
1192	ocs_ddump_value(textbuf, "display_name", "%s", sport->display_name);
1193
1194	ocs_ddump_value(textbuf, "is_vport", "%d", sport->is_vport);
1195	ocs_ddump_value(textbuf, "enable_ini", "%d", sport->enable_ini);
1196	ocs_ddump_value(textbuf, "enable_tgt", "%d", sport->enable_tgt);
1197	ocs_ddump_value(textbuf, "shutting_down", "%d", sport->shutting_down);
1198	ocs_ddump_value(textbuf, "topology", "%d", sport->topology);
1199	ocs_ddump_value(textbuf, "p2p_winner", "%d", sport->p2p_winner);
1200	ocs_ddump_value(textbuf, "p2p_port_id", "%06x", sport->p2p_port_id);
1201	ocs_ddump_value(textbuf, "p2p_remote_port_id", "%06x", sport->p2p_remote_port_id);
1202	ocs_ddump_value(textbuf, "wwpn", "%016llx", (unsigned long long)sport->wwpn);
1203	ocs_ddump_value(textbuf, "wwnn", "%016llx", (unsigned long long)sport->wwnn);
1204	/*TODO: service_params */
1205
1206	ocs_ddump_value(textbuf, "indicator", "x%x", sport->indicator);
1207	ocs_ddump_value(textbuf, "fc_id", "x%06x", sport->fc_id);
1208	ocs_ddump_value(textbuf, "index", "%d", sport->index);
1209
1210	ocs_display_sparams(NULL, "sport_sparams", 1, textbuf, sport->service_params+4);
1211
1212	/* HLM dump */
1213	ocs_ddump_section(textbuf, "hlm", sport->instance_index);
1214	ocs_lock(&sport->node_group_lock);
1215		ocs_list_foreach(&sport->node_group_dir_list, node_group_dir) {
1216			ocs_remote_node_group_t *remote_node_group;
1217
1218			ocs_ddump_section(textbuf, "node_group_dir", node_group_dir->instance_index);
1219
1220			ocs_ddump_value(textbuf, "node_group_list_count", "%d", node_group_dir->node_group_list_count);
1221			ocs_ddump_value(textbuf, "next_idx", "%d", node_group_dir->next_idx);
1222			ocs_list_foreach(&node_group_dir->node_group_list, remote_node_group) {
1223				ocs_ddump_section(textbuf, "node_group", remote_node_group->instance_index);
1224				ocs_ddump_value(textbuf, "indicator", "x%x", remote_node_group->indicator);
1225				ocs_ddump_value(textbuf, "index", "x%x", remote_node_group->index);
1226				ocs_ddump_value(textbuf, "instance_index", "x%x", remote_node_group->instance_index);
1227				ocs_ddump_endsection(textbuf, "node_group", 0);
1228			}
1229			ocs_ddump_endsection(textbuf, "node_group_dir", 0);
1230		}
1231	ocs_unlock(&sport->node_group_lock);
1232	ocs_ddump_endsection(textbuf, "hlm", sport->instance_index);
1233
1234	ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_SPORT, sport);
1235	ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_SPORT, sport);
1236
1237	/* Dump all the nodes */
1238	if (ocs_sport_lock_try(sport) != TRUE) {
1239		/* Didn't get lock */
1240		return -1;
1241	}
1242		/* Here the sport lock is held */
1243		ocs_list_foreach(&sport->node_list, node) {
1244			retval = ocs_ddump_node(textbuf, node);
1245			if (retval != 0) {
1246				break;
1247			}
1248		}
1249	ocs_sport_unlock(sport);
1250
1251	ocs_ddump_endsection(textbuf, "sport", sport->index);
1252
1253	return retval;
1254}
1255
1256
1257void
1258ocs_mgmt_sport_list(ocs_textbuf_t *textbuf, void *object)
1259{
1260	ocs_node_t *node;
1261	ocs_sport_t *sport = (ocs_sport_t *)object;
1262
1263	ocs_mgmt_start_section(textbuf, "sport", sport->instance_index);
1264
1265	/* Add my status values to textbuf */
1266	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "indicator");
1267	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "fc_id");
1268	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "index");
1269	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "display_name");
1270	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "is_vport");
1271	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "enable_ini");
1272	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "enable_tgt");
1273	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p");
1274	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p_winner");
1275	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p_port_id");
1276	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p_remote_port_id");
1277	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwpn");
1278	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwnn");
1279
1280	if (ocs_sport_lock_try(sport) == TRUE) {
1281
1282		/* If we get here, then we are holding the sport lock */
1283		ocs_list_foreach(&sport->node_list, node) {
1284			if ((node->mgmt_functions) && (node->mgmt_functions->get_list_handler)) {
1285				node->mgmt_functions->get_list_handler(textbuf, node);
1286			}
1287
1288		}
1289		ocs_sport_unlock(sport);
1290	}
1291
1292	ocs_mgmt_end_section(textbuf, "sport", sport->instance_index);
1293}
1294
1295int
1296ocs_mgmt_sport_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *object)
1297{
1298	ocs_node_t *node;
1299	ocs_sport_t *sport = (ocs_sport_t *)object;
1300	char qualifier[80];
1301	int retval = -1;
1302
1303	ocs_mgmt_start_section(textbuf, "sport", sport->instance_index);
1304
1305	snprintf(qualifier, sizeof(qualifier), "%s/sport[%d]", parent, sport->instance_index);
1306
1307	/* If it doesn't start with my qualifier I don't know what to do with it */
1308	if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
1309		char *unqualified_name = name + strlen(qualifier) +1;
1310
1311		/* See if it's a value I can supply */
1312		if (ocs_strcmp(unqualified_name, "indicator") == 0) {
1313			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", sport->indicator);
1314			retval = 0;
1315		} else if (ocs_strcmp(unqualified_name, "fc_id") == 0) {
1316			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", sport->fc_id);
1317			retval = 0;
1318		} else if (ocs_strcmp(unqualified_name, "index") == 0) {
1319			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "index", "%d", sport->index);
1320			retval = 0;
1321		} else if (ocs_strcmp(unqualified_name, "display_name") == 0) {
1322			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", sport->display_name);
1323			retval = 0;
1324		} else if (ocs_strcmp(unqualified_name, "is_vport") == 0) {
1325			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "is_vport",  sport->is_vport);
1326			retval = 0;
1327		} else if (ocs_strcmp(unqualified_name, "enable_ini") == 0) {
1328			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_ini",  sport->enable_ini);
1329			retval = 0;
1330		} else if (ocs_strcmp(unqualified_name, "enable_tgt") == 0) {
1331			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_tgt",  sport->enable_tgt);
1332			retval = 0;
1333		} else if (ocs_strcmp(unqualified_name, "p2p_winner") == 0) {
1334			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "p2p_winner",  sport->p2p_winner);
1335			retval = 0;
1336		} else if (ocs_strcmp(unqualified_name, "p2p_port_id") == 0) {
1337			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_port_id", "0x%06x", sport->p2p_port_id);
1338			retval = 0;
1339		} else if (ocs_strcmp(unqualified_name, "p2p_remote_port_id") == 0) {
1340			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_remote_port_id", "0x%06x", sport->p2p_remote_port_id);
1341			retval = 0;
1342		} else if (ocs_strcmp(unqualified_name, "wwpn") == 0) {
1343			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "0x%016llx", (unsigned long long)sport->wwpn);
1344			retval = 0;
1345		} else if (ocs_strcmp(unqualified_name, "wwnn") == 0) {
1346			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "0x%016llx", (unsigned long long)sport->wwnn);
1347			retval = 0;
1348		} else {
			/* If I didn't know the value of this status, pass the request to each of my children */
1350			ocs_sport_lock(sport);
1351				ocs_list_foreach(&sport->node_list, node) {
1352					if ((node->mgmt_functions) && (node->mgmt_functions->get_handler)) {
1353						retval = node->mgmt_functions->get_handler(textbuf, qualifier, name, node);
1354					}
1355
1356					if (retval == 0) {
1357						break;
1358					}
1359				}
1360			ocs_sport_unlock(sport);
1361		}
1362	}
1363
1364	ocs_mgmt_end_section(textbuf, "sport", sport->instance_index);
1365
1366	return retval;
1367}
1368
1369void
1370ocs_mgmt_sport_get_all(ocs_textbuf_t *textbuf, void *object)
1371{
1372	ocs_node_t *node;
1373	ocs_sport_t *sport = (ocs_sport_t *)object;
1374
1375	ocs_mgmt_start_section(textbuf, "sport", sport->instance_index);
1376
1377	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", sport->indicator);
1378	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", sport->fc_id);
1379	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "index", "%d", sport->index);
1380	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", sport->display_name);
1381	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "is_vport",  sport->is_vport);
1382	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_ini",  sport->enable_ini);
1383	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_tgt",  sport->enable_tgt);
1384	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "p2p_winner",  sport->p2p_winner);
1385	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_port_id", "0x%06x", sport->p2p_port_id);
1386	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_remote_port_id", "0x%06x", sport->p2p_remote_port_id);
1387	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "0x%016llx" , (unsigned long long)sport->wwpn);
1388	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "0x%016llx", (unsigned long long)sport->wwnn);
1389
1390	ocs_sport_lock(sport);
1391	ocs_list_foreach(&sport->node_list, node) {
1392		if ((node->mgmt_functions) && (node->mgmt_functions->get_all_handler)) {
1393			node->mgmt_functions->get_all_handler(textbuf, node);
1394		}
1395	}
1396	ocs_sport_unlock(sport);
1397
1398	ocs_mgmt_end_section(textbuf, "sport", sport->instance_index);
1399}
1400
1401int
1402ocs_mgmt_sport_set(char *parent, char *name, char *value, void *object)
1403{
1404	ocs_node_t *node;
1405	ocs_sport_t *sport = (ocs_sport_t *)object;
1406	char qualifier[80];
1407	int retval = -1;
1408
1409	snprintf(qualifier, sizeof(qualifier), "%s/sport[%d]", parent, sport->instance_index);
1410
1411	/* If it doesn't start with my qualifier I don't know what to do with it */
1412	if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
1413		/* The sport has no settable values.  Pass the request to each node. */
1414
1415		ocs_sport_lock(sport);
1416		ocs_list_foreach(&sport->node_list, node) {
1417			if ((node->mgmt_functions) && (node->mgmt_functions->set_handler)) {
1418				retval = node->mgmt_functions->set_handler(qualifier, name, value, node);
1419			}
1420			if (retval == 0) {
1421				break;
1422			}
1423		}
1424		ocs_sport_unlock(sport);
1425	}
1426
1427	return retval;
1428}
1429
1430
1431int
1432ocs_mgmt_sport_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
1433		    void *arg_out, uint32_t arg_out_length, void *object)
1434{
1435	ocs_node_t *node;
1436	ocs_sport_t *sport = (ocs_sport_t *)object;
1437	char qualifier[80];
1438	int retval = -1;
1439
1440	snprintf(qualifier, sizeof(qualifier), "%s.sport%d", parent, sport->instance_index);
1441
1442	/* If it doesn't start with my qualifier I don't know what to do with it */
1443	if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) {
1444
1445		/* See if it's an action I can perform */
1446
1447		/* if (ocs_strcmp ....
1448		 * {
1449		 * } else
1450		 */
1451
1452		{
			/* If I didn't know how to do this action, pass the request to each of my children */
1454			ocs_sport_lock(sport);
1455				ocs_list_foreach(&sport->node_list, node) {
1456					if ((node->mgmt_functions) && (node->mgmt_functions->exec_handler)) {
1457						retval = node->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length,
1458											    arg_out, arg_out_length, node);
1459					}
1460
1461					if (retval == 0) {
1462						break;
1463					}
1464
1465				}
1466			ocs_sport_unlock(sport);
1467		}
1468	}
1469
1470	return retval;
1471}
1472
1473/**
1474 * @brief Save the virtual port's parameters.
1475 *
1476 * @par Description
1477 * The information required to restore a virtual port is saved.
1478 *
1479 * @param sport Pointer to the sport context.
1480 *
1481 * @return None.
1482 */
1483
1484static void
1485ocs_vport_update_spec(ocs_sport_t *sport)
1486{
1487	ocs_t *ocs = sport->ocs;
1488	ocs_xport_t *xport = ocs->xport;
1489	ocs_vport_spec_t *vport;
1490
1491	ocs_device_lock(ocs);
1492	ocs_list_foreach(&xport->vport_list, vport) {
1493		if (vport->sport == sport) {
1494			vport->wwnn = sport->wwnn;
1495			vport->wwpn = sport->wwpn;
1496			vport->tgt_data = sport->tgt_data;
1497			vport->ini_data = sport->ini_data;
1498			break;
1499		}
1500	}
1501	ocs_device_unlock(ocs);
1502}
1503
1504/**
1505 * @brief Create a saved vport entry.
1506 *
1507 * A saved vport entry is added to the vport list, which is restored following
1508 * a link up. This function is used to allow vports to be created the first time
1509 * the link comes up without having to go through the ioctl() API.
1510 *
1511 * @param ocs Pointer to device context.
1512 * @param wwnn World wide node name (may be zero for auto-select).
1513 * @param wwpn World wide port name (may be zero for auto-select).
1514 * @param fc_id Requested port ID (used in fabric emulation mode).
1515 * @param enable_ini TRUE if vport is to be an initiator port.
1516 * @param enable_tgt TRUE if vport is to be a target port.
1517 * @param tgt_data Pointer to target specific data.
1518 * @param ini_data Pointer to initiator specific data.
1519 *
 * @return Returns 0 on success, or a negative error value on failure.
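 *
 * @par Example
 * A sketch of pre-registering an initiator vport before the first link up;
 * note that the WWNN is passed before the WWPN, and the WWN values shown are
 * placeholders:
 * @code
 * if (ocs_vport_create_spec(ocs, wwnn, wwpn, UINT32_MAX, TRUE, FALSE,
 *                           NULL, NULL) != 0) {
 *         // a vport with this WWN pair already exists, or allocation failed
 * }
 * @endcode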
1521 */
1522
1523int8_t
1524ocs_vport_create_spec(ocs_t *ocs, uint64_t wwnn, uint64_t wwpn, uint32_t fc_id, uint32_t enable_ini, uint32_t enable_tgt, void *tgt_data, void *ini_data)
1525{
1526	ocs_xport_t *xport = ocs->xport;
1527	ocs_vport_spec_t *vport;
1528
	/* Walk the ocs_vport_list and return failure if a valid vport entry
	 * (one with a non-zero WWPN and WWNN) has already been created */
1531	ocs_list_foreach(&xport->vport_list, vport) {
1532		if ((wwpn && (vport->wwpn == wwpn)) && (wwnn && (vport->wwnn == wwnn))) {
1533			ocs_log_test(ocs, "Failed: VPORT %016llx  %016llx already allocated\n",
1534				     (unsigned long long)wwnn, (unsigned long long)wwpn);
1535			return -1;
1536		}
1537	}
1538
1539	vport = ocs_malloc(ocs, sizeof(*vport), OCS_M_ZERO | OCS_M_NOWAIT);
1540	if (vport == NULL) {
1541		ocs_log_err(ocs, "ocs_malloc failed\n");
1542		return -1;
1543	}
1544
1545	vport->wwnn = wwnn;
1546	vport->wwpn = wwpn;
1547	vport->fc_id = fc_id;
1548	vport->domain_instance = 0;	/*TODO: may need to change this */
1549	vport->enable_tgt = enable_tgt;
1550	vport->enable_ini = enable_ini;
1551	vport->tgt_data = tgt_data;
1552	vport->ini_data = ini_data;
1553
1554	ocs_device_lock(ocs);
1555		ocs_list_add_tail(&xport->vport_list, vport);
1556	ocs_device_unlock(ocs);
1557	return 0;
1558}
1559
1560/* node group api */
1561
1562/**
1563 * @brief Perform the AND operation on source vectors.
1564 *
1565 * @par Description
1566 * Performs an AND operation on the 8-bit values in source vectors @c b and @c c.
1567 * The resulting value is stored in @c a.
1568 *
1569 * @param a Destination-byte vector.
1570 * @param b Source-byte vector.
1571 * @param c Source-byte vector.
1572 * @param n Byte count.
1573 *
1574 * @return None.
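 *
 * @par Example
 * The typical use in this file is masking a set of service parameters with
 * @c sparms_cmp_mask (defined below); @c sparms stands for any service
 * parameter buffer:
 * @code
 * uint8_t masked[OCS_SERVICE_PARMS_LENGTH];
 *
 * and8(masked, sparms, (uint8_t *)&sparms_cmp_mask, OCS_SERVICE_PARMS_LENGTH);
 * @endcode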
1575 */
1576
1577static void
1578and8(uint8_t *a, uint8_t *b, uint8_t *c, uint32_t n)
1579{
1580	uint32_t i;
1581
1582	for (i = 0; i < n; i ++) {
1583		*a = *b & *c;
1584		a++;
1585		b++;
1586		c++;
1587	}
1588}
1589
1590/**
1591 * @brief Service parameters mask data.
1592 */
1593static fc_sparms_t sparms_cmp_mask = {
1594	0,			/*uint32_t	command_code: 8, */
1595	0,			/*		resv1: 24; */
1596	{~0, ~0, ~0, ~0},	/* uint32_t	common_service_parameters[4]; */
1597	0,			/* uint32_t	port_name_hi; */
1598	0,			/* uint32_t	port_name_lo; */
1599	0,			/* uint32_t	node_name_hi; */
1600	0,			/* uint32_t	node_name_lo; */
1601	{~0, ~0, ~0, ~0},	/* uint32_t	class1_service_parameters[4]; */
1602	{~0, ~0, ~0, ~0},	/* uint32_t	class2_service_parameters[4]; */
1603	{~0, ~0, ~0, ~0},	/* uint32_t	class3_service_parameters[4]; */
1604	{~0, ~0, ~0, ~0},	/* uint32_t	class4_service_parameters[4]; */
1605	{~0, ~0, ~0, ~0}};	/* uint32_t	vendor_version_level[4]; */
1606
1607/**
1608 * @brief Compare service parameters.
1609 *
1610 * @par Description
1611 * Returns 0 if the two service parameters are the same, excluding the port/node name
1612 * elements.
1613 *
1614 * @param sp1 Pointer to service parameters 1.
1615 * @param sp2 Pointer to service parameters 2.
1616 *
1617 * @return Returns 0 if parameters match; otherwise, returns a positive or negative value,
1618 * depending on the arithmetic magnitude of the first mismatching byte.
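 *
 * @par Example
 * A comparison sketch, assuming @c sp_a and @c sp_b point to two received
 * service parameter payloads:
 * @code
 * if (ocs_sparm_cmp(sp_a, sp_b) == 0) {
 *         // parameters match, ignoring the port and node name fields
 * }
 * @endcode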
1619 */
1620
1621int
1622ocs_sparm_cmp(uint8_t *sp1, uint8_t *sp2)
1623{
1624	int i;
1625	int v;
1626	uint8_t *sp3 = (uint8_t*) &sparms_cmp_mask;
1627
1628	for (i = 0; i < OCS_SERVICE_PARMS_LENGTH; i ++) {
1629		v = ((int)(sp1[i] & sp3[i])) - ((int)(sp2[i] & sp3[i]));
1630		if (v) {
1631			break;
1632		}
1633	}
1634	return v;
1635}
1636
1637/**
1638 * @brief Allocate a node group directory entry.
1639 *
1640 * @par Description
1641 * A node group directory entry is allocated, initialized, and added to the sport's
1642 * node group directory list.
1643 *
1644 * @param sport Pointer to the sport object.
1645 * @param sparms Pointer to the service parameters.
1646 *
1647 * @return Returns a pointer to the allocated ocs_node_group_dir_t; or NULL.
1648 */
1649
1650ocs_node_group_dir_t *
1651ocs_node_group_dir_alloc(ocs_sport_t *sport, uint8_t *sparms)
1652{
1653	ocs_node_group_dir_t *node_group_dir;
1654
1655	node_group_dir = ocs_malloc(sport->ocs, sizeof(*node_group_dir), OCS_M_ZERO | OCS_M_NOWAIT);
1656	if (node_group_dir != NULL) {
1657		node_group_dir->sport = sport;
1658
1659		ocs_lock(&sport->node_group_lock);
1660			node_group_dir->instance_index = sport->node_group_dir_next_instance++;
1661			and8(node_group_dir->service_params, sparms, (uint8_t*)&sparms_cmp_mask, OCS_SERVICE_PARMS_LENGTH);
1662			ocs_list_init(&node_group_dir->node_group_list, ocs_remote_node_group_t, link);
1663
1664			node_group_dir->node_group_list_count = 0;
1665			node_group_dir->next_idx = 0;
1666			ocs_list_add_tail(&sport->node_group_dir_list, node_group_dir);
1667		ocs_unlock(&sport->node_group_lock);
1668
1669		ocs_log_debug(sport->ocs, "[%s] [%d] allocating node group directory\n", sport->display_name,
1670			node_group_dir->instance_index);
1671	}
1672	return node_group_dir;
1673}
1674
1675/**
1676 * @brief Free a node group directory entry.
1677 *
1678 * @par Description
1679 * The node group directory entry @c node_group_dir is removed
1680 * from the sport's node group directory list and freed.
1681 *
1682 * @param node_group_dir Pointer to the node group directory entry.
1683 *
1684 * @return None.
1685 */
1686
1687void
1688ocs_node_group_dir_free(ocs_node_group_dir_t *node_group_dir)
1689{
1690	ocs_sport_t *sport;
1691	if (node_group_dir != NULL) {
1692		sport = node_group_dir->sport;
1693		ocs_log_debug(sport->ocs, "[%s] [%d] freeing node group directory\n", sport->display_name,
1694			node_group_dir->instance_index);
1695		ocs_lock(&sport->node_group_lock);
1696			if (!ocs_list_empty(&node_group_dir->node_group_list)) {
1697				ocs_log_test(sport->ocs, "[%s] WARNING: node group list not empty\n", sport->display_name);
1698			}
1699			ocs_list_remove(&sport->node_group_dir_list, node_group_dir);
1700		ocs_unlock(&sport->node_group_lock);
1701		ocs_free(sport->ocs, node_group_dir, sizeof(*node_group_dir));
1702	}
1703}
1704
1705/**
1706 * @brief Find a matching node group directory entry.
1707 *
1708 * @par Description
1709 * The sport's node group directory list is searched for a matching set of
1710 * service parameters. The first matching entry is returned; otherwise
1711 * NULL is returned.
1712 *
1713 * @param sport Pointer to the sport object.
1714 * @param sparms Pointer to the sparams to match.
1715 *
1716 * @return Returns a pointer to the first matching entry found; or NULL.
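 *
 * @par Example
 * The find-or-allocate pattern used by ocs_node_group_init():
 * @code
 * node_group_dir = ocs_node_group_dir_find(sport, node->service_params);
 * if (node_group_dir == NULL) {
 *         node_group_dir = ocs_node_group_dir_alloc(sport, node->service_params);
 * }
 * @endcode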
1717 */
1718
1719ocs_node_group_dir_t *
1720ocs_node_group_dir_find(ocs_sport_t *sport, uint8_t *sparms)
1721{
1722	ocs_node_group_dir_t *node_dir = NULL;
1723
1724	ocs_lock(&sport->node_group_lock);
1725		ocs_list_foreach(&sport->node_group_dir_list, node_dir) {
1726			if (ocs_sparm_cmp(sparms, node_dir->service_params) == 0) {
1727				ocs_unlock(&sport->node_group_lock);
1728				return node_dir;
1729			}
1730		}
1731	ocs_unlock(&sport->node_group_lock);
1732	return NULL;
1733}
1734
1735/**
1736 * @brief Allocate a remote node group object.
1737 *
1738 * @par Description
1739 * A remote node group object is allocated, initialized, and placed on the node group
1740 * list of @c node_group_dir. The HW remote node group @b alloc function is called.
1741 *
1742 * @param node_group_dir Pointer to the node group directory.
1743 *
1744 * @return Returns a pointer to the allocated remote node group object; or NULL.
1745 */
1746
1747ocs_remote_node_group_t *
1748ocs_remote_node_group_alloc(ocs_node_group_dir_t *node_group_dir)
1749{
1750	ocs_t *ocs;
1751	ocs_sport_t *sport;
1752	ocs_remote_node_group_t *node_group;
1753	ocs_hw_rtn_e hrc;
1754
1755	ocs_assert(node_group_dir, NULL);
1756	ocs_assert(node_group_dir->sport, NULL);
1757	ocs_assert(node_group_dir->sport->ocs, NULL);
1758
1759	sport = node_group_dir->sport;
1760	ocs = sport->ocs;
1761
1762
1763	node_group = ocs_malloc(ocs, sizeof(*node_group), OCS_M_ZERO | OCS_M_NOWAIT);
1764	if (node_group != NULL) {
1765
1766		/* set pointer to node group directory */
1767		node_group->node_group_dir = node_group_dir;
1768
1769		ocs_lock(&node_group_dir->sport->node_group_lock);
1770			node_group->instance_index = sport->node_group_next_instance++;
1771		ocs_unlock(&node_group_dir->sport->node_group_lock);
1772
		/* invoke HW node group initialization */
1774		hrc = ocs_hw_node_group_alloc(&ocs->hw, node_group);
1775		if (hrc != OCS_HW_RTN_SUCCESS) {
1776			ocs_log_err(ocs, "ocs_hw_node_group_alloc() failed: %d\n", hrc);
1777			ocs_free(ocs, node_group, sizeof(*node_group));
1778			return NULL;
1779		}
1780
		ocs_log_debug(ocs, "[%s] [%d] indicator x%03x allocating node group\n", sport->display_name,
			node_group->instance_index, node_group->indicator);
1783
		/* add to the node group directory entry node group list */
1785		ocs_lock(&node_group_dir->sport->node_group_lock);
1786			ocs_list_add_tail(&node_group_dir->node_group_list, node_group);
1787			node_group_dir->node_group_list_count ++;
1788		ocs_unlock(&node_group_dir->sport->node_group_lock);
1789	}
1790	return node_group;
1791}
1792
1793/**
1794 * @brief Free a remote node group object.
1795 *
1796 * @par Description
1797 * The remote node group object @c node_group is removed from its
1798 * node group directory entry and freed.
1799 *
1800 * @param node_group Pointer to the remote node group object.
1801 *
1802 * @return None.
1803 */
1804
1805void
1806ocs_remote_node_group_free(ocs_remote_node_group_t *node_group)
1807{
1808	ocs_sport_t *sport;
1809	ocs_node_group_dir_t *node_group_dir;
1810
1811	if (node_group != NULL) {
1812
1813		ocs_assert(node_group->node_group_dir);
1814		ocs_assert(node_group->node_group_dir->sport);
1815		ocs_assert(node_group->node_group_dir->sport->ocs);
1816
1817		node_group_dir = node_group->node_group_dir;
1818		sport = node_group_dir->sport;
1819
1820		ocs_log_debug(sport->ocs, "[%s] [%d] freeing node group\n", sport->display_name, node_group->instance_index);
1821
1822		/* Remove from node group directory node group list */
1823		ocs_lock(&sport->node_group_lock);
1824			ocs_list_remove(&node_group_dir->node_group_list, node_group);
1825			node_group_dir->node_group_list_count --;
		/* TODO: note that we're going to have the node_group_dir entry persist forever ... we could delete it if
		 * the group_list_count goes to zero (or the linked list is empty) */
1828		ocs_unlock(&sport->node_group_lock);
1829		ocs_free(sport->ocs, node_group, sizeof(*node_group));
1830	}
1831}
1832
1833/**
1834 * @brief Initialize a node for high login mode.
1835 *
1836 * @par Description
1837 * The @c node is initialized for high login mode. The following steps are performed:
 * 1. The sport's node group directory is searched for a matching set of service parameters.
 * 2. If a matching set is not found, a node group directory entry is allocated.
 * 3. If fewer than @c hlm_group_size remote node group objects are present in the
 *    node group directory, a new remote node group object is allocated and added to the list.
1842 * 4. A remote node group object is selected, and the node is attached to the node group.
1843 *
1844 * @param node Pointer to the node.
1845 *
1846 * @return Returns 0 on success, or a negative error value on failure.
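 *
 * @par Example
 * A caller-side sketch; high login mode must be enabled on the device before
 * this function is used:
 * @code
 * if (ocs->enable_hlm && (ocs_node_group_init(node) != 0)) {
 *         // fall back to allocating the node with a normal (non-shared) RPI
 * }
 * @endcode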
1847 */
1848
1849int
1850ocs_node_group_init(ocs_node_t *node)
1851{
1852	ocs_t *ocs;
1853	ocs_sport_t *sport;
1854	ocs_node_group_dir_t *node_group_dir;
1855	ocs_remote_node_group_t *node_group;
1856	ocs_hw_rtn_e hrc;
1857
1858	ocs_assert(node, -1);
1859	ocs_assert(node->sport, -1);
1860	ocs_assert(node->ocs, -1);
1861
1862	ocs = node->ocs;
1863	sport = node->sport;
1864
1865	ocs_assert(ocs->enable_hlm, -1);
1866
1867	/* see if there's a node group directory allocated for this service parameter set */
1868	node_group_dir = ocs_node_group_dir_find(sport, node->service_params);
1869	if (node_group_dir == NULL) {
1870		/* not found, so allocate one */
1871		node_group_dir = ocs_node_group_dir_alloc(sport, node->service_params);
1872		if (node_group_dir == NULL) {
1873			/* node group directory allocation failed ... can't continue, however,
1874			 * the node will be allocated with a normal (not shared) RPI
1875			 */
1876			ocs_log_err(ocs, "ocs_node_group_dir_alloc() failed\n");
1877			return -1;
1878		}
1879	}
1880
1881	/* check to see if we've allocated hlm_group_size's worth of node group structures for this
1882	 * directory entry, if not, then allocate and use a new one, otherwise pick the next one.
1883	 */
1884	ocs_lock(&node->sport->node_group_lock);
1885		if (node_group_dir->node_group_list_count < ocs->hlm_group_size) {
1886			ocs_unlock(&node->sport->node_group_lock);
1887				node_group = ocs_remote_node_group_alloc(node_group_dir);
1888			if (node_group == NULL) {
1889				ocs_log_err(ocs, "ocs_remote_node_group_alloc() failed\n");
1890				return -1;
1891			}
1892			ocs_lock(&node->sport->node_group_lock);
1893		} else {
1894			uint32_t idx = 0;
1895
1896			ocs_list_foreach(&node_group_dir->node_group_list, node_group) {
1897				if (idx >= ocs->hlm_group_size) {
1898					ocs_log_err(node->ocs, "assertion failed: idx >= ocs->hlm_group_size\n");
1899					ocs_unlock(&node->sport->node_group_lock);
1900					return -1;
1901				}
1902
1903				if (idx == node_group_dir->next_idx) {
1904					break;
1905				}
1906				idx ++;
1907			}
1908			if (idx == ocs->hlm_group_size) {
1909				node_group = ocs_list_get_head(&node_group_dir->node_group_list);
1910			}
1911			if (++node_group_dir->next_idx >= node_group_dir->node_group_list_count) {
1912				node_group_dir->next_idx = 0;
1913			}
1914		}
1915	ocs_unlock(&node->sport->node_group_lock);
1916
1917	/* Initialize a pointer in the node back to the node group */
1918	node->node_group = node_group;
1919
1920	/* Join this node into the group */
1921	hrc = ocs_hw_node_group_attach(&ocs->hw, node_group, &node->rnode);
1922
1923	return (hrc == OCS_HW_RTN_SUCCESS) ? 0 : -1;
1924}
1925
1926
1927