1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23 * Use is subject to license terms.
24 */
25
26/*
27 * SunOS 5.x Multithreaded STREAMS DLPI FCIP Module
28 * This is a pseudo driver module to handle encapsulation of IP and ARP
29 * datagrams over FibreChannel interfaces. FCIP is a cloneable STREAMS
30 * driver module which interfaces with IP/ARP using DLPI. This module
31 * is a Style-2 DLS provider.
32 *
33 * The implementation of this module is based on RFC 2625 which gives
34 * details on the encapsulation of IP/ARP data over FibreChannel.
35 * The fcip module needs to resolve an IP address to a port address before
36 * sending data to a destination port. A FC device port has 2 addresses
 * associated with it: an 8 byte World Wide unique Port Name and a 3 byte
38 * volatile Port number or Port_ID.
39 *
40 * The mapping between a IP address and the World Wide Port Name is handled
41 * by the ARP layer since the IP over FC draft requires the MAC address to
42 * be the least significant six bytes of the WorldWide Port Names. The
43 * fcip module however needs to identify the destination port uniquely when
44 * the destination FC device has multiple FC ports.
45 *
46 * The FC layer mapping between the World Wide Port Name and the Port_ID
47 * will be handled through the use of a fabric name server or through the
48 * use of the FARP ELS command as described in the draft. Since the Port_IDs
49 * are volatile, the mapping between the World Wide Port Name and Port_IDs
50 * must be maintained and validated before use each time a datagram
51 * needs to be sent to the destination ports. The FC transport module
52 * informs the fcip module of all changes to states of ports on the
53 * fabric through registered callbacks. This enables the fcip module
54 * to maintain the WW_PN to Port_ID mappings current.
55 *
56 * For details on how this module interfaces with the FibreChannel Transport
57 * modules, refer to PSARC/1997/385. Chapter 3 of the FibreChannel Transport
58 * Programming guide details the APIs between ULPs and the Transport.
59 *
60 * Now for some Caveats:
61 *
62 * RFC 2625 requires that a FibreChannel Port name (the Port WWN) have
63 * the NAA bits set to '0001' indicating a IEEE 48bit address which
64 * corresponds to a ULA (Universal LAN MAC address). But with FibreChannel
65 * adapters containing 2 or more ports, IEEE naming cannot identify the
66 * ports on an adapter uniquely so we will in the first implementation
67 * be operating only on Port 0 of each adapter.
68 */
69
70#include	<sys/types.h>
71#include	<sys/errno.h>
72#include	<sys/debug.h>
73#include	<sys/time.h>
74#include	<sys/sysmacros.h>
75#include	<sys/systm.h>
76#include	<sys/user.h>
77#include	<sys/stropts.h>
78#include	<sys/stream.h>
79#include	<sys/strlog.h>
80#include	<sys/strsubr.h>
81#include	<sys/cmn_err.h>
82#include	<sys/cpu.h>
83#include	<sys/kmem.h>
84#include	<sys/conf.h>
85#include	<sys/ddi.h>
86#include	<sys/sunddi.h>
87#include	<sys/ksynch.h>
88#include	<sys/stat.h>
89#include	<sys/kstat.h>
90#include	<sys/vtrace.h>
91#include	<sys/strsun.h>
92#include	<sys/varargs.h>
93#include	<sys/modctl.h>
94#include 	<sys/thread.h>
95#include 	<sys/var.h>
96#include 	<sys/proc.h>
97#include	<inet/common.h>
98#include	<netinet/ip6.h>
99#include	<inet/ip.h>
100#include	<inet/arp.h>
101#include	<inet/mi.h>
102#include	<inet/nd.h>
103#include	<sys/dlpi.h>
104#include	<sys/ethernet.h>
105#include	<sys/file.h>
106#include	<sys/syslog.h>
107#include	<sys/disp.h>
108#include	<sys/taskq.h>
109
110/*
111 * Leadville includes
112 */
113
114#include	<sys/fibre-channel/fc.h>
115#include	<sys/fibre-channel/impl/fc_ulpif.h>
116#include	<sys/fibre-channel/ulp/fcip.h>
117
118/*
119 * TNF Probe/trace facility include
120 */
121#if defined(lint) || defined(FCIP_TNF_ENABLED)
122#include <sys/tnf_probe.h>
123#endif
124
125#define	FCIP_ESBALLOC
126
127/*
128 * Function prototypes
129 */
130
131/* standard loadable modules entry points */
132static int	fcip_attach(dev_info_t *, ddi_attach_cmd_t);
133static int 	fcip_detach(dev_info_t *, ddi_detach_cmd_t);
134static void 	fcip_dodetach(struct fcipstr *slp);
135static int fcip_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd,
136    void *arg, void **result);
137
138
139/* streams specific */
140static void fcip_setipq(struct fcip *fptr);
141static int fcip_wput(queue_t *, mblk_t *);
142static int fcip_wsrv(queue_t *);
143static void fcip_proto(queue_t *, mblk_t *);
144static void fcip_ioctl(queue_t *, mblk_t *);
145static int fcip_open(queue_t *wq, dev_t *devp, int flag,
146		int sflag, cred_t *credp);
147static int fcip_close(queue_t *rq, int flag, int otyp, cred_t *credp);
148static int fcip_start(queue_t *wq, mblk_t *mp, struct fcip *fptr,
149    struct fcip_dest *fdestp, int flags);
150static void fcip_sendup(struct fcip *fptr, mblk_t *mp,
151    struct fcipstr *(*acceptfunc)());
152static struct fcipstr *fcip_accept(struct fcipstr *slp, struct fcip *fptr,
153    int type, la_wwn_t *dhostp);
154static mblk_t *fcip_addudind(struct fcip *fptr, mblk_t *mp,
155    fcph_network_hdr_t *nhdr, int type);
156static int fcip_setup_mac_addr(struct fcip *fptr);
157static void fcip_kstat_init(struct fcip *fptr);
158static int fcip_stat_update(kstat_t *, int);
159
160
161/* dlpi specific */
162static void fcip_spareq(queue_t *wq, mblk_t *mp);
163static void fcip_pareq(queue_t *wq, mblk_t *mp);
164static void fcip_ubreq(queue_t *wq, mblk_t *mp);
165static void fcip_breq(queue_t *wq, mblk_t *mp);
166static void fcip_dreq(queue_t *wq, mblk_t *mp);
167static void fcip_areq(queue_t *wq, mblk_t *mp);
168static void fcip_udreq(queue_t *wq, mblk_t *mp);
169static void fcip_ireq(queue_t *wq, mblk_t *mp);
170static void fcip_dl_ioc_hdr_info(queue_t *wq, mblk_t *mp);
171
172
173/* solaris sundry, DR/CPR etc */
174static int fcip_cache_constructor(void *buf, void *arg, int size);
175static void fcip_cache_destructor(void *buf, void *size);
176static int fcip_handle_suspend(fcip_port_info_t *fport, fc_detach_cmd_t cmd);
177static int fcip_handle_resume(fcip_port_info_t *fport,
178    fc_ulp_port_info_t *port_info, fc_attach_cmd_t cmd);
179static fcip_port_info_t *fcip_softstate_free(fcip_port_info_t *fport);
180static int fcip_port_attach_handler(struct fcip *fptr);
181
182
183/*
184 * ulp - transport interface function prototypes
185 */
186static int fcip_port_attach(opaque_t ulp_handle, fc_ulp_port_info_t *,
187    fc_attach_cmd_t cmd, uint32_t sid);
188static int fcip_port_detach(opaque_t ulp_handle, fc_ulp_port_info_t *,
189    fc_detach_cmd_t cmd);
190static int fcip_port_ioctl(opaque_t ulp_handle,  opaque_t port_handle,
191    dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
192    uint32_t claimed);
193static void fcip_statec_cb(opaque_t ulp_handle, opaque_t phandle,
194    uint32_t port_state, uint32_t port_top, fc_portmap_t changelist[],
195    uint32_t listlen, uint32_t sid);
196static int fcip_els_cb(opaque_t ulp_handle, opaque_t phandle,
197    fc_unsol_buf_t *buf, uint32_t claimed);
198static int fcip_data_cb(opaque_t ulp_handle, opaque_t phandle,
199    fc_unsol_buf_t *payload, uint32_t claimed);
200
201
202/* Routing table specific */
203static void fcip_handle_topology(struct fcip *fptr);
204static int fcip_init_port(struct fcip *fptr);
205struct fcip_routing_table *fcip_lookup_rtable(struct fcip *fptr,
206    la_wwn_t *pwwn, int matchflag);
207static void fcip_rt_update(struct fcip *fptr, fc_portmap_t *devlist,
208    uint32_t listlen);
209static void fcip_rt_flush(struct fcip *fptr);
210static void fcip_rte_remove_deferred(void *arg);
211static int fcip_do_plogi(struct fcip *fptr, struct fcip_routing_table *frp);
212
213
214/* dest table specific */
215static struct fcip_dest *fcip_get_dest(struct fcip *fptr,
216    la_wwn_t *dlphys);
217static struct fcip_dest *fcip_add_dest(struct fcip *fptr,
218    struct fcip_routing_table *frp);
219static int fcip_dest_add_broadcast_entry(struct fcip *fptr, int new_flag);
220static uint32_t fcip_get_broadcast_did(struct fcip *fptr);
221static void fcip_cleanup_dest(struct fcip *fptr);
222
223
224/* helper functions */
225static fcip_port_info_t *fcip_get_port(opaque_t phandle);
226static int fcip_wwn_compare(la_wwn_t *wwn1, la_wwn_t *wwn2, int flag);
227static void fcip_ether_to_str(struct ether_addr *e, caddr_t s);
228static int fcip_port_get_num_pkts(struct fcip *fptr);
229static int fcip_check_port_busy(struct fcip *fptr);
230static void fcip_check_remove_minor_node(void);
231static int fcip_set_wwn(la_wwn_t *pwwn);
232static int fcip_plogi_in_progress(struct fcip *fptr);
233static int fcip_check_port_exists(struct fcip *fptr);
234static int fcip_is_supported_fc_topology(int fc_topology);
235
236
237/* pkt specific */
238static fcip_pkt_t *fcip_pkt_alloc(struct fcip *fptr, mblk_t *bp,
239    int flags, int datalen);
240static void fcip_pkt_free(struct fcip_pkt *fcip_pkt, int flags);
241static fcip_pkt_t *fcip_ipkt_alloc(struct fcip *fptr, int cmdlen,
242    int resplen, opaque_t pd, int flags);
243static void fcip_ipkt_free(fcip_pkt_t *fcip_pkt);
244static void fcip_ipkt_callback(fc_packet_t *fc_pkt);
245static void fcip_free_pkt_dma(fcip_pkt_t *fcip_pkt);
246static void fcip_pkt_callback(fc_packet_t *fc_pkt);
247static void fcip_init_unicast_pkt(fcip_pkt_t *fcip_pkt, fc_portid_t sid,
248    fc_portid_t did, void (*comp) ());
249static int fcip_transport(fcip_pkt_t *fcip_pkt);
250static void fcip_pkt_timeout(void *arg);
251static void fcip_timeout(void *arg);
252static void fcip_fdestp_enqueue_pkt(struct fcip_dest *fdestp,
253    fcip_pkt_t *fcip_pkt);
254static int fcip_fdestp_dequeue_pkt(struct fcip_dest *fdestp,
255    fcip_pkt_t *fcip_pkt);
256static int fcip_sendup_constructor(void *buf, void *arg, int flags);
257static void fcip_sendup_thr(void *arg);
258static int fcip_sendup_alloc_enque(struct fcip *ftpr, mblk_t *mp,
259    struct fcipstr *(*f)());
260
261/*
262 * zero copy inbound data handling
263 */
264#ifdef FCIP_ESBALLOC
265static void fcip_ubfree(char *arg);
266#endif /* FCIP_ESBALLOC */
267
268#if !defined(FCIP_ESBALLOC)
269static void *fcip_allocb(size_t size, uint_t pri);
270#endif
271
272
273/* FCIP FARP support functions */
274static struct fcip_dest *fcip_do_farp(struct fcip *fptr, la_wwn_t *pwwn,
275    char *ip_addr, size_t ip_addr_len, int flags);
276static void fcip_init_broadcast_pkt(fcip_pkt_t *fcip_pkt, void (*comp) (),
277    int is_els);
278static int fcip_handle_farp_request(struct fcip *fptr, la_els_farp_t *fcmd);
279static int fcip_handle_farp_response(struct fcip *fptr, la_els_farp_t *fcmd);
280static void fcip_cache_arp_broadcast(struct fcip *ftpr, fc_unsol_buf_t *buf);
281static void fcip_port_ns(void *arg);
282
#ifdef DEBUG

#include <sys/debug.h>

#define	FCIP_DEBUG_DEFAULT	0x1
#define	FCIP_DEBUG_ATTACH	0x2
#define	FCIP_DEBUG_INIT		0x4
#define	FCIP_DEBUG_DETACH	0x8
#define	FCIP_DEBUG_DLPI		0x10
#define	FCIP_DEBUG_ELS		0x20
#define	FCIP_DEBUG_DOWNSTREAM	0x40
#define	FCIP_DEBUG_UPSTREAM	0x80
#define	FCIP_DEBUG_MISC		0x100

#define	FCIP_DEBUG_STARTUP	(FCIP_DEBUG_ATTACH|FCIP_DEBUG_INIT)
#define	FCIP_DEBUG_DATAOUT	(FCIP_DEBUG_DLPI|FCIP_DEBUG_DOWNSTREAM)
#define	FCIP_DEBUG_DATAIN	(FCIP_DEBUG_ELS|FCIP_DEBUG_UPSTREAM)

static int fcip_debug = FCIP_DEBUG_DEFAULT;

/*
 * Emit a cmn_err message when any bit in "level" is enabled in
 * fcip_debug. "args" is a parenthesized cmn_err argument list, e.g.
 * FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "...")).
 *
 * Wrapped in do/while (0) so an invocation parses as exactly one
 * statement: the previous expansion was a bare "if" followed by a
 * stray semicolon, which could capture a following "else" or break
 * unbraced if/else bodies at the call site.
 */
#define	FCIP_DEBUG(level, args)	\
	do {	\
		if (fcip_debug & (level))	\
			cmn_err args;	\
	} while (0)

#else	/* DEBUG */

#define	FCIP_DEBUG(level, args)		/* do nothing */

#endif	/* DEBUG */
311
312#define	KIOIP	KSTAT_INTR_PTR(fcip->fcip_intrstats)
313
314/*
315 * Endian independent ethernet to WWN copy
316 */
317#define	ether_to_wwn(E, W)	\
318	bzero((void *)(W), sizeof (la_wwn_t)); \
319	bcopy((void *)(E), (void *)&((W)->raw_wwn[2]), ETHERADDRL); \
320	(W)->raw_wwn[0] |= 0x10
321
322/*
323 * wwn_to_ether : Endian independent, copies a WWN to struct ether_addr.
324 * The args to the macro are pointers to WWN and ether_addr structures
325 */
326#define	wwn_to_ether(W, E)	\
327	bcopy((void *)&((W)->raw_wwn[2]), (void *)E, ETHERADDRL)
328
329/*
330 * The module_info structure contains identification and limit values.
331 * All queues associated with a certain driver share the same module_info
332 * structures. This structure defines the characteristics of that driver/
333 * module's queues. The module name must be unique. The max and min packet
334 * sizes limit the no. of characters in M_DATA messages. The Hi and Lo
335 * water marks are for flow control when a module has a service procedure.
336 */
/* Shared by the read and write qinit structures (fcip_rinit/fcip_winit). */
static struct module_info	fcipminfo = {
	FCIPIDNUM,	/* mi_idnum : Module ID num */
	FCIPNAME, 	/* mi_idname: Module Name */
	FCIPMINPSZ,	/* mi_minpsz: Min packet size */
	FCIPMAXPSZ,	/* mi_maxpsz: Max packet size */
	FCIPHIWAT,	/* mi_hiwat : High water mark */
	FCIPLOWAT	/* mi_lowat : Low water mark */
};
345
346/*
 * The qinit structures contain the module put, service, open and close
348 * procedure pointers. All modules and drivers with the same streamtab
349 * file (i.e same fmodsw or cdevsw entry points) point to the same
350 * upstream (read) and downstream (write) qinit structs.
351 */
/* Read side qinit: only open/close live here; no put/service procedures. */
static struct qinit	fcip_rinit = {
	NULL,		/* qi_putp */
	NULL,		/* qi_srvp */
	fcip_open,	/* qi_qopen */
	fcip_close,	/* qi_qclose */
	NULL,		/* qi_qadmin */
	&fcipminfo,	/* qi_minfo */
	NULL		/* qi_mstat */
};
361
/* Write side qinit: fcip_wput/fcip_wsrv are the put and service procedures. */
static struct qinit	fcip_winit = {
	fcip_wput,	/* qi_putp */
	fcip_wsrv,	/* qi_srvp */
	NULL,		/* qi_qopen */
	NULL,		/* qi_qclose */
	NULL,		/* qi_qadmin */
	&fcipminfo,	/* qi_minfo */
	NULL		/* qi_mstat */
};
371
372/*
373 * streamtab contains pointers to the read and write qinit structures
374 */
375
static struct streamtab fcip_info = {
	&fcip_rinit,	/* st_rdinit */
	&fcip_winit,	/* st_wrinit */
	NULL,		/* st_muxrinit: not a multiplexor */
	NULL,		/* st_muxwrinit: not a multiplexor */
};
382
/*
 * Character/block entry points. All access to fcip goes through the
 * STREAMS interface (the fcip_info streamtab below), so the classic
 * cb entry points are all nodev.
 */
static struct cb_ops  fcip_cb_ops = {
	nodev,				/* open */
	nodev,				/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	nodev,				/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	ddi_prop_op,			/* cb_prop_op */
	&fcip_info,			/* streamtab  */
	D_MP | D_HOTPLUG,		/* Driver compatibility flag */
	CB_REV,				/* rev */
	nodev,				/* int (*cb_aread)() */
	nodev				/* int (*cb_awrite)() */
};
403
404/*
405 * autoconfiguration routines.
406 */
/* dev_ops: no probe/identify needed for this pseudo driver. */
static struct dev_ops fcip_ops = {
	DEVO_REV,		/* devo_rev, */
	0,			/* refcnt  */
	fcip_getinfo,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	fcip_attach,		/* attach */
	fcip_detach,		/* detach */
	nodev,			/* RESET */
	&fcip_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	ddi_power		/* power management */
};
420
421#define	FCIP_VERSION	"1.61"
422#define	FCIP_NAME	"SunFC FCIP v" FCIP_VERSION
423
424#define	PORT_DRIVER	"fp"
425
/*
 * Allocate and zero "number" contiguous structures; the first macro
 * argument is the structure type name (note the parameter is literally
 * named "struct", which the preprocessor permits). KM_SLEEP: may block
 * until memory is available, so do not call from interrupt context.
 */
#define	GETSTRUCT(struct, number)	\
	((struct *)kmem_zalloc((size_t)(sizeof (struct) * (number)), \
		KM_SLEEP))
429
/* Loadable module descriptor: identifies fcip as a device driver. */
static struct modldrv modldrv = {
	&mod_driverops,			/* Type of module - driver */
	FCIP_NAME,			/* Name of module */
	&fcip_ops,			/* driver ops */
};
435
/* Single-driver linkage passed to mod_install()/mod_remove()/mod_info(). */
static struct modlinkage modlinkage = {
	MODREV_1, (void *)&modldrv, NULL
};
439
440
441/*
442 * Now for some global statics
443 */
444static uint32_t	fcip_ub_nbufs = FCIP_UB_NBUFS;
445static uint32_t fcip_ub_size = FCIP_UB_SIZE;
446static int fcip_pkt_ttl_ticks = FCIP_PKT_TTL;
447static int fcip_tick_incr = 1;
448static int fcip_wait_cmds = FCIP_WAIT_CMDS;
449static int fcip_num_attaching = 0;
450static int fcip_port_attach_pending = 0;
451static int fcip_create_nodes_on_demand = 1;	/* keep it similar to fcp */
452static int fcip_cache_on_arp_broadcast = 0;
453static int fcip_farp_supported = 0;
454static int fcip_minor_node_created = 0;
455
456/*
457 * Supported FCAs
458 */
459#define	QLC_PORT_1_ID_BITS		0x100
460#define	QLC_PORT_2_ID_BITS		0x101
461#define	QLC_PORT_NAA			0x2
462#define	QLC_MODULE_NAME			"qlc"
463#define	IS_QLC_PORT(port_dip)		\
464			(strcmp(ddi_driver_name(ddi_get_parent((port_dip))),\
465			QLC_MODULE_NAME) == 0)
466
467
468/*
469 * fcip softstate structures head.
470 */
471
472static void *fcip_softp = NULL;
473
474/*
475 * linked list of active (inuse) driver streams
476 */
477
478static int fcip_num_instances = 0;
479static dev_info_t *fcip_module_dip = (dev_info_t *)0;
480
481
482/*
483 * Ethernet broadcast address: Broadcast addressing in IP over fibre
484 * channel should be the IEEE ULA (also the low 6 bytes of the Port WWN).
485 *
486 * The broadcast addressing varies for differing topologies a node may be in:
487 *	- On a private loop the ARP broadcast is a class 3 sequence sent
488 *	  using OPNfr (Open Broadcast Replicate primitive) followed by
489 *	  the ARP frame to D_ID 0xFFFFFF
490 *
491 *	- On a public Loop the broadcast sequence is sent to AL_PA 0x00
492 *	  (no OPNfr primitive).
493 *
494 *	- For direct attach and point to point topologies we just send
495 *	  the frame to D_ID 0xFFFFFF
496 *
497 * For public loop the handling would probably be different - for now
498 * I'll just declare this struct - It can be deleted if not necessary.
499 *
500 */
501
502
503/*
504 * DL_INFO_ACK template for the fcip module. The dl_info_ack_t structure is
 * returned as a part of a DL_INFO_ACK message which is an M_PCPROTO message
506 * returned in response to a DL_INFO_REQ message sent to us from a DLS user
507 * Let us fake an ether header as much as possible.
508 *
509 * dl_addr_length is the Provider's DLSAP addr which is SAP addr +
510 *                Physical addr of the provider. We set this to
511 *                ushort_t + sizeof (la_wwn_t) for Fibre Channel ports.
512 * dl_mac_type    Lets just use DL_ETHER - we can try using DL_IPFC, a new
513 *		  dlpi.h define later.
514 * dl_sap_length  -2 indicating the SAP address follows the Physical addr
515 *		  component in the DLSAP addr.
516 * dl_service_mode: DLCLDS - connectionless data link service.
517 *
518 */
519
/* Template copied into each DL_INFO_ACK reply; see the block comment above. */
static dl_info_ack_t fcip_infoack = {
	DL_INFO_ACK,				/* dl_primitive */
	FCIPMTU,				/* dl_max_sdu */
	0,					/* dl_min_sdu */
	FCIPADDRL,				/* dl_addr_length */
	DL_ETHER,				/* dl_mac_type */
	0,					/* dl_reserved */
	0,					/* dl_current_state */
	-2,					/* dl_sap_length: SAP follows phys addr */
	DL_CLDLS,				/* dl_service_mode */
	0,					/* dl_qos_length */
	0,					/* dl_qos_offset */
	0,					/* dl_range_length */
	0,					/* dl_range_offset */
	DL_STYLE2,				/* dl_provider_style */
	sizeof (dl_info_ack_t),			/* dl_addr_offset: addr follows ack */
	DL_VERSION_2,				/* dl_version */
	ETHERADDRL,				/* dl_brdcst_addr_length */
	sizeof (dl_info_ack_t) + FCIPADDRL,	/* dl_brdcst_addr_offset */
	0					/* dl_growth */
};
541
542/*
543 * FCIP broadcast address definition.
544 */
545static	struct ether_addr	fcipnhbroadcastaddr = {
546	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
547};
548
549/*
550 * RFC2625 requires the broadcast ARP address in the ARP data payload to
551 * be set to 0x00 00 00 00 00 00 for ARP broadcast packets
552 */
553static	struct ether_addr	fcip_arpbroadcast_addr = {
554	0x00, 0x00, 0x00, 0x00, 0x00, 0x00
555};
556
557
/*
 * Copy a 6 byte MAC address from "src" to "dest".
 * Defined as a plain expression with no trailing semicolon: the old
 * definition ended in ';', so "ether_bcopy(a, b);" expanded to an
 * empty extra statement and would not parse in an unbraced if/else.
 */
#define	ether_bcopy(src, dest)	bcopy((src), (dest), ETHERADDRL)
559
560/*
561 * global kernel locks
562 */
563static kcondvar_t	fcip_global_cv;
564static kmutex_t		fcip_global_mutex;
565
566/*
567 * fctl external defines
568 */
569extern int fc_ulp_add(fc_ulp_modinfo_t *);
570
571/*
572 * fctl data structures
573 */
574
575#define	FCIP_REV	0x07
576
577/* linked list of port info structures */
578static fcip_port_info_t *fcip_port_head = NULL;
579
580/* linked list of fcip structures */
581static struct fcipstr	*fcipstrup = NULL;
582static krwlock_t	fcipstruplock;
583
584
585/*
586 * Module information structure. This structure gives the FC Transport modules
587 * information about an ULP that registers with it.
588 */
589static fc_ulp_modinfo_t	fcip_modinfo = {
590	0,			/* for xref checks? */
591	FCTL_ULP_MODREV_4,	/* FCIP revision */
592	FC_TYPE_IS8802_SNAP,	/* type 5 for SNAP encapsulated datagrams */
593	FCIP_NAME,		/* module name as in the modldrv struct */
594	0x0,			/* get all statec callbacks for now */
595	fcip_port_attach,	/* port attach callback */
596	fcip_port_detach,	/* port detach callback */
597	fcip_port_ioctl,	/* port ioctl callback */
598	fcip_els_cb,		/* els callback */
599	fcip_data_cb,		/* data callback */
600	fcip_statec_cb		/* state change callback */
601};
602
603
604/*
605 * Solaris 9 and up, the /kernel/drv/fp.conf file will have the following entry
606 *
607 * ddi-forceattach=1;
608 *
609 * This will ensure that fp is loaded at bootup. No additional checks are needed
610 */
/*
 * _init: loadable module entry point.
 * Initializes the global locks, registers fcip with the FC transport
 * (fc_ulp_add), sets up per-instance soft state and finally installs
 * the module. Each failure path unwinds everything done before it.
 * Returns 0 on success or an errno value on failure.
 */
int
_init(void)
{
	int	rval;

	FCIP_TNF_LOAD();

	/*
	 * Initialize the mutexs used by port attach and other callbacks.
	 * The transport can call back into our port_attach_callback
	 * routine even before _init() completes and bad things can happen.
	 */
	mutex_init(&fcip_global_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&fcip_global_cv, NULL, CV_DRIVER, NULL);
	rw_init(&fcipstruplock, NULL, RW_DRIVER, NULL);

	/*
	 * Flag that a port attach callback may be imminent; _fini()
	 * refuses to unload the module while this is set.
	 */
	mutex_enter(&fcip_global_mutex);
	fcip_port_attach_pending = 1;
	mutex_exit(&fcip_global_mutex);

	/*
	 * Now attempt to register fcip with the transport.
	 * If fc_ulp_add fails, fcip module will not be loaded.
	 */
	rval = fc_ulp_add(&fcip_modinfo);
	if (rval != FC_SUCCESS) {
		mutex_destroy(&fcip_global_mutex);
		cv_destroy(&fcip_global_cv);
		rw_destroy(&fcipstruplock);
		/* Map the transport's error codes onto errno values. */
		switch (rval) {
		case FC_ULP_SAMEMODULE:
			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
			    "!fcip: module is already registered with"
			    " transport"));
			rval = EEXIST;
			break;
		case FC_ULP_SAMETYPE:
			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
			    "!fcip: Another module of the same ULP type 0x%x"
			    " is already registered with the transport",
			    fcip_modinfo.ulp_type));
			rval = EEXIST;
			break;
		case FC_BADULP:
			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
			    "!fcip: Current fcip version 0x%x does not match"
			    " fctl version",
			    fcip_modinfo.ulp_rev));
			rval = ENODEV;
			break;
		default:
			FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
			    "!fcip: fc_ulp_add failed with status 0x%x", rval));
			rval = ENODEV;
			break;
		}
		FCIP_TNF_UNLOAD(&modlinkage);
		return (rval);
	}

	/* Soft state: one struct fcip per attached port instance. */
	if ((rval = ddi_soft_state_init(&fcip_softp, sizeof (struct fcip),
			FCIP_NUM_INSTANCES)) != 0) {
		/* Undo the locks and the transport registration. */
		mutex_destroy(&fcip_global_mutex);
		cv_destroy(&fcip_global_cv);
		rw_destroy(&fcipstruplock);
		(void) fc_ulp_remove(&fcip_modinfo);
		FCIP_TNF_UNLOAD(&modlinkage);
		return (rval);
	}

	if ((rval = mod_install(&modlinkage)) != 0) {
		/* Full unwind: transport, locks and soft state. */
		FCIP_TNF_UNLOAD(&modlinkage);
		(void) fc_ulp_remove(&fcip_modinfo);
		mutex_destroy(&fcip_global_mutex);
		cv_destroy(&fcip_global_cv);
		rw_destroy(&fcipstruplock);
		ddi_soft_state_fini(&fcip_softp);
	}
	return (rval);
}
691
692/*
693 * Unload the port driver if this was the only ULP loaded and then
694 * deregister with the transport.
695 */
/*
 * _fini: loadable module exit point. Refuses to unload while a port
 * attach is in flight, removes the module from the framework, then
 * deregisters from the transport and tears down the globals set up
 * in _init(). Returns 0 on success or an errno value.
 */
int
_fini(void)
{
	int	rval;
	int	rval1;

	/*
	 * Do not permit the module to be unloaded before a port
	 * attach callback has happened.
	 */
	mutex_enter(&fcip_global_mutex);
	if (fcip_num_attaching || fcip_port_attach_pending) {
		mutex_exit(&fcip_global_mutex);
		return (EBUSY);
	}
	mutex_exit(&fcip_global_mutex);

	/* Let the module framework veto the unload if we are in use. */
	if ((rval = mod_remove(&modlinkage)) != 0) {
		return (rval);
	}

	/*
	 * unregister with the transport layer
	 */
	rval1 = fc_ulp_remove(&fcip_modinfo);

	/*
	 * If the ULP was not registered with the transport, init should
	 * have failed. If transport has no knowledge of our existence
	 * we should simply bail out and succeed
	 */
#ifdef DEBUG
	if (rval1 == FC_BADULP) {
		FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
		"fcip: ULP was never registered with the transport"));
		rval = ENODEV;
	} else if (rval1 == FC_BADTYPE) {
		FCIP_DEBUG(FCIP_DEBUG_DEFAULT, (CE_WARN,
			"fcip: No ULP of this type 0x%x was registered with "
			"transport", fcip_modinfo.ulp_type));
		rval = ENODEV;
	}
#endif /* DEBUG */

	/* Module is out of the framework; safe to tear down globals now. */
	mutex_destroy(&fcip_global_mutex);
	rw_destroy(&fcipstruplock);
	cv_destroy(&fcip_global_cv);
	ddi_soft_state_fini(&fcip_softp);

	FCIP_TNF_UNLOAD(&modlinkage);

	return (rval);
}
749
750/*
751 * Info about this loadable module
752 */
753int
754_info(struct modinfo *modinfop)
755{
756	return (mod_info(&modlinkage, modinfop));
757}
758
759/*
760 * The port attach callback is invoked by the port driver when a FCA
761 * port comes online and binds with the transport layer. The transport
762 * then callsback into all ULP modules registered with it. The Port attach
763 * call back will also provide the ULP module with the Port's WWN and S_ID
764 */
765/* ARGSUSED */
766static int
767fcip_port_attach(opaque_t ulp_handle, fc_ulp_port_info_t *port_info,
768    fc_attach_cmd_t cmd, uint32_t sid)
769{
770	int 			rval = FC_FAILURE;
771	int 			instance;
772	struct fcip		*fptr;
773	fcip_port_info_t	*fport = NULL;
774	fcip_port_info_t	*cur_fport;
775	fc_portid_t		src_id;
776
777	switch (cmd) {
778	case FC_CMD_ATTACH: {
779		la_wwn_t	*ww_pn = NULL;
780		/*
781		 * It was determined that, as per spec, the lower 48 bits of
782		 * the port-WWN will always be unique. This will make the MAC
783		 * address (i.e the lower 48 bits of the WWN), that IP/ARP
784		 * depend on, unique too. Hence we should be able to remove the
785		 * restriction of attaching to only one of the ports of
786		 * multi port FCAs.
787		 *
788		 * Earlier, fcip used to attach only to qlc module and fail
789		 * silently for attach failures resulting from unknown FCAs or
790		 * unsupported FCA ports. Now, we'll do no such checks.
791		 */
792		ww_pn = &port_info->port_pwwn;
793
794		FCIP_TNF_PROBE_2((fcip_port_attach, "fcip io", /* CSTYLED */,
795			tnf_string, msg, "port id bits",
796			tnf_opaque, nport_id, ww_pn->w.nport_id));
797		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE,
798		    "port id bits: 0x%x", ww_pn->w.nport_id));
799		/*
800		 * A port has come online
801		 */
802		mutex_enter(&fcip_global_mutex);
803		fcip_num_instances++;
804		fcip_num_attaching++;
805
806		if (fcip_port_head == NULL) {
807			/* OK to sleep here ? */
808			fport = kmem_zalloc(sizeof (fcip_port_info_t),
809						KM_NOSLEEP);
810			if (fport == NULL) {
811				fcip_num_instances--;
812				fcip_num_attaching--;
813				ASSERT(fcip_num_attaching >= 0);
814				mutex_exit(&fcip_global_mutex);
815				rval = FC_FAILURE;
816				cmn_err(CE_WARN, "!fcip(%d): port attach "
817				    "failed: alloc failed",
818				    ddi_get_instance(port_info->port_dip));
819				goto done;
820			}
821			fcip_port_head = fport;
822		} else {
823			/*
824			 * traverse the port list and also check for
825			 * duplicate port attaches - Nothing wrong in being
826			 * paranoid Heh Heh.
827			 */
828			cur_fport = fcip_port_head;
829			while (cur_fport != NULL) {
830				if (cur_fport->fcipp_handle ==
831				    port_info->port_handle) {
832					fcip_num_instances--;
833					fcip_num_attaching--;
834					ASSERT(fcip_num_attaching >= 0);
835					mutex_exit(&fcip_global_mutex);
836					FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
837					    "!fcip(%d): port already "
838					    "attached!!", ddi_get_instance(
839					    port_info->port_dip)));
840					rval = FC_FAILURE;
841					goto done;
842				}
843				cur_fport = cur_fport->fcipp_next;
844			}
845			fport = kmem_zalloc(sizeof (fcip_port_info_t),
846						KM_NOSLEEP);
847			if (fport == NULL) {
848				rval = FC_FAILURE;
849				fcip_num_instances--;
850				fcip_num_attaching--;
851				ASSERT(fcip_num_attaching >= 0);
852				mutex_exit(&fcip_global_mutex);
853				cmn_err(CE_WARN, "!fcip(%d): port attach "
854				    "failed: alloc failed",
855				    ddi_get_instance(port_info->port_dip));
856				goto done;
857			}
858			fport->fcipp_next = fcip_port_head;
859			fcip_port_head = fport;
860		}
861
862		mutex_exit(&fcip_global_mutex);
863
864		/*
865		 * now fill in the details about the port itself
866		 */
867		fport->fcipp_linkage = *port_info->port_linkage;
868		fport->fcipp_handle = port_info->port_handle;
869		fport->fcipp_dip = port_info->port_dip;
870		fport->fcipp_topology = port_info->port_flags;
871		fport->fcipp_pstate = port_info->port_state;
872		fport->fcipp_naa = port_info->port_pwwn.w.naa_id;
873		bcopy(&port_info->port_pwwn, &fport->fcipp_pwwn,
874		    sizeof (la_wwn_t));
875		bcopy(&port_info->port_nwwn, &fport->fcipp_nwwn,
876		    sizeof (la_wwn_t));
877		fport->fcipp_fca_pkt_size = port_info->port_fca_pkt_size;
878		fport->fcipp_cmd_dma_attr = *port_info->port_cmd_dma_attr;
879		fport->fcipp_resp_dma_attr = *port_info->port_resp_dma_attr;
880		fport->fcipp_fca_acc_attr = *port_info->port_acc_attr;
881		src_id.port_id = sid;
882		src_id.priv_lilp_posit = 0;
883		fport->fcipp_sid = src_id;
884
885		/*
886		 * allocate soft state for this instance
887		 */
888		instance = ddi_get_instance(fport->fcipp_dip);
889		if (ddi_soft_state_zalloc(fcip_softp,
890		    instance) != DDI_SUCCESS) {
891			rval = FC_FAILURE;
892			cmn_err(CE_WARN, "!fcip(%d): port attach failed: "
893			    "soft state alloc failed", instance);
894			goto failure;
895		}
896
897		fptr = ddi_get_soft_state(fcip_softp, instance);
898
899		if (fptr == NULL) {
900			rval = FC_FAILURE;
901			cmn_err(CE_WARN, "!fcip(%d): port attach failed: "
902			    "failure to get soft state", instance);
903			goto failure;
904		}
905
906		/*
907		 * initialize all mutexes and locks required for this module
908		 */
909		mutex_init(&fptr->fcip_mutex, NULL, MUTEX_DRIVER, NULL);
910		mutex_init(&fptr->fcip_ub_mutex, NULL, MUTEX_DRIVER, NULL);
911		mutex_init(&fptr->fcip_rt_mutex, NULL, MUTEX_DRIVER, NULL);
912		mutex_init(&fptr->fcip_dest_mutex, NULL, MUTEX_DRIVER, NULL);
913		mutex_init(&fptr->fcip_sendup_mutex, NULL, MUTEX_DRIVER, NULL);
914		cv_init(&fptr->fcip_farp_cv, NULL, CV_DRIVER, NULL);
915		cv_init(&fptr->fcip_sendup_cv, NULL, CV_DRIVER, NULL);
916		cv_init(&fptr->fcip_ub_cv, NULL, CV_DRIVER, NULL);
917
918		mutex_enter(&fptr->fcip_mutex);
919
920		fptr->fcip_dip = fport->fcipp_dip;	/* parent's dip */
921		fptr->fcip_instance = instance;
922		fptr->fcip_ub_upstream = 0;
923
924		if (FC_PORT_STATE_MASK(port_info->port_state) ==
925		    FC_STATE_ONLINE) {
926			fptr->fcip_port_state = FCIP_PORT_ONLINE;
927			if (fptr->fcip_flags & FCIP_LINK_DOWN) {
928				fptr->fcip_flags &= ~FCIP_LINK_DOWN;
929			}
930		} else {
931			fptr->fcip_port_state = FCIP_PORT_OFFLINE;
932		}
933
934		fptr->fcip_flags |= FCIP_ATTACHING;
935		fptr->fcip_port_info = fport;
936
937		/*
938		 * Extract our MAC addr from our port's WWN. The lower 48
939		 * bits will be our MAC address
940		 */
941		wwn_to_ether(&fport->fcipp_nwwn, &fptr->fcip_macaddr);
942
943		fport->fcipp_fcip = fptr;
944
945		FCIP_DEBUG(FCIP_DEBUG_ATTACH,
946		    (CE_NOTE, "fcipdest : 0x%lx, rtable : 0x%lx",
947		    (long)(sizeof (fptr->fcip_dest)),
948		    (long)(sizeof (fptr->fcip_rtable))));
949
950		bzero(fptr->fcip_dest, sizeof (fptr->fcip_dest));
951		bzero(fptr->fcip_rtable, sizeof (fptr->fcip_rtable));
952
953		/*
954		 * create a taskq to handle sundry jobs for the driver
955		 * This way we can have jobs run in parallel
956		 */
957		fptr->fcip_tq = taskq_create("fcip_tasks",
958		    FCIP_NUM_THREADS, MINCLSYSPRI, FCIP_MIN_TASKS,
959		    FCIP_MAX_TASKS, TASKQ_PREPOPULATE);
960
961		mutex_exit(&fptr->fcip_mutex);
962
963		/*
964		 * create a separate thread to handle all unsolicited
965		 * callback handling. This is because unsolicited_callback
966		 * can happen from an interrupt context and the upstream
967		 * modules can put new messages right back in the same
968		 * thread context. This usually works fine, but sometimes
969		 * we may have to block to obtain the dest struct entries
970		 * for some remote ports.
971		 */
972		mutex_enter(&fptr->fcip_sendup_mutex);
973		if (thread_create(NULL, DEFAULTSTKSZ,
974		    (void (*)())fcip_sendup_thr, (caddr_t)fptr, 0, &p0,
975		    TS_RUN, minclsyspri) == NULL) {
976			mutex_exit(&fptr->fcip_sendup_mutex);
977			cmn_err(CE_WARN,
978			    "!unable to create fcip sendup thread for "
979			    " instance: 0x%x", instance);
980			rval = FC_FAILURE;
981			goto done;
982		}
983		fptr->fcip_sendup_thr_initted = 1;
984		fptr->fcip_sendup_head = fptr->fcip_sendup_tail = NULL;
985		mutex_exit(&fptr->fcip_sendup_mutex);
986
987
988		/* Let the attach handler do the rest */
989		if (fcip_port_attach_handler(fptr) != FC_SUCCESS) {
990			/*
991			 * We have already cleaned up so return
992			 */
993			rval = FC_FAILURE;
994			cmn_err(CE_WARN, "!fcip(%d): port attach failed",
995			    instance);
996			goto done;
997		}
998
999		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_CONT,
1000		    "!fcip attach for port instance (0x%x) successful",
1001		    instance));
1002
1003		rval = FC_SUCCESS;
1004		goto done;
1005	}
1006	case FC_CMD_POWER_UP:
1007	/* FALLTHROUGH */
1008	case FC_CMD_RESUME:
1009		mutex_enter(&fcip_global_mutex);
1010		fport = fcip_port_head;
1011		while (fport != NULL) {
1012			if (fport->fcipp_handle == port_info->port_handle) {
1013				break;
1014			}
1015			fport = fport->fcipp_next;
1016		}
1017		if (fport == NULL) {
1018			rval = FC_SUCCESS;
1019			mutex_exit(&fcip_global_mutex);
1020			goto done;
1021		}
1022		rval = fcip_handle_resume(fport, port_info, cmd);
1023		mutex_exit(&fcip_global_mutex);
1024		goto done;
1025
1026	default:
1027		FCIP_TNF_PROBE_2((fcip_port_attach, "fcip io", /* CSTYLED */,
1028			tnf_string, msg, "unknown command type",
1029			tnf_uint, cmd, cmd));
1030		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
1031		    "unknown cmd type 0x%x in port_attach", cmd));
1032		rval = FC_FAILURE;
1033		goto done;
1034	}
1035
1036failure:
1037	if (fport) {
1038		mutex_enter(&fcip_global_mutex);
1039		fcip_num_attaching--;
1040		ASSERT(fcip_num_attaching >= 0);
1041		(void) fcip_softstate_free(fport);
1042		fcip_port_attach_pending = 0;
1043		mutex_exit(&fcip_global_mutex);
1044	}
1045	return (rval);
1046
1047done:
1048	mutex_enter(&fcip_global_mutex);
1049	fcip_port_attach_pending = 0;
1050	mutex_exit(&fcip_global_mutex);
1051	return (rval);
1052}
1053
1054/*
1055 * fcip_port_attach_handler : Completes the port attach operation after
1056 * the ulp_port_attach routine has completed its ground work. The job
1057 * of this function among other things is to obtain and handle topology
1058 * specifics, initialize a port, setup broadcast address entries in
1059 * the fcip tables etc. This routine cleans up behind itself on failures.
1060 * Returns FC_SUCCESS or FC_FAILURE.
1061 */
static int
fcip_port_attach_handler(struct fcip *fptr)
{
	fcip_port_info_t		*fport = fptr->fcip_port_info;
	int				rval = FC_FAILURE;

	ASSERT(fport != NULL);

	mutex_enter(&fcip_global_mutex);

	FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE,
	    "fcip module dip: %p instance: %d",
	    (void *)fcip_module_dip, ddi_get_instance(fptr->fcip_dip)));

	if (fcip_module_dip == NULL) {
		clock_t		fcip_lbolt;

		fcip_lbolt = ddi_get_lbolt();
		/*
		 * we need to use the fcip devinfo for creating
		 * the clone device node, but the fcip attach
		 * (from its conf file entry claiming to be a
		 * child of pseudo) may not have happened yet.
		 * wait here for 10 seconds and fail port attach
		 * if the fcip devinfo is not attached yet
		 */
		fcip_lbolt += drv_usectohz(FCIP_INIT_DELAY);

		FCIP_DEBUG(FCIP_DEBUG_ATTACH,
		    (CE_WARN, "cv_timedwait lbolt %lx", fcip_lbolt));

		/*
		 * fcip_attach(DDI_ATTACH) sets fcip_module_dip and
		 * signals fcip_global_cv; we may also wake on timeout.
		 */
		(void) cv_timedwait(&fcip_global_cv, &fcip_global_mutex,
		    fcip_lbolt);

		if (fcip_module_dip == NULL) {
			/* devinfo still not there - give up */
			mutex_exit(&fcip_global_mutex);

			FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
				"fcip attach did not happen"));
			goto port_attach_cleanup;
		}
	}

	if ((!fcip_minor_node_created) &&
	    fcip_is_supported_fc_topology(fport->fcipp_topology)) {
		/*
		 * Checking for same topologies which are considered valid
		 * by fcip_handle_topology(). Dont create a minor node if
		 * nothing is hanging off the FC port.
		 */
		if (ddi_create_minor_node(fcip_module_dip, "fcip", S_IFCHR,
		    ddi_get_instance(fptr->fcip_dip), DDI_PSEUDO,
		    CLONE_DEV) == DDI_FAILURE) {
			mutex_exit(&fcip_global_mutex);
			FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_WARN,
			    "failed to create minor node for fcip(%d)",
			    ddi_get_instance(fptr->fcip_dip)));
			goto port_attach_cleanup;
		}
		/* one system-wide minor node; counted, not per-instance */
		fcip_minor_node_created++;
	}
	mutex_exit(&fcip_global_mutex);

	/*
	 * initialize port for traffic
	 */
	if (fcip_init_port(fptr) != FC_SUCCESS) {
		/* fcip_init_port has already cleaned up its stuff */

		mutex_enter(&fcip_global_mutex);

		if ((fcip_num_instances == 1) &&
		    (fcip_minor_node_created == 1)) {
			/* Remove minor node iff this is the last instance */
			ddi_remove_minor_node(fcip_module_dip, NULL);
		}

		mutex_exit(&fcip_global_mutex);

		goto port_attach_cleanup;
	}

	/* port is fully initialized; mark it open for business */
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~FCIP_ATTACHING;
	fptr->fcip_flags |= FCIP_INITED;
	fptr->fcip_timeout_ticks = 0;

	/*
	 * start the timeout threads
	 */
	fptr->fcip_timeout_id = timeout(fcip_timeout, fptr,
	    drv_usectohz(1000000));

	mutex_exit(&fptr->fcip_mutex);
	mutex_enter(&fcip_global_mutex);
	fcip_num_attaching--;
	ASSERT(fcip_num_attaching >= 0);
	mutex_exit(&fcip_global_mutex);
	rval = FC_SUCCESS;
	return (rval);

port_attach_cleanup:
	/* failure path: release the per-port soft state and give up */
	mutex_enter(&fcip_global_mutex);
	(void) fcip_softstate_free(fport);
	fcip_num_attaching--;
	ASSERT(fcip_num_attaching >= 0);
	mutex_exit(&fcip_global_mutex);
	rval = FC_FAILURE;
	return (rval);
}
1172
1173
1174/*
1175 * Handler for DDI_RESUME operations. Port must be ready to restart IP
1176 * traffic on resume
1177 */
static int
fcip_handle_resume(fcip_port_info_t *fport, fc_ulp_port_info_t *port_info,
    fc_attach_cmd_t cmd)
{
	int 		rval = FC_SUCCESS;
	struct fcip	*fptr = fport->fcipp_fcip;
	struct fcipstr	*tslp;
	int		index;


	ASSERT(fptr != NULL);

	mutex_enter(&fptr->fcip_mutex);

	if (cmd == FC_CMD_POWER_UP) {
		/*
		 * power up: if the instance is also SUSPENDED, leave the
		 * rest of the restart work to the later RESUME.
		 */
		fptr->fcip_flags &= ~(FCIP_POWER_DOWN);
		if (fptr->fcip_flags & FCIP_SUSPENDED) {
			mutex_exit(&fptr->fcip_mutex);
			return (FC_SUCCESS);
		}
	} else if (cmd == FC_CMD_RESUME) {
		fptr->fcip_flags &= ~(FCIP_SUSPENDED);
	} else {
		/* neither POWER_UP nor RESUME - reject the command */
		mutex_exit(&fptr->fcip_mutex);
		return (FC_FAILURE);
	}

	/*
	 * set the current port state and topology
	 * NOTE(review): topology is refreshed from port_info->port_flags
	 * rather than a dedicated topology field - confirm this matches
	 * the transport's fc_ulp_port_info_t convention.
	 */
	fport->fcipp_topology = port_info->port_flags;
	fport->fcipp_pstate = port_info->port_state;

	/* check whether any stream is plumbed on this instance */
	rw_enter(&fcipstruplock, RW_READER);
	for (tslp = fcipstrup; tslp; tslp = tslp->sl_nextp) {
		if (tslp->sl_fcip == fptr) {
			break;
		}
	}
	rw_exit(&fcipstruplock);

	/*
	 * No active streams on this port
	 */
	if (tslp == NULL) {
		rval = FC_SUCCESS;
		goto done;
	}

	mutex_enter(&fptr->fcip_rt_mutex);
	for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
		struct fcip_routing_table 	*frp;

		frp = fptr->fcip_rtable[index];
		while (frp) {
			uint32_t		did;
			/*
			 * Mark the broadcast RTE available again. It
			 * was marked SUSPENDED during SUSPEND.
			 */
			did = fcip_get_broadcast_did(fptr);
			if (frp->fcipr_d_id.port_id == did) {
				frp->fcipr_state = 0;
				/* found it - force the outer loop to end */
				index = FCIP_RT_HASH_ELEMS;
				break;
			}
			frp = frp->fcipr_next;
		}
	}
	mutex_exit(&fptr->fcip_rt_mutex);

	/*
	 * fcip_handle_topology will update the port entries in the
	 * routing table.
	 * fcip_handle_topology also takes care of resetting the
	 * fcipr_state field in the routing table structure. The entries
	 * were set to RT_INVALID during suspend.
	 */
	fcip_handle_topology(fptr);

done:
	/*
	 * Restart the timeout thread
	 */
	fptr->fcip_timeout_id = timeout(fcip_timeout, fptr,
	    drv_usectohz(1000000));
	/* fcip_mutex has been held since the command was validated above */
	mutex_exit(&fptr->fcip_mutex);
	return (rval);
}
1267
1268
1269/*
1270 * Insert a destination port entry into the routing table for
1271 * this port
1272 */
1273static void
1274fcip_rt_update(struct fcip *fptr, fc_portmap_t *devlist, uint32_t listlen)
1275{
1276	struct fcip_routing_table	*frp;
1277	fcip_port_info_t		*fport = fptr->fcip_port_info;
1278	int				hash_bucket, i;
1279	fc_portmap_t			*pmap;
1280	char				wwn_buf[20];
1281
1282	FCIP_TNF_PROBE_2((fcip_rt_update, "fcip io", /* CSTYLED */,
1283		tnf_string, msg, "enter",
1284		tnf_int, listlen, listlen));
1285
1286	ASSERT(!mutex_owned(&fptr->fcip_mutex));
1287	mutex_enter(&fptr->fcip_rt_mutex);
1288
1289	for (i = 0; i < listlen; i++) {
1290		pmap = &(devlist[i]);
1291
1292		frp = fcip_lookup_rtable(fptr, &(pmap->map_pwwn),
1293		    FCIP_COMPARE_PWWN);
1294		/*
1295		 * If an entry for a port in the devlist exists in the
1296		 * in the per port routing table, make sure the data
1297		 * is current. We need to do this irrespective of the
1298		 * underlying port topology.
1299		 */
1300		switch (pmap->map_type) {
1301		/* FALLTHROUGH */
1302		case PORT_DEVICE_NOCHANGE:
1303		/* FALLTHROUGH */
1304		case PORT_DEVICE_USER_LOGIN:
1305		/* FALLTHROUGH */
1306		case PORT_DEVICE_CHANGED:
1307		/* FALLTHROUGH */
1308		case PORT_DEVICE_NEW:
1309			if (frp == NULL) {
1310				goto add_new_entry;
1311			} else if (frp) {
1312				goto update_entry;
1313			} else {
1314				continue;
1315			}
1316
1317		case PORT_DEVICE_OLD:
1318		/* FALLTHROUGH */
1319		case PORT_DEVICE_USER_LOGOUT:
1320			/*
1321			 * Mark entry for removal from Routing Table if
1322			 * one exists. Let the timeout thread actually
1323			 * remove the entry after we've given up hopes
1324			 * of the port ever showing up.
1325			 */
1326			if (frp) {
1327				uint32_t		did;
1328
1329				/*
1330				 * Mark the routing table as invalid to bail
1331				 * the packets early that are in transit
1332				 */
1333				did = fptr->fcip_broadcast_did;
1334				if (frp->fcipr_d_id.port_id != did) {
1335					frp->fcipr_pd = NULL;
1336					frp->fcipr_state = FCIP_RT_INVALID;
1337					frp->fcipr_invalid_timeout =
1338					    fptr->fcip_timeout_ticks +
1339					    FCIP_RTE_TIMEOUT;
1340				}
1341			}
1342			continue;
1343
1344		default:
1345			FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN,
1346			    "unknown map flags in rt_update"));
1347			continue;
1348		}
1349add_new_entry:
1350		ASSERT(frp == NULL);
1351		hash_bucket = FCIP_RT_HASH(pmap->map_pwwn.raw_wwn);
1352
1353		ASSERT(hash_bucket < FCIP_RT_HASH_ELEMS);
1354
1355		FCIP_TNF_PROBE_2((fcip_rt_update, "cfip io", /* CSTYLED */,
1356			tnf_string, msg,
1357			"add new entry",
1358			tnf_int, hashbucket, hash_bucket));
1359
1360		frp = (struct fcip_routing_table *)
1361		    kmem_zalloc(sizeof (struct fcip_routing_table), KM_SLEEP);
1362		/* insert at beginning of hash bucket */
1363		frp->fcipr_next = fptr->fcip_rtable[hash_bucket];
1364		fptr->fcip_rtable[hash_bucket] = frp;
1365		fc_wwn_to_str(&pmap->map_pwwn, wwn_buf);
1366		FCIP_DEBUG(FCIP_DEBUG_ATTACH, (CE_NOTE,
1367		    "added entry for pwwn %s and d_id 0x%x",
1368		    wwn_buf, pmap->map_did.port_id));
1369update_entry:
1370		bcopy((void *)&pmap->map_pwwn,
1371		    (void *)&frp->fcipr_pwwn, sizeof (la_wwn_t));
1372		bcopy((void *)&pmap->map_nwwn, (void *)&frp->fcipr_nwwn,
1373		    sizeof (la_wwn_t));
1374		frp->fcipr_d_id = pmap->map_did;
1375		frp->fcipr_state = pmap->map_state;
1376		frp->fcipr_pd = pmap->map_pd;
1377
1378		/*
1379		 * If there is no pd for a destination port that is not
1380		 * a broadcast entry, the port is pretty much unusable - so
1381		 * mark the port for removal so we can try adding back the
1382		 * entry again.
1383		 */
1384		if ((frp->fcipr_pd == NULL) &&
1385		    (frp->fcipr_d_id.port_id != fptr->fcip_broadcast_did)) {
1386			frp->fcipr_state = PORT_DEVICE_INVALID;
1387			frp->fcipr_invalid_timeout = fptr->fcip_timeout_ticks +
1388			    (FCIP_RTE_TIMEOUT / 2);
1389		}
1390		frp->fcipr_fca_dev =
1391		    fc_ulp_get_fca_device(fport->fcipp_handle, pmap->map_did);
1392
1393		/*
1394		 * login to the remote port. Don't worry about
1395		 * plogi failures for now
1396		 */
1397		if (pmap->map_pd != NULL) {
1398			(void) fcip_do_plogi(fptr, frp);
1399		} else if (FC_TOP_EXTERNAL(fport->fcipp_topology)) {
1400			fc_wwn_to_str(&frp->fcipr_pwwn, wwn_buf);
1401			FCIP_DEBUG(FCIP_DEBUG_MISC, (CE_NOTE,
1402			    "logging into pwwn %s, d_id 0x%x",
1403			    wwn_buf, frp->fcipr_d_id.port_id));
1404			(void) fcip_do_plogi(fptr, frp);
1405		}
1406
1407		FCIP_TNF_BYTE_ARRAY(fcip_rt_update, "fcip io", "detail",
1408			"new wwn in rt", pwwn,
1409			&frp->fcipr_pwwn, sizeof (la_wwn_t));
1410	}
1411	mutex_exit(&fptr->fcip_rt_mutex);
1412}
1413
1414
1415/*
1416 * return a matching routing table entry for a given fcip instance
1417 */
1418struct fcip_routing_table *
1419fcip_lookup_rtable(struct fcip *fptr, la_wwn_t *wwn, int matchflag)
1420{
1421	struct fcip_routing_table	*frp = NULL;
1422	int				hash_bucket;
1423
1424
1425	FCIP_TNF_PROBE_1((fcip_lookup_rtable, "fcip io", /* CSTYLED */,
1426		tnf_string, msg, "enter"));
1427	FCIP_TNF_BYTE_ARRAY(fcip_lookup_rtable, "fcip io", "detail",
1428		"rtable lookup for", wwn,
1429		&wwn->raw_wwn, sizeof (la_wwn_t));
1430	FCIP_TNF_PROBE_2((fcip_lookup_rtable, "fcip io", /* CSTYLED */,
1431		tnf_string, msg, "match by",
1432		tnf_int, matchflag, matchflag));
1433
1434	ASSERT(mutex_owned(&fptr->fcip_rt_mutex));
1435
1436	hash_bucket = FCIP_RT_HASH(wwn->raw_wwn);
1437	frp = fptr->fcip_rtable[hash_bucket];
1438	while (frp != NULL) {
1439
1440		FCIP_TNF_BYTE_ARRAY(fcip_lookup_rtable, "fcip io", "detail",
1441			"rtable entry", nwwn,
1442			&(frp->fcipr_nwwn.raw_wwn), sizeof (la_wwn_t));
1443
1444		if (fcip_wwn_compare(&frp->fcipr_pwwn, wwn, matchflag) == 0) {
1445			break;
1446		}
1447
1448		frp = frp->fcipr_next;
1449	}
1450	FCIP_TNF_PROBE_2((fcip_lookup_rtable, "fcip io", /* CSTYLED */,
1451		tnf_string, msg, "lookup result",
1452		tnf_opaque, frp, frp));
1453	return (frp);
1454}
1455
1456/*
1457 * Attach of fcip under pseudo. The actual setup of the interface
1458 * actually happens in fcip_port_attach on a callback from the
1459 * transport. The port_attach callback however can proceed only
1460 * after the devinfo for fcip has been created under pseudo
1461 */
1462static int
1463fcip_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1464{
1465	switch ((int)cmd) {
1466
1467	case DDI_ATTACH: {
1468		ASSERT(fcip_module_dip == NULL);
1469		fcip_module_dip = dip;
1470
1471		/*
1472		 * this call originates as a result of fcip's conf
1473		 * file entry and will result in a fcip instance being
1474		 * a child of pseudo. We should ensure here that the port
1475		 * driver (fp) has been loaded and initted since we would
1476		 * never get a port attach callback without fp being loaded.
1477		 * If we are unable to succesfully load and initalize fp -
1478		 * just fail this attach.
1479		 */
1480		mutex_enter(&fcip_global_mutex);
1481
1482		FCIP_DEBUG(FCIP_DEBUG_ATTACH,
1483		    (CE_WARN, "global cv - signaling"));
1484
1485		cv_signal(&fcip_global_cv);
1486
1487		FCIP_DEBUG(FCIP_DEBUG_ATTACH,
1488		    (CE_WARN, "global cv - signaled"));
1489		mutex_exit(&fcip_global_mutex);
1490		return (DDI_SUCCESS);
1491	}
1492	case DDI_RESUME:
1493		/*
1494		 * Resume appears trickier
1495		 */
1496		return (DDI_SUCCESS);
1497	default:
1498		return (DDI_FAILURE);
1499	}
1500}
1501
1502
1503/*
1504 * The detach entry point to permit unloading fcip. We make sure
1505 * there are no active streams before we proceed with the detach
1506 */
1507/* ARGSUSED */
1508static int
1509fcip_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
1510{
1511	struct fcip		*fptr;
1512	fcip_port_info_t	*fport;
1513	int			detached;
1514
1515	switch (cmd) {
1516	case DDI_DETACH: {
1517		/*
1518		 * If we got here, any active streams should have been
1519		 * unplumbed but check anyway
1520		 */
1521		mutex_enter(&fcip_global_mutex);
1522		if (fcipstrup != NULL) {
1523			mutex_exit(&fcip_global_mutex);
1524			return (DDI_FAILURE);
1525		}
1526
1527		if (fcip_port_head != NULL) {
1528			/*
1529			 * Check to see if we have unattached/unbound
1530			 * ports. If all the ports are unattached/unbound go
1531			 * ahead and unregister with the transport
1532			 */
1533			fport = fcip_port_head;
1534			while (fport != NULL) {
1535				fptr = fport->fcipp_fcip;
1536				if (fptr == NULL) {
1537					continue;
1538				}
1539				mutex_enter(&fptr->fcip_mutex);
1540				fptr->fcip_flags |= FCIP_DETACHING;
1541				if (fptr->fcip_ipq ||
1542				    fptr->fcip_flags & (FCIP_IN_TIMEOUT |
1543				    FCIP_IN_CALLBACK | FCIP_ATTACHING |
1544				    FCIP_SUSPENDED | FCIP_POWER_DOWN |
1545				    FCIP_REG_INPROGRESS)) {
1546					FCIP_TNF_PROBE_1((fcip_detach,
1547					    "fcip io", /* CSTYLED */,
1548					    tnf_string, msg,
1549					    "fcip instance busy"));
1550
1551					mutex_exit(&fptr->fcip_mutex);
1552					FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
1553					    "fcip instance busy"));
1554					break;
1555				}
1556				/*
1557				 * Check for any outstanding pkts. If yes
1558				 * fail the detach
1559				 */
1560				mutex_enter(&fptr->fcip_dest_mutex);
1561				if (fcip_port_get_num_pkts(fptr) > 0) {
1562					mutex_exit(&fptr->fcip_dest_mutex);
1563					mutex_exit(&fptr->fcip_mutex);
1564					FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
1565					    "fcip instance busy - pkts "
1566					    "pending"));
1567					break;
1568				}
1569				mutex_exit(&fptr->fcip_dest_mutex);
1570
1571				mutex_enter(&fptr->fcip_rt_mutex);
1572				if (fcip_plogi_in_progress(fptr)) {
1573					mutex_exit(&fptr->fcip_rt_mutex);
1574					mutex_exit(&fptr->fcip_mutex);
1575					FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
1576					    "fcip instance busy - plogi in "
1577					    "progress"));
1578					break;
1579				}
1580				mutex_exit(&fptr->fcip_rt_mutex);
1581
1582				mutex_exit(&fptr->fcip_mutex);
1583				fport = fport->fcipp_next;
1584			}
1585			/*
1586			 * if fport is non NULL - we have active ports
1587			 */
1588			if (fport != NULL) {
1589				/*
1590				 * Remove the DETACHING flags on the ports
1591				 */
1592				fport = fcip_port_head;
1593				while (fport != NULL) {
1594					fptr = fport->fcipp_fcip;
1595					mutex_enter(&fptr->fcip_mutex);
1596					fptr->fcip_flags &= ~(FCIP_DETACHING);
1597					mutex_exit(&fptr->fcip_mutex);
1598					fport = fport->fcipp_next;
1599				}
1600				mutex_exit(&fcip_global_mutex);
1601				return (DDI_FAILURE);
1602			}
1603		}
1604
1605		/*
1606		 * free up all softstate structures
1607		 */
1608		fport = fcip_port_head;
1609		while (fport != NULL) {
1610			detached = 1;
1611
1612			fptr = fport->fcipp_fcip;
1613			if (fptr) {
1614				mutex_enter(&fptr->fcip_mutex);
1615				/*
1616				 * Check to see if somebody beat us to the
1617				 * punch
1618				 */
1619				detached = fptr->fcip_flags & FCIP_DETACHED;
1620				fptr->fcip_flags &= ~(FCIP_DETACHING);
1621				fptr->fcip_flags |= FCIP_DETACHED;
1622				mutex_exit(&fptr->fcip_mutex);
1623			}
1624
1625			if (!detached) {
1626				fport = fcip_softstate_free(fport);
1627			} else {
1628				/*
1629				 * If the port was marked as detached
1630				 * but it was still in the list, that
1631				 * means another thread has marked it
1632				 * but we got in while it released the
1633				 * fcip_global_mutex in softstate_free.
1634				 * Given that, we're still safe to use
1635				 * fport->fcipp_next to find out what
1636				 * the next port on the list is.
1637				 */
1638				fport = fport->fcipp_next;
1639			}
1640
1641			FCIP_DEBUG(FCIP_DEBUG_DETACH,
1642			    (CE_NOTE, "detaching port"));
1643
1644			FCIP_TNF_PROBE_1((fcip_detach,
1645				"fcip io", /* CSTYLED */, tnf_string,
1646				msg, "detaching port"));
1647		}
1648
1649		/*
1650		 * If we haven't removed all the port structures, we
1651		 * aren't yet ready to be detached.
1652		 */
1653		if (fcip_port_head != NULL) {
1654			mutex_exit(&fcip_global_mutex);
1655			return (DDI_FAILURE);
1656		}
1657
1658		fcip_num_instances = 0;
1659		mutex_exit(&fcip_global_mutex);
1660		fcip_module_dip = NULL;
1661		return (DDI_SUCCESS);
1662	}
1663	case DDI_SUSPEND:
1664		return (DDI_SUCCESS);
1665	default:
1666		return (DDI_FAILURE);
1667	}
1668}
1669
1670/*
1671 * The port_detach callback is called from the transport when a
1672 * FC port is being removed from the transport's control. This routine
1673 * provides fcip with an opportunity to cleanup all activities and
1674 * structures on the port marked for removal.
1675 */
/* ARGSUSED */
static int
fcip_port_detach(opaque_t ulp_handle, fc_ulp_port_info_t *port_info,
    fc_detach_cmd_t cmd)
{
	int 			rval = FC_FAILURE;
	fcip_port_info_t	*fport;
	struct fcip		*fptr;
	struct fcipstr		*strp;

	switch (cmd) {
	case FC_CMD_DETACH: {
		mutex_enter(&fcip_global_mutex);

		if (fcip_port_head == NULL) {
			/*
			 * we are all done but our fini has not been
			 * called yet!! Let's hope we have no active
			 * fcip instances here. - strange scenario but
			 * no harm in having this return a success.
			 */
			fcip_check_remove_minor_node();

			mutex_exit(&fcip_global_mutex);
			return (FC_SUCCESS);
		} else {
			/*
			 * traverse the port list
			 */
			fport = fcip_port_head;
			while (fport != NULL) {
				if (fport->fcipp_handle ==
				    port_info->port_handle) {
					fptr = fport->fcipp_fcip;

					/*
					 * Fail the port detach if there is
					 * still an attached, bound stream on
					 * this interface.
					 */

					rw_enter(&fcipstruplock, RW_READER);

					for (strp = fcipstrup; strp != NULL;
					    strp = strp->sl_nextp) {
						if (strp->sl_fcip == fptr) {
							rw_exit(&fcipstruplock);
							mutex_exit(
							    &fcip_global_mutex);
							return (FC_FAILURE);
						}
					}

					rw_exit(&fcipstruplock);

					/*
					 * fail port detach if we are in
					 * the middle of a deferred port attach
					 * or if the port has outstanding pkts
					 */
					if (fptr != NULL) {
						mutex_enter(&fptr->fcip_mutex);
						if (fcip_check_port_busy
						    (fptr) ||
						    (fptr->fcip_flags &
						    FCIP_DETACHED)) {
							mutex_exit(
							    &fptr->fcip_mutex);
							mutex_exit(
							    &fcip_global_mutex);
							return (FC_FAILURE);
						}

						/* claim the detach */
						fptr->fcip_flags |=
						    FCIP_DETACHED;
						mutex_exit(&fptr->fcip_mutex);
					}
					(void) fcip_softstate_free(fport);

					/* drop minor node if last instance */
					fcip_check_remove_minor_node();
					mutex_exit(&fcip_global_mutex);
					return (FC_SUCCESS);
				}
				fport = fport->fcipp_next;
			}
			ASSERT(fport == NULL);
		}
		mutex_exit(&fcip_global_mutex);
		break;
	}
	case FC_CMD_POWER_DOWN:
	/* FALLTHROUGH */
	case FC_CMD_SUSPEND:
		/* find the port for this handle and quiesce its traffic */
		mutex_enter(&fcip_global_mutex);
		fport = fcip_port_head;
		while (fport != NULL) {
			if (fport->fcipp_handle == port_info->port_handle) {
				break;
			}
			fport = fport->fcipp_next;
		}
		if (fport == NULL) {
			/* unknown port - rval stays FC_FAILURE */
			mutex_exit(&fcip_global_mutex);
			break;
		}
		rval = fcip_handle_suspend(fport, cmd);
		mutex_exit(&fcip_global_mutex);
		break;
	default:
		FCIP_DEBUG(FCIP_DEBUG_DETACH,
		    (CE_WARN, "unknown port detach command!!"));
		break;
	}
	return (rval);
}
1791
1792
1793/*
1794 * Returns 0 if the port is not busy, else returns non zero.
1795 */
1796static int
1797fcip_check_port_busy(struct fcip *fptr)
1798{
1799	int rval = 0, num_pkts = 0;
1800
1801	ASSERT(fptr != NULL);
1802	ASSERT(MUTEX_HELD(&fptr->fcip_mutex));
1803
1804	mutex_enter(&fptr->fcip_dest_mutex);
1805
1806	if (fptr->fcip_flags & FCIP_PORT_BUSY ||
1807	    ((num_pkts = fcip_port_get_num_pkts(fptr)) > 0) ||
1808	    fptr->fcip_num_ipkts_pending) {
1809		rval = 1;
1810		FCIP_DEBUG(FCIP_DEBUG_DETACH,
1811		    (CE_NOTE, "!fcip_check_port_busy: port is busy "
1812		    "fcip_flags: 0x%x, num_pkts: 0x%x, ipkts_pending: 0x%lx!",
1813		    fptr->fcip_flags, num_pkts, fptr->fcip_num_ipkts_pending));
1814	}
1815
1816	mutex_exit(&fptr->fcip_dest_mutex);
1817	return (rval);
1818}
1819
1820/*
1821 * Helper routine to remove fcip's minor node
1822 * There is one minor node per system and it should be removed if there are no
1823 * other fcip instances (which has a 1:1 mapping for fp instances) present
1824 */
1825static void
1826fcip_check_remove_minor_node(void)
1827{
1828	ASSERT(MUTEX_HELD(&fcip_global_mutex));
1829
1830	/*
1831	 * If there are no more fcip (fp) instances, remove the
1832	 * minor node for fcip.
1833	 * Reset fcip_minor_node_created to invalidate it.
1834	 */
1835	if (fcip_num_instances == 0 && (fcip_module_dip != NULL)) {
1836		ddi_remove_minor_node(fcip_module_dip, NULL);
1837		fcip_minor_node_created = 0;
1838	}
1839}
1840
1841/*
1842 * This routine permits the suspend operation during a CPR/System
1843 * power management operation. The routine basically quiesces I/Os
1844 * on all active interfaces
1845 */
static int
fcip_handle_suspend(fcip_port_info_t *fport, fc_detach_cmd_t cmd)
{
	struct fcip	*fptr = fport->fcipp_fcip;
	timeout_id_t	tid;
	int 		index;
	int		tryagain = 0;
	int		count;
	struct fcipstr	*tslp;


	ASSERT(fptr != NULL);
	mutex_enter(&fptr->fcip_mutex);

	/*
	 * Fail if we are in the middle of a callback. Don't use delay during
	 * suspend since clock intrs are not available so busy wait
	 */
	count = 0;
	/* busy-wait up to ~15 seconds for callback/timeout flags to clear */
	while (count++ < 15 &&
	    ((fptr->fcip_flags & FCIP_IN_CALLBACK) ||
	    (fptr->fcip_flags & FCIP_IN_TIMEOUT))) {
		mutex_exit(&fptr->fcip_mutex);
		drv_usecwait(1000000);
		mutex_enter(&fptr->fcip_mutex);
	}

	if (fptr->fcip_flags & FCIP_IN_CALLBACK ||
	    fptr->fcip_flags & FCIP_IN_TIMEOUT) {
		/* still busy after the wait above - fail the suspend */
		mutex_exit(&fptr->fcip_mutex);
		return (FC_FAILURE);
	}

	if (cmd == FC_CMD_POWER_DOWN) {
		if (fptr->fcip_flags & FCIP_SUSPENDED) {
			/* already quiesced by an earlier SUSPEND */
			fptr->fcip_flags |= FCIP_POWER_DOWN;
			mutex_exit(&fptr->fcip_mutex);
			goto success;
		} else {
			fptr->fcip_flags |= FCIP_POWER_DOWN;
		}
	} else if (cmd == FC_CMD_SUSPEND) {
		fptr->fcip_flags |= FCIP_SUSPENDED;
	} else {
		/* unrecognized command */
		mutex_exit(&fptr->fcip_mutex);
		return (FC_FAILURE);
	}

	mutex_exit(&fptr->fcip_mutex);
	/*
	 * If no streams are plumbed - its the easiest case - Just
	 * bail out without having to do much
	 */

	rw_enter(&fcipstruplock, RW_READER);
	for (tslp = fcipstrup; tslp; tslp = tslp->sl_nextp) {
		if (tslp->sl_fcip == fptr) {
			break;
		}
	}
	rw_exit(&fcipstruplock);

	/*
	 * No active streams on this port
	 */
	if (tslp == NULL) {
		goto success;
	}

	/*
	 * Walk through each Routing table structure and check if
	 * the destination table has any outstanding commands. If yes
	 * wait for the commands to drain. Since we go through each
	 * routing table entry in succession, it may be wise to wait
	 * only a few seconds for each entry.
	 */
	mutex_enter(&fptr->fcip_rt_mutex);
	/*
	 * NOTE(review): this loop can make at most one pass. tryagain
	 * starts at 0 and, once set by a drain failure, both terminates
	 * the loop and triggers the failure path below - so the "second
	 * pass" mentioned further down never runs. Confirm whether a
	 * real retry was intended here.
	 */
	while (!tryagain) {

		tryagain = 0;
		for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
			struct fcip_routing_table 	*frp;
			struct fcip_dest 		*fdestp;
			la_wwn_t			*pwwn;
			int				hash_bucket;

			frp = fptr->fcip_rtable[index];
			while (frp) {
				/*
				 * Mark the routing table as SUSPENDED. Even
				 * mark the broadcast entry SUSPENDED to
				 * prevent any ARP or other broadcasts. We
				 * can reset the state of the broadcast
				 * RTE when we resume.
				 */
				frp->fcipr_state = FCIP_RT_SUSPENDED;
				pwwn = &frp->fcipr_pwwn;

				/*
				 * Get hold of destination pointer
				 */
				mutex_enter(&fptr->fcip_dest_mutex);

				hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
				ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);

				fdestp = fptr->fcip_dest[hash_bucket];
				while (fdestp != NULL) {
					mutex_enter(&fdestp->fcipd_mutex);
					if (fdestp->fcipd_rtable) {
						if (fcip_wwn_compare(pwwn,
						    &fdestp->fcipd_pwwn,
						    FCIP_COMPARE_PWWN) == 0) {
							mutex_exit(
							&fdestp->fcipd_mutex);
							break;
						}
					}
					mutex_exit(&fdestp->fcipd_mutex);
					fdestp = fdestp->fcipd_next;
				}

				mutex_exit(&fptr->fcip_dest_mutex);
				if (fdestp == NULL) {
					/* no dest struct - nothing to drain */
					frp = frp->fcipr_next;
					continue;
				}

				/*
				 * Wait for fcip_wait_cmds seconds for
				 * the commands to drain.
				 */
				count = 0;
				mutex_enter(&fdestp->fcipd_mutex);
				while (fdestp->fcipd_ncmds &&
				    count < fcip_wait_cmds) {
					/* drop locks while busy-waiting */
					mutex_exit(&fdestp->fcipd_mutex);
					mutex_exit(&fptr->fcip_rt_mutex);
					drv_usecwait(1000000);
					mutex_enter(&fptr->fcip_rt_mutex);
					mutex_enter(&fdestp->fcipd_mutex);
					count++;
				}
				/*
				 * Check if we were able to drain all cmds
				 * successfully. Else continue with other
				 * ports and try during the second pass
				 */
				if (fdestp->fcipd_ncmds) {
					tryagain++;
				}
				mutex_exit(&fdestp->fcipd_mutex);

				frp = frp->fcipr_next;
			}
		}
		if (tryagain == 0) {
			break;
		}
	}
	mutex_exit(&fptr->fcip_rt_mutex);

	if (tryagain) {
		/* could not drain - undo the suspend/power-down flags */
		mutex_enter(&fptr->fcip_mutex);
		fptr->fcip_flags &= ~(FCIP_SUSPENDED | FCIP_POWER_DOWN);
		mutex_exit(&fptr->fcip_mutex);
		return (FC_FAILURE);
	}

success:
	/* stop the periodic timeout before going quiescent */
	mutex_enter(&fptr->fcip_mutex);
	tid = fptr->fcip_timeout_id;
	fptr->fcip_timeout_id = NULL;
	mutex_exit(&fptr->fcip_mutex);

	(void) untimeout(tid);

	return (FC_SUCCESS);
}
2025
2026/*
2027 * the getinfo(9E) entry point
2028 */
2029/* ARGSUSED */
2030static int
2031fcip_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
2032{
2033	int rval = DDI_FAILURE;
2034
2035	switch (cmd) {
2036	case DDI_INFO_DEVT2DEVINFO:
2037		*result = fcip_module_dip;
2038		if (*result)
2039			rval = DDI_SUCCESS;
2040		break;
2041
2042	case DDI_INFO_DEVT2INSTANCE:
2043		*result = (void *)0;
2044		rval = DDI_SUCCESS;
2045		break;
2046	default:
2047		break;
2048	}
2049
2050	return (rval);
2051}
2052
2053/*
2054 * called from fcip_attach to initialize kstats for the link
2055 */
2056/* ARGSUSED */
2057static void
2058fcip_kstat_init(struct fcip *fptr)
2059{
2060	int instance;
2061	char buf[16];
2062	struct fcipstat	*fcipstatp;
2063
2064	ASSERT(mutex_owned(&fptr->fcip_mutex));
2065
2066	instance = ddi_get_instance(fptr->fcip_dip);
2067	(void) sprintf(buf, "fcip%d", instance);
2068
2069#ifdef	kstat
2070	fptr->fcip_kstatp = kstat_create("fcip", instance, buf, "net",
2071	    KSTAT_TYPE_NAMED,
2072	    (sizeof (struct fcipstat)/ sizeof (kstat_named_t)),
2073	    KSTAT_FLAG_PERSISTENT);
2074#else
2075	fptr->fcip_kstatp = kstat_create("fcip", instance, buf, "net",
2076	    KSTAT_TYPE_NAMED,
2077	    (sizeof (struct fcipstat)/ sizeof (kstat_named_t)), 0);
2078#endif
2079	if (fptr->fcip_kstatp == NULL) {
2080		FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN, "kstat created failed"));
2081		return;
2082	}
2083
2084	fcipstatp = (struct  fcipstat *)fptr->fcip_kstatp->ks_data;
2085	kstat_named_init(&fcipstatp->fcips_ipackets,	"ipackets",
2086		KSTAT_DATA_ULONG);
2087	kstat_named_init(&fcipstatp->fcips_ierrors,	"ierrors",
2088		KSTAT_DATA_ULONG);
2089	kstat_named_init(&fcipstatp->fcips_opackets,	"opackets",
2090		KSTAT_DATA_ULONG);
2091	kstat_named_init(&fcipstatp->fcips_oerrors,	"oerrors",
2092		KSTAT_DATA_ULONG);
2093	kstat_named_init(&fcipstatp->fcips_collisions,	"collisions",
2094		KSTAT_DATA_ULONG);
2095	kstat_named_init(&fcipstatp->fcips_nocanput,	"nocanput",
2096		KSTAT_DATA_ULONG);
2097	kstat_named_init(&fcipstatp->fcips_allocbfail,	"allocbfail",
2098		KSTAT_DATA_ULONG);
2099
2100	kstat_named_init(&fcipstatp->fcips_defer, "defer",
2101		KSTAT_DATA_ULONG);
2102	kstat_named_init(&fcipstatp->fcips_fram, "fram",
2103		KSTAT_DATA_ULONG);
2104	kstat_named_init(&fcipstatp->fcips_crc, "crc",
2105		KSTAT_DATA_ULONG);
2106	kstat_named_init(&fcipstatp->fcips_oflo, "oflo",
2107		KSTAT_DATA_ULONG);
2108	kstat_named_init(&fcipstatp->fcips_uflo, "uflo",
2109		KSTAT_DATA_ULONG);
2110	kstat_named_init(&fcipstatp->fcips_missed, "missed",
2111		KSTAT_DATA_ULONG);
2112	kstat_named_init(&fcipstatp->fcips_tlcol, "tlcol",
2113		KSTAT_DATA_ULONG);
2114	kstat_named_init(&fcipstatp->fcips_trtry, "trtry",
2115		KSTAT_DATA_ULONG);
2116	kstat_named_init(&fcipstatp->fcips_tnocar, "tnocar",
2117		KSTAT_DATA_ULONG);
2118	kstat_named_init(&fcipstatp->fcips_inits, "inits",
2119		KSTAT_DATA_ULONG);
2120	kstat_named_init(&fcipstatp->fcips_notbufs, "notbufs",
2121		KSTAT_DATA_ULONG);
2122	kstat_named_init(&fcipstatp->fcips_norbufs, "norbufs",
2123		KSTAT_DATA_ULONG);
2124	kstat_named_init(&fcipstatp->fcips_allocbfail, "allocbfail",
2125		KSTAT_DATA_ULONG);
2126
2127	/*
2128	 * required by kstat for MIB II objects(RFC 1213)
2129	 */
2130	kstat_named_init(&fcipstatp->fcips_rcvbytes, "fcips_rcvbytes",
2131		KSTAT_DATA_ULONG);	/* # octets received */
2132					/* MIB - ifInOctets */
2133	kstat_named_init(&fcipstatp->fcips_xmtbytes, "fcips_xmtbytes",
2134		KSTAT_DATA_ULONG);	/* # octets xmitted */
2135					/* MIB - ifOutOctets */
2136	kstat_named_init(&fcipstatp->fcips_multircv,	"fcips_multircv",
2137		KSTAT_DATA_ULONG);	/* # multicast packets */
2138					/* delivered to upper layer */
2139					/* MIB - ifInNUcastPkts */
2140	kstat_named_init(&fcipstatp->fcips_multixmt,	"fcips_multixmt",
2141		KSTAT_DATA_ULONG);	/* # multicast packets */
2142					/* requested to be sent */
2143					/* MIB - ifOutNUcastPkts */
2144	kstat_named_init(&fcipstatp->fcips_brdcstrcv, "fcips_brdcstrcv",
2145		KSTAT_DATA_ULONG); /* # broadcast packets */
2146					/* delivered to upper layer */
2147					/* MIB - ifInNUcastPkts */
2148	kstat_named_init(&fcipstatp->fcips_brdcstxmt, "fcips_brdcstxmt",
2149		KSTAT_DATA_ULONG);	/* # broadcast packets */
2150					/* requested to be sent */
2151					/* MIB - ifOutNUcastPkts */
2152	kstat_named_init(&fcipstatp->fcips_norcvbuf,	"fcips_norcvbuf",
2153		KSTAT_DATA_ULONG);	/* # rcv packets discarded */
2154					/* MIB - ifInDiscards */
2155	kstat_named_init(&fcipstatp->fcips_noxmtbuf,	"fcips_noxmtbuf",
2156		KSTAT_DATA_ULONG);	/* # xmt packets discarded */
2157
2158	fptr->fcip_kstatp->ks_update = fcip_stat_update;
2159	fptr->fcip_kstatp->ks_private = (void *) fptr;
2160	kstat_install(fptr->fcip_kstatp);
2161}
2162
2163/*
2164 * Update the defined kstats for netstat et al to use
2165 */
2166/* ARGSUSED */
2167static int
2168fcip_stat_update(kstat_t *fcip_statp, int val)
2169{
2170	struct fcipstat	*fcipstatp;
2171	struct fcip	*fptr;
2172
2173	fptr = (struct fcip *)fcip_statp->ks_private;
2174	fcipstatp = (struct fcipstat *)fcip_statp->ks_data;
2175
2176	if (val == KSTAT_WRITE) {
2177		fptr->fcip_ipackets	= fcipstatp->fcips_ipackets.value.ul;
2178		fptr->fcip_ierrors	= fcipstatp->fcips_ierrors.value.ul;
2179		fptr->fcip_opackets	= fcipstatp->fcips_opackets.value.ul;
2180		fptr->fcip_oerrors	= fcipstatp->fcips_oerrors.value.ul;
2181		fptr->fcip_collisions	= fcipstatp->fcips_collisions.value.ul;
2182		fptr->fcip_defer	= fcipstatp->fcips_defer.value.ul;
2183		fptr->fcip_fram	= fcipstatp->fcips_fram.value.ul;
2184		fptr->fcip_crc	= fcipstatp->fcips_crc.value.ul;
2185		fptr->fcip_oflo	= fcipstatp->fcips_oflo.value.ul;
2186		fptr->fcip_uflo	= fcipstatp->fcips_uflo.value.ul;
2187		fptr->fcip_missed	= fcipstatp->fcips_missed.value.ul;
2188		fptr->fcip_tlcol	= fcipstatp->fcips_tlcol.value.ul;
2189		fptr->fcip_trtry	= fcipstatp->fcips_trtry.value.ul;
2190		fptr->fcip_tnocar	= fcipstatp->fcips_tnocar.value.ul;
2191		fptr->fcip_inits	= fcipstatp->fcips_inits.value.ul;
2192		fptr->fcip_notbufs	= fcipstatp->fcips_notbufs.value.ul;
2193		fptr->fcip_norbufs	= fcipstatp->fcips_norbufs.value.ul;
2194		fptr->fcip_nocanput	= fcipstatp->fcips_nocanput.value.ul;
2195		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2196		fptr->fcip_rcvbytes	= fcipstatp->fcips_rcvbytes.value.ul;
2197		fptr->fcip_xmtbytes	= fcipstatp->fcips_xmtbytes.value.ul;
2198		fptr->fcip_multircv	= fcipstatp->fcips_multircv.value.ul;
2199		fptr->fcip_multixmt	= fcipstatp->fcips_multixmt.value.ul;
2200		fptr->fcip_brdcstrcv	= fcipstatp->fcips_brdcstrcv.value.ul;
2201		fptr->fcip_norcvbuf	= fcipstatp->fcips_norcvbuf.value.ul;
2202		fptr->fcip_noxmtbuf	= fcipstatp->fcips_noxmtbuf.value.ul;
2203		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2204		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2205		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2206		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2207		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2208		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2209		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2210		fptr->fcip_allocbfail	= fcipstatp->fcips_allocbfail.value.ul;
2211
2212	} else {
2213		fcipstatp->fcips_ipackets.value.ul	= fptr->fcip_ipackets;
2214		fcipstatp->fcips_ierrors.value.ul	= fptr->fcip_ierrors;
2215		fcipstatp->fcips_opackets.value.ul	= fptr->fcip_opackets;
2216		fcipstatp->fcips_oerrors.value.ul	= fptr->fcip_oerrors;
2217		fcipstatp->fcips_collisions.value.ul	= fptr->fcip_collisions;
2218		fcipstatp->fcips_nocanput.value.ul	= fptr->fcip_nocanput;
2219		fcipstatp->fcips_allocbfail.value.ul	= fptr->fcip_allocbfail;
2220		fcipstatp->fcips_defer.value.ul	= fptr->fcip_defer;
2221		fcipstatp->fcips_fram.value.ul	= fptr->fcip_fram;
2222		fcipstatp->fcips_crc.value.ul	= fptr->fcip_crc;
2223		fcipstatp->fcips_oflo.value.ul	= fptr->fcip_oflo;
2224		fcipstatp->fcips_uflo.value.ul	= fptr->fcip_uflo;
2225		fcipstatp->fcips_missed.value.ul	= fptr->fcip_missed;
2226		fcipstatp->fcips_tlcol.value.ul	= fptr->fcip_tlcol;
2227		fcipstatp->fcips_trtry.value.ul	= fptr->fcip_trtry;
2228		fcipstatp->fcips_tnocar.value.ul	= fptr->fcip_tnocar;
2229		fcipstatp->fcips_inits.value.ul	= fptr->fcip_inits;
2230		fcipstatp->fcips_norbufs.value.ul	= fptr->fcip_norbufs;
2231		fcipstatp->fcips_notbufs.value.ul	= fptr->fcip_notbufs;
2232		fcipstatp->fcips_rcvbytes.value.ul	= fptr->fcip_rcvbytes;
2233		fcipstatp->fcips_xmtbytes.value.ul	= fptr->fcip_xmtbytes;
2234		fcipstatp->fcips_multircv.value.ul	= fptr->fcip_multircv;
2235		fcipstatp->fcips_multixmt.value.ul	= fptr->fcip_multixmt;
2236		fcipstatp->fcips_brdcstrcv.value.ul	= fptr->fcip_brdcstrcv;
2237		fcipstatp->fcips_brdcstxmt.value.ul	= fptr->fcip_brdcstxmt;
2238		fcipstatp->fcips_norcvbuf.value.ul	= fptr->fcip_norcvbuf;
2239		fcipstatp->fcips_noxmtbuf.value.ul	= fptr->fcip_noxmtbuf;
2240
2241	}
2242	return (0);
2243}
2244
2245
2246/*
2247 * fcip_statec_cb: handles all required state change callback notifications
2248 * it receives from the transport
2249 */
2250/* ARGSUSED */
static void
fcip_statec_cb(opaque_t ulp_handle, opaque_t phandle,
    uint32_t port_state, uint32_t port_top, fc_portmap_t changelist[],
    uint32_t listlen, uint32_t sid)
{
	fcip_port_info_t	*fport;
	struct fcip 		*fptr;
	struct fcipstr		*slp;
	queue_t			*wrq;
	int			instance;
	int 			index;
	struct fcip_routing_table 	*frtp;

	/* map the transport's port handle back to our per-port state */
	fport = fcip_get_port(phandle);

	if (fport == NULL) {
		return;
	}

	fptr = fport->fcipp_fcip;
	ASSERT(fptr != NULL);

	if (fptr == NULL) {
		return;
	}

	instance = ddi_get_instance(fport->fcipp_dip);

	FCIP_TNF_PROBE_4((fcip_statec_cb, "fcip io", /* CSTYLED */,
		tnf_string, msg, "state change callback",
		tnf_uint, instance, instance,
		tnf_uint, S_ID, sid,
		tnf_int, count, listlen));
	FCIP_DEBUG(FCIP_DEBUG_ELS,
	    (CE_NOTE, "fcip%d, state change callback: state:0x%x, "
	    "S_ID:0x%x, count:0x%x", instance, port_state, sid, listlen));

	mutex_enter(&fptr->fcip_mutex);

	/* ignore callbacks while detaching, suspended or powered down */
	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
		mutex_exit(&fptr->fcip_mutex);
		return;
	}

	/*
	 * set fcip flags to indicate we are in the middle of a
	 * state change callback so we can wait till the statechange
	 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
	 */
	fptr->fcip_flags |= FCIP_IN_SC_CB;

	fport->fcipp_pstate = port_state;

	/*
	 * Check if topology changed. If Yes - Modify the broadcast
	 * RTE entries to understand the new broadcast D_IDs
	 */
	if (fport->fcipp_topology != port_top &&
	    (port_top != FC_TOP_UNKNOWN)) {
		/* REMOVE later */
		FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
		    "topology changed: Old topology: 0x%x New topology 0x%x",
		    fport->fcipp_topology, port_top));
		/*
		 * If topology changed - attempt a rediscovery of
		 * devices. Helps specially in Fabric/Public loops
		 * and if on_demand_node_creation is disabled
		 */
		fport->fcipp_topology = port_top;
		fcip_handle_topology(fptr);
	}

	/* fcip_mutex is dropped before the potentially long updates below */
	mutex_exit(&fptr->fcip_mutex);

	switch (FC_PORT_STATE_MASK(port_state)) {
	case FC_STATE_ONLINE:
	/* FALLTHROUGH */
	case FC_STATE_LIP:
	/* FALLTHROUGH */
	case FC_STATE_LIP_LBIT_SET:

		/*
		 * nothing to do here actually other than if we
		 * were actually logged onto a port in the devlist
		 * (which indicates active communication between
		 * the host port and the port in the changelist).
		 * If however we are in a private loop or point to
		 * point mode, we need to check for any IP capable
		 * ports and update our routing table.
		 */
		switch (port_top) {
		case FC_TOP_FABRIC:
			/*
			 * This indicates a fabric port with a NameServer.
			 * Check the devlist to see if we are in active
			 * communication with a port on the devlist.
			 */
			FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
			    "Statec_cb: fabric topology"));
			fcip_rt_update(fptr, changelist, listlen);
			break;
		case FC_TOP_NO_NS:
			/*
			 * No nameserver - so treat it like a Private loop
			 * or point to point topology and get a map of
			 * devices on the link and get IP capable ports to
			 * to update the routing table.
			 */
			FCIP_DEBUG(FCIP_DEBUG_ELS,
			    (CE_NOTE, "Statec_cb: NO_NS topology"));
		/* FALLTHROUGH */
		case FC_TOP_PRIVATE_LOOP:
			FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
			    "Statec_cb: Pvt_Loop topology"));
		/* FALLTHROUGH */
		case FC_TOP_PT_PT:
			/*
			 * call get_port_map() and update routing table
			 */
			fcip_rt_update(fptr, changelist, listlen);
			break;
		default:
			FCIP_DEBUG(FCIP_DEBUG_ELS,
			    (CE_NOTE, "Statec_cb: Unknown topology"));
		}

		/*
		 * We should now enable the Queues and permit I/Os
		 * to flow through downstream. The update of routing
		 * table should have flushed out any port entries that
		 * don't exist or are not available after the state change
		 */
		mutex_enter(&fptr->fcip_mutex);
		fptr->fcip_port_state = FCIP_PORT_ONLINE;
		if (fptr->fcip_flags & FCIP_LINK_DOWN) {
			fptr->fcip_flags &= ~FCIP_LINK_DOWN;
		}
		mutex_exit(&fptr->fcip_mutex);

		/*
		 * Enable write queues
		 */
		rw_enter(&fcipstruplock, RW_READER);
		for (slp = fcipstrup; slp != NULL; slp = slp->sl_nextp) {
			/* only kick streams bound to this fcip instance */
			if (slp && slp->sl_fcip == fptr) {
				wrq = WR(slp->sl_rq);
				if (wrq->q_flag & QFULL) {
					qenable(wrq);
				}
			}
		}
		rw_exit(&fcipstruplock);
		break;
	case FC_STATE_OFFLINE:
		/*
		 * mark the port_state OFFLINE and wait for it to
		 * become online. Any new messages in this state will
		 * simply be queued back up. If the port does not
		 * come online in a short while, we can begin failing
		 * messages and flush the routing table
		 */
		mutex_enter(&fptr->fcip_mutex);
		fptr->fcip_mark_offline = fptr->fcip_timeout_ticks +
		    FCIP_OFFLINE_TIMEOUT;
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		mutex_exit(&fptr->fcip_mutex);

		/*
		 * Mark all Routing table entries as invalid to prevent
		 * any commands from trickling through to ports that
		 * have disappeared from under us
		 */
		mutex_enter(&fptr->fcip_rt_mutex);
		for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
			frtp = fptr->fcip_rtable[index];
			while (frtp) {
				frtp->fcipr_state = PORT_DEVICE_INVALID;
				frtp = frtp->fcipr_next;
			}
		}
		mutex_exit(&fptr->fcip_rt_mutex);

		break;

	case FC_STATE_RESET_REQUESTED:
		/*
		 * Release all Unsolicited buffers back to transport/FCA.
		 * This also means the port state is marked offline - so
		 * we may have to do what OFFLINE state requires us to do.
		 * Care must be taken to wait for any active unsolicited
		 * buffer with the other Streams modules - so wait for
		 * a freeb if the unsolicited buffer is passed back all
		 * the way upstream.
		 */
		mutex_enter(&fptr->fcip_mutex);

#ifdef FCIP_ESBALLOC
		/* block until all esballoc'ed UBs come back from upstream */
		while (fptr->fcip_ub_upstream) {
			cv_wait(&fptr->fcip_ub_cv, &fptr->fcip_mutex);
		}
#endif	/* FCIP_ESBALLOC */

		fptr->fcip_mark_offline = fptr->fcip_timeout_ticks +
		    FCIP_OFFLINE_TIMEOUT;
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		mutex_exit(&fptr->fcip_mutex);
		break;

	case FC_STATE_DEVICE_CHANGE:
		if (listlen) {
			fcip_rt_update(fptr, changelist, listlen);
		}
		break;
	case FC_STATE_RESET:
		/*
		 * Not much to do I guess - wait for port to become
		 * ONLINE. If the port doesn't become online in a short
		 * while, the upper layers abort any request themselves.
		 * We can just putback the messages in the streams queues
		 * if the link is offline
		 */
		break;
	}
	/* state change fully handled; allow SUSPEND/POWER DOWN to proceed */
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~(FCIP_IN_SC_CB);
	mutex_exit(&fptr->fcip_mutex);
}
2479
2480/*
2481 * Given a port handle, return the fcip_port_info structure corresponding
2482 * to that port handle. The transport allocates and communicates with
2483 * ULPs using port handles
2484 */
2485static fcip_port_info_t *
2486fcip_get_port(opaque_t phandle)
2487{
2488	fcip_port_info_t *fport;
2489
2490	ASSERT(phandle != NULL);
2491
2492	mutex_enter(&fcip_global_mutex);
2493	fport = fcip_port_head;
2494
2495	while (fport != NULL) {
2496		if (fport->fcipp_handle == phandle) {
2497			/* found */
2498			break;
2499		}
2500		fport = fport->fcipp_next;
2501	}
2502
2503	mutex_exit(&fcip_global_mutex);
2504
2505	return (fport);
2506}
2507
2508/*
2509 * Handle inbound ELS requests received by the transport. We are only
2510 * intereseted in FARP/InARP mostly.
2511 */
2512/* ARGSUSED */
static int
fcip_els_cb(opaque_t ulp_handle, opaque_t phandle,
    fc_unsol_buf_t *buf, uint32_t claimed)
{
	fcip_port_info_t	*fport;
	struct fcip 		*fptr;
	int			instance;
	uchar_t			r_ctl;
	uchar_t			ls_code;
	la_els_farp_t		farp_cmd;
	la_els_farp_t		*fcmd;
	int			rval = FC_UNCLAIMED;

	/* map the transport's port handle back to our per-port state */
	fport = fcip_get_port(phandle);
	if (fport == NULL) {
		return (FC_UNCLAIMED);
	}

	fptr = fport->fcipp_fcip;
	ASSERT(fptr != NULL);
	if (fptr == NULL) {
		return (FC_UNCLAIMED);
	}

	instance = ddi_get_instance(fport->fcipp_dip);

	/* decline the ELS if we are detaching, suspended or powered down */
	mutex_enter(&fptr->fcip_mutex);
	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
		mutex_exit(&fptr->fcip_mutex);
		return (FC_UNCLAIMED);
	}

	/*
	 * set fcip flags to indicate we are in the middle of a
	 * ELS callback so we can wait till the statechange
	 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
	 */
	fptr->fcip_flags |= FCIP_IN_ELS_CB;
	mutex_exit(&fptr->fcip_mutex);

	FCIP_TNF_PROBE_2((fcip_els_cb, "fcip io", /* CSTYLED */,
		tnf_string, msg, "ELS callback",
		tnf_uint, instance, instance));

	FCIP_DEBUG(FCIP_DEBUG_ELS,
	    (CE_NOTE, "fcip%d, ELS callback , ", instance));

	/* only extended link services (FARP request/reply) interest us */
	r_ctl = buf->ub_frame.r_ctl;
	switch (r_ctl & R_CTL_ROUTING) {
	case R_CTL_EXTENDED_SVC:
		if (r_ctl == R_CTL_ELS_REQ) {
			/* first payload byte carries the LS command code */
			ls_code = buf->ub_buffer[0];
			if (ls_code == LA_ELS_FARP_REQ) {
				/*
				 * Inbound FARP broadcast request
				 */
				if (buf->ub_bufsize != sizeof (la_els_farp_t)) {
					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
					    "Invalid FARP req buffer size "
					    "expected 0x%lx, got 0x%x",
					    (long)(sizeof (la_els_farp_t)),
					    buf->ub_bufsize));
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
				fcmd = (la_els_farp_t *)buf;
				/*
				 * Only respond if the request is aimed at
				 * our own node WWN.
				 */
				if (fcip_wwn_compare(&fcmd->resp_nwwn,
				    &fport->fcipp_nwwn,
				    FCIP_COMPARE_NWWN) != 0) {
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
				/*
				 * copy the FARP request and release the
				 * unsolicited buffer
				 */
				fcmd = &farp_cmd;
				bcopy((void *)buf, (void *)fcmd,
				    sizeof (la_els_farp_t));
				(void) fc_ulp_ubrelease(fport->fcipp_handle, 1,
				    &buf->ub_token);

				if (fcip_farp_supported &&
				    fcip_handle_farp_request(fptr, fcmd) ==
				    FC_SUCCESS) {
					/*
					 * We successfully sent out a FARP
					 * reply to the requesting port
					 */
					rval = FC_SUCCESS;
					goto els_cb_done;
				} else {
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
			}
		} else if (r_ctl == R_CTL_ELS_RSP) {
			ls_code = buf->ub_buffer[0];
			if (ls_code == LA_ELS_FARP_REPLY) {
				/*
				 * We received a REPLY to our FARP request
				 */
				if (buf->ub_bufsize != sizeof (la_els_farp_t)) {
					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
					    "Invalid FARP req buffer size "
					    "expected 0x%lx, got 0x%x",
					    (long)(sizeof (la_els_farp_t)),
					    buf->ub_bufsize));
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
				/* copy out of the UB before releasing it */
				fcmd = &farp_cmd;
				bcopy((void *)buf, (void *)fcmd,
				    sizeof (la_els_farp_t));
				(void) fc_ulp_ubrelease(fport->fcipp_handle, 1,
				    &buf->ub_token);
				if (fcip_farp_supported &&
				    fcip_handle_farp_response(fptr, fcmd) ==
				    FC_SUCCESS) {
					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_NOTE,
					    "Successfully recevied a FARP "
					    "response"));
					/*
					 * wake the thread blocked in the
					 * FARP transmit path waiting for
					 * this response
					 */
					mutex_enter(&fptr->fcip_mutex);
					fptr->fcip_farp_rsp_flag = 1;
					cv_signal(&fptr->fcip_farp_cv);
					mutex_exit(&fptr->fcip_mutex);
					rval = FC_SUCCESS;
					goto els_cb_done;
				} else {
					FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
					    "Unable to handle a FARP response "
					    "receive"));
					rval = FC_UNCLAIMED;
					goto els_cb_done;
				}
			}
		}
		break;
	default:
		break;
	}
els_cb_done:
	/* ELS fully handled; allow SUSPEND/POWER DOWN to proceed */
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~(FCIP_IN_ELS_CB);
	mutex_exit(&fptr->fcip_mutex);
	return (rval);
}
2661
2662
2663/*
2664 * Handle inbound FARP requests
2665 */
2666static int
2667fcip_handle_farp_request(struct fcip *fptr, la_els_farp_t *fcmd)
2668{
2669	fcip_pkt_t		*fcip_pkt;
2670	fc_packet_t		*fc_pkt;
2671	fcip_port_info_t	*fport = fptr->fcip_port_info;
2672	int			rval = FC_FAILURE;
2673	opaque_t		fca_dev;
2674	fc_portmap_t 		map;
2675	struct fcip_routing_table *frp;
2676	struct fcip_dest *fdestp;
2677
2678	/*
2679	 * Add an entry for the remote port into our routing and destination
2680	 * tables.
2681	 */
2682	map.map_did = fcmd->req_id;
2683	map.map_hard_addr.hard_addr = fcmd->req_id.port_id;
2684	map.map_state = PORT_DEVICE_VALID;
2685	map.map_type = PORT_DEVICE_NEW;
2686	map.map_flags = 0;
2687	map.map_pd = NULL;
2688	bcopy((void *)&fcmd->req_pwwn, (void *)&map.map_pwwn,
2689	    sizeof (la_wwn_t));
2690	bcopy((void *)&fcmd->req_nwwn, (void *)&map.map_nwwn,
2691	    sizeof (la_wwn_t));
2692	fcip_rt_update(fptr, &map, 1);
2693	mutex_enter(&fptr->fcip_rt_mutex);
2694	frp = fcip_lookup_rtable(fptr, &fcmd->req_pwwn, FCIP_COMPARE_NWWN);
2695	mutex_exit(&fptr->fcip_rt_mutex);
2696
2697	fdestp = fcip_add_dest(fptr, frp);
2698
2699	fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_farp_t),
2700	    sizeof (la_els_farp_t), NULL, KM_SLEEP);
2701	if (fcip_pkt == NULL) {
2702		rval = FC_FAILURE;
2703		goto farp_done;
2704	}
2705	/*
2706	 * Fill in our port's PWWN and NWWN
2707	 */
2708	fcmd->resp_pwwn = fport->fcipp_pwwn;
2709	fcmd->resp_nwwn = fport->fcipp_nwwn;
2710
2711	fcip_init_unicast_pkt(fcip_pkt, fport->fcipp_sid,
2712	    fcmd->req_id, NULL);
2713
2714	fca_dev =
2715	    fc_ulp_get_fca_device(fport->fcipp_handle, fcmd->req_id);
2716	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
2717	fc_pkt->pkt_cmd_fhdr.r_ctl = R_CTL_ELS_RSP;
2718	fc_pkt->pkt_fca_device = fca_dev;
2719	fcip_pkt->fcip_pkt_dest = fdestp;
2720
2721	/*
2722	 * Attempt a PLOGI again
2723	 */
2724	if (fcmd->resp_flags & FARP_INIT_P_LOGI) {
2725		if (fcip_do_plogi(fptr, frp) != FC_SUCCESS) {
2726			/*
2727			 * Login to the remote port failed. There is no
2728			 * point continuing with the FARP request further
2729			 * so bail out here.
2730			 */
2731			frp->fcipr_state = PORT_DEVICE_INVALID;
2732			rval = FC_FAILURE;
2733			goto farp_done;
2734		}
2735	}
2736
2737	FCIP_CP_OUT(fcmd, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc,
2738	    sizeof (la_els_farp_t));
2739
2740	rval = fc_ulp_issue_els(fport->fcipp_handle, fc_pkt);
2741	if (rval != FC_SUCCESS) {
2742		FCIP_TNF_PROBE_2((fcip_handle_farp_request, "fcip io",
2743		    /* CSTYLED */, tnf_string, msg,
2744		    "fcip_transport of farp reply failed",
2745		    tnf_uint, rval, rval));
2746		FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
2747		    "fcip_transport of farp reply failed 0x%x", rval));
2748	}
2749
2750farp_done:
2751	return (rval);
2752}
2753
2754
2755/*
2756 * Handle FARP responses to our FARP requests. When we receive a FARP
2757 * reply, we need to add the entry for the Port that replied into our
2758 * routing and destination hash tables. It is possible that the remote
2759 * port did not login into us (FARP responses can be received without
2760 * a PLOGI)
2761 */
2762static int
2763fcip_handle_farp_response(struct fcip *fptr, la_els_farp_t *fcmd)
2764{
2765	int			rval = FC_FAILURE;
2766	fc_portmap_t 		map;
2767	struct fcip_routing_table *frp;
2768	struct fcip_dest *fdestp;
2769
2770	/*
2771	 * Add an entry for the remote port into our routing and destination
2772	 * tables.
2773	 */
2774	map.map_did = fcmd->dest_id;
2775	map.map_hard_addr.hard_addr = fcmd->dest_id.port_id;
2776	map.map_state = PORT_DEVICE_VALID;
2777	map.map_type = PORT_DEVICE_NEW;
2778	map.map_flags = 0;
2779	map.map_pd = NULL;
2780	bcopy((void *)&fcmd->resp_pwwn, (void *)&map.map_pwwn,
2781	    sizeof (la_wwn_t));
2782	bcopy((void *)&fcmd->resp_nwwn, (void *)&map.map_nwwn,
2783	    sizeof (la_wwn_t));
2784	fcip_rt_update(fptr, &map, 1);
2785	mutex_enter(&fptr->fcip_rt_mutex);
2786	frp = fcip_lookup_rtable(fptr, &fcmd->resp_pwwn, FCIP_COMPARE_NWWN);
2787	mutex_exit(&fptr->fcip_rt_mutex);
2788
2789	fdestp = fcip_add_dest(fptr, frp);
2790
2791	if (fdestp != NULL) {
2792		rval = FC_SUCCESS;
2793	}
2794	return (rval);
2795}
2796
2797
/*
 * Total length of the FC-PH network header, LLC/SNAP header and IP
 * header.  The expansion is parenthesized so the macro evaluates
 * correctly inside larger expressions (e.g. "x - FCIP_HDRS_LENGTH").
 */
#define	FCIP_HDRS_LENGTH	\
	(sizeof (fcph_network_hdr_t)+sizeof (llc_snap_hdr_t)+sizeof (ipha_t))
2800
2801/*
2802 * fcip_data_cb is the heart of most IP operations. This routine is called
2803 * by the transport when any unsolicited IP data arrives at a port (which
2804 * is almost all IP data). This routine then strips off the Network header
2805 * from the payload (after authenticating the received payload ofcourse),
2806 * creates a message blk and sends the data upstream. You will see ugly
2807 * #defines because of problems with using esballoc() as opposed to
2808 * allocb to prevent an extra copy of data. We should probably move to
2809 * esballoc entirely when the MTU eventually will be larger than 1500 bytes
2810 * since copies will get more expensive then. At 1500 byte MTUs, there is
2811 * no noticable difference between using allocb and esballoc. The other
2812 * caveat is that the qlc firmware still cannot tell us accurately the
2813 * no. of valid bytes in the unsol buffer it DMA'ed so we have to resort
2814 * to looking into the IP header and hoping that the no. of bytes speficified
2815 * in the header was actually received.
2816 */
2817/* ARGSUSED */
2818static int
2819fcip_data_cb(opaque_t ulp_handle, opaque_t phandle,
2820    fc_unsol_buf_t *buf, uint32_t claimed)
2821{
2822	fcip_port_info_t		*fport;
2823	struct fcip 			*fptr;
2824	fcph_network_hdr_t		*nhdr;
2825	llc_snap_hdr_t			*snaphdr;
2826	mblk_t				*bp;
2827	uint32_t 			len;
2828	uint32_t			hdrlen;
2829	ushort_t			type;
2830	ipha_t				*iphdr;
2831	int				rval;
2832
2833#ifdef FCIP_ESBALLOC
2834	frtn_t				*free_ubuf;
2835	struct fcip_esballoc_arg	*fesb_argp;
2836#endif /* FCIP_ESBALLOC */
2837
2838	fport = fcip_get_port(phandle);
2839	if (fport == NULL) {
2840		return (FC_UNCLAIMED);
2841	}
2842
2843	fptr = fport->fcipp_fcip;
2844	ASSERT(fptr != NULL);
2845
2846	if (fptr == NULL) {
2847		return (FC_UNCLAIMED);
2848	}
2849
2850	mutex_enter(&fptr->fcip_mutex);
2851	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
2852	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
2853		mutex_exit(&fptr->fcip_mutex);
2854		rval = FC_UNCLAIMED;
2855		goto data_cb_done;
2856	}
2857
2858	/*
2859	 * set fcip flags to indicate we are in the middle of a
2860	 * data callback so we can wait till the statechange
2861	 * is handled before succeeding/failing the SUSPEND/POWER DOWN.
2862	 */
2863	fptr->fcip_flags |= FCIP_IN_DATA_CB;
2864	mutex_exit(&fptr->fcip_mutex);
2865
2866	FCIP_TNF_PROBE_2((fcip_data_cb, "fcip io", /* CSTYLED */,
2867		tnf_string, msg, "data callback",
2868		tnf_int, instance, ddi_get_instance(fport->fcipp_dip)));
2869	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2870	    (CE_NOTE, "fcip%d, data callback",
2871	    ddi_get_instance(fport->fcipp_dip)));
2872
2873	/*
2874	 * get to the network and snap headers in the payload
2875	 */
2876	nhdr = (fcph_network_hdr_t *)buf->ub_buffer;
2877	snaphdr = (llc_snap_hdr_t *)(buf->ub_buffer +
2878	    sizeof (fcph_network_hdr_t));
2879
2880	hdrlen = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t);
2881
2882	/*
2883	 * get the IP header to obtain the no. of bytes we need to read
2884	 * off from the unsol buffer. This obviously is because not all
2885	 * data fills up the unsol buffer completely and the firmware
2886	 * doesn't tell us how many valid bytes are in there as well
2887	 */
2888	iphdr = (ipha_t *)(buf->ub_buffer + hdrlen);
2889	snaphdr->pid = BE_16(snaphdr->pid);
2890	type = snaphdr->pid;
2891
2892	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2893	    (CE_CONT, "SNAPHDR: dsap %x, ssap %x, ctrl %x\n",
2894	    snaphdr->dsap, snaphdr->ssap, snaphdr->ctrl));
2895
2896	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2897	    (CE_CONT, "oui[0] 0x%x oui[1] 0x%x oui[2] 0x%x pid 0x%x\n",
2898	    snaphdr->oui[0], snaphdr->oui[1], snaphdr->oui[2], snaphdr->pid));
2899
2900	/* Authneticate, Authenticate */
2901	if (type == ETHERTYPE_IP) {
2902		len = hdrlen + BE_16(iphdr->ipha_length);
2903	} else if (type == ETHERTYPE_ARP) {
2904		len = hdrlen + 28;
2905	} else {
2906		len = buf->ub_bufsize;
2907	}
2908
2909	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2910	    (CE_CONT, "effective packet length is %d bytes.\n", len));
2911
2912	if (len < hdrlen || len > FCIP_UB_SIZE) {
2913		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2914		    (CE_NOTE, "Incorrect buffer size %d bytes", len));
2915		rval = FC_UNCLAIMED;
2916		goto data_cb_done;
2917	}
2918
2919	if (buf->ub_frame.type != FC_TYPE_IS8802_SNAP) {
2920		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "Not IP/ARP data"));
2921		rval = FC_UNCLAIMED;
2922		goto data_cb_done;
2923	}
2924
2925	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "checking wwn"));
2926
2927	if ((fcip_wwn_compare(&nhdr->net_dest_addr, &fport->fcipp_pwwn,
2928	    FCIP_COMPARE_NWWN) != 0) &&
2929	    (!IS_BROADCAST_ADDR(&nhdr->net_dest_addr))) {
2930		rval = FC_UNCLAIMED;
2931		goto data_cb_done;
2932	} else if (fcip_cache_on_arp_broadcast &&
2933	    IS_BROADCAST_ADDR(&nhdr->net_dest_addr)) {
2934		fcip_cache_arp_broadcast(fptr, buf);
2935	}
2936
2937	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_NOTE, "Allocate streams block"));
2938
2939	/*
2940	 * Using esballoc instead of allocb should be faster, atleast at
2941	 * larger MTUs than 1500 bytes. Someday we'll get there :)
2942	 */
2943#if defined(FCIP_ESBALLOC)
2944	/*
2945	 * allocate memory for the frtn function arg. The Function
2946	 * (fcip_ubfree) arg is a struct fcip_esballoc_arg type
2947	 * which contains pointers to the unsol buffer and the
2948	 * opaque port handle for releasing the unsol buffer back to
2949	 * the FCA for reuse
2950	 */
2951	fesb_argp = (struct fcip_esballoc_arg *)
2952	    kmem_zalloc(sizeof (struct fcip_esballoc_arg), KM_NOSLEEP);
2953
2954	if (fesb_argp == NULL) {
2955		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2956		    (CE_WARN, "esballoc of mblk failed in data_cb"));
2957		rval = FC_UNCLAIMED;
2958		goto data_cb_done;
2959	}
2960	/*
2961	 * Check with KM_NOSLEEP
2962	 */
2963	free_ubuf = (frtn_t *)kmem_zalloc(sizeof (frtn_t), KM_NOSLEEP);
2964	if (free_ubuf == NULL) {
2965		kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg));
2966		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2967		    (CE_WARN, "esballoc of mblk failed in data_cb"));
2968		rval = FC_UNCLAIMED;
2969		goto data_cb_done;
2970	}
2971
2972	fesb_argp->frtnp = free_ubuf;
2973	fesb_argp->buf = buf;
2974	fesb_argp->phandle = phandle;
2975	free_ubuf->free_func = fcip_ubfree;
2976	free_ubuf->free_arg = (char *)fesb_argp;
2977	if ((bp = (mblk_t *)esballoc((unsigned char *)buf->ub_buffer,
2978	    len, BPRI_MED, free_ubuf)) == NULL) {
2979		kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg));
2980		kmem_free(free_ubuf, sizeof (frtn_t));
2981		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2982		    (CE_WARN, "esballoc of mblk failed in data_cb"));
2983		rval = FC_UNCLAIMED;
2984		goto data_cb_done;
2985	}
2986#elif !defined(FCIP_ESBALLOC)
2987	/*
2988	 * allocate streams mblk and copy the contents of the
2989	 * unsolicited buffer into this newly alloc'ed mblk
2990	 */
2991	if ((bp = (mblk_t *)fcip_allocb((size_t)len, BPRI_LO)) == NULL) {
2992		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
2993		    (CE_WARN, "alloc of mblk failed in data_cb"));
2994		rval = FC_UNCLAIMED;
2995		goto data_cb_done;
2996	}
2997
2998	/*
2999	 * Unsolicited buffers handed up to us from the FCA must be
3000	 * endian clean so just bcopy the data into our mblk. Else
3001	 * we may have to either copy the data byte by byte or
3002	 * use the ddi_rep_get* routines to do the copy for us.
3003	 */
3004	bcopy(buf->ub_buffer, bp->b_rptr, len);
3005
3006	/*
3007	 * for esballoc'ed mblks - free the UB in the frtn function
3008	 * along with the memory allocated for the function arg.
3009	 * for allocb'ed mblk - release the unsolicited buffer here
3010	 */
3011	(void) fc_ulp_ubrelease(phandle, 1, &buf->ub_token);
3012
3013#endif	/* FCIP_ESBALLOC */
3014
3015	bp->b_wptr = bp->b_rptr + len;
3016	fptr->fcip_ipackets++;
3017
3018	if (type == ETHERTYPE_IP) {
3019		mutex_enter(&fptr->fcip_mutex);
3020		fptr->fcip_ub_upstream++;
3021		mutex_exit(&fptr->fcip_mutex);
3022		bp->b_rptr += hdrlen;
3023
3024		/*
3025		 * Check if ipq is valid in the sendup thread
3026		 */
3027		if (fcip_sendup_alloc_enque(fptr, bp, NULL) != FC_SUCCESS) {
3028			freemsg(bp);
3029		}
3030	} else {
3031		/*
3032		 * We won't get ethernet 802.3 packets in FCIP but we may get
3033		 * types other than ETHERTYPE_IP, such as ETHERTYPE_ARP. Let
3034		 * fcip_sendup() do the matching.
3035		 */
3036		mutex_enter(&fptr->fcip_mutex);
3037		fptr->fcip_ub_upstream++;
3038		mutex_exit(&fptr->fcip_mutex);
3039		if (fcip_sendup_alloc_enque(fptr, bp,
3040		    fcip_accept) != FC_SUCCESS) {
3041			freemsg(bp);
3042		}
3043	}
3044
3045	rval = FC_SUCCESS;
3046
3047	/*
3048	 * Unset fcip_flags to indicate we are out of callback and return
3049	 */
3050data_cb_done:
3051	mutex_enter(&fptr->fcip_mutex);
3052	fptr->fcip_flags &= ~(FCIP_IN_DATA_CB);
3053	mutex_exit(&fptr->fcip_mutex);
3054	return (rval);
3055}
3056
3057#if !defined(FCIP_ESBALLOC)
3058/*
3059 * Allocate a message block for the inbound data to be sent upstream.
3060 */
3061static void *
3062fcip_allocb(size_t size, uint_t pri)
3063{
3064	mblk_t	*mp;
3065
3066	if ((mp = allocb(size, pri)) == NULL) {
3067		return (NULL);
3068	}
3069	return (mp);
3070}
3071
3072#endif
3073
3074/*
3075 * This helper routine kmem cache alloc's a sendup element for enquing
3076 * into the sendup list for callbacks upstream from the dedicated sendup
3077 * thread. We enque the msg buf into the sendup list and cv_signal the
3078 * sendup thread to finish the callback for us.
3079 */
3080static int
3081fcip_sendup_alloc_enque(struct fcip *fptr, mblk_t *mp, struct fcipstr *(*f)())
3082{
3083	struct fcip_sendup_elem 	*msg_elem;
3084	int				rval = FC_FAILURE;
3085
3086	FCIP_TNF_PROBE_1((fcip_sendup_alloc_enque, "fcip io", /* CSTYLED */,
3087		tnf_string, msg, "sendup msg enque"));
3088	msg_elem = kmem_cache_alloc(fptr->fcip_sendup_cache, KM_NOSLEEP);
3089	if (msg_elem == NULL) {
3090		/* drop pkt to floor - update stats */
3091		rval = FC_FAILURE;
3092		goto sendup_alloc_done;
3093	}
3094	msg_elem->fcipsu_mp = mp;
3095	msg_elem->fcipsu_func = f;
3096
3097	mutex_enter(&fptr->fcip_sendup_mutex);
3098	if (fptr->fcip_sendup_head == NULL) {
3099		fptr->fcip_sendup_head = fptr->fcip_sendup_tail = msg_elem;
3100	} else {
3101		fptr->fcip_sendup_tail->fcipsu_next = msg_elem;
3102		fptr->fcip_sendup_tail = msg_elem;
3103	}
3104	fptr->fcip_sendup_cnt++;
3105	cv_signal(&fptr->fcip_sendup_cv);
3106	mutex_exit(&fptr->fcip_sendup_mutex);
3107	rval = FC_SUCCESS;
3108
3109sendup_alloc_done:
3110	return (rval);
3111}
3112
3113/*
3114 * One of the ways of performing the WWN to D_ID mapping required for
3115 * IPFC data is to cache the unsolicited ARP broadcast messages received
3116 * and update the routing table to add entry for the destination port
3117 * if we are the intended recipient of the ARP broadcast message. This is
3118 * one of the methods recommended in the rfc to obtain the WWN to D_ID mapping
3119 * but is not typically used unless enabled. The driver prefers to use the
3120 * nameserver/lilp map to obtain this mapping.
3121 */
3122static void
3123fcip_cache_arp_broadcast(struct fcip *fptr, fc_unsol_buf_t *buf)
3124{
3125	fcip_port_info_t		*fport;
3126	fcph_network_hdr_t		*nhdr;
3127	struct fcip_routing_table	*frp;
3128	fc_portmap_t			map;
3129
3130	fport = fptr->fcip_port_info;
3131	if (fport == NULL) {
3132		return;
3133	}
3134	ASSERT(fport != NULL);
3135
3136	nhdr = (fcph_network_hdr_t *)buf->ub_buffer;
3137
3138	mutex_enter(&fptr->fcip_rt_mutex);
3139	frp = fcip_lookup_rtable(fptr, &nhdr->net_src_addr, FCIP_COMPARE_NWWN);
3140	mutex_exit(&fptr->fcip_rt_mutex);
3141	if (frp == NULL) {
3142		map.map_did.port_id = buf->ub_frame.s_id;
3143		map.map_hard_addr.hard_addr = buf->ub_frame.s_id;
3144		map.map_state = PORT_DEVICE_VALID;
3145		map.map_type = PORT_DEVICE_NEW;
3146		map.map_flags = 0;
3147		map.map_pd = NULL;
3148		bcopy((void *)&nhdr->net_src_addr, (void *)&map.map_pwwn,
3149		    sizeof (la_wwn_t));
3150		bcopy((void *)&nhdr->net_src_addr, (void *)&map.map_nwwn,
3151		    sizeof (la_wwn_t));
3152		fcip_rt_update(fptr, &map, 1);
3153		mutex_enter(&fptr->fcip_rt_mutex);
3154		frp = fcip_lookup_rtable(fptr, &nhdr->net_src_addr,
3155		    FCIP_COMPARE_NWWN);
3156		mutex_exit(&fptr->fcip_rt_mutex);
3157
3158		(void) fcip_add_dest(fptr, frp);
3159	}
3160
3161}
3162
3163/*
3164 * This is a dedicated thread to do callbacks from fcip's data callback
3165 * routines into the modules upstream. The reason for this thread is
3166 * the data callback function can be called from an interrupt context and
3167 * the upstream modules *can* make calls downstream in the same thread
3168 * context. If the call is to a fabric port which is not yet in our
3169 * routing tables, we may have to query the nameserver/fabric for the
3170 * MAC addr to Port_ID mapping which may be blocking calls.
3171 */
3172static void
3173fcip_sendup_thr(void *arg)
3174{
3175	struct fcip		*fptr = (struct fcip *)arg;
3176	struct fcip_sendup_elem	*msg_elem;
3177	queue_t			*ip4q = NULL;
3178
3179	CALLB_CPR_INIT(&fptr->fcip_cpr_info, &fptr->fcip_sendup_mutex,
3180	    callb_generic_cpr, "fcip_sendup_thr");
3181
3182	mutex_enter(&fptr->fcip_sendup_mutex);
3183	for (;;) {
3184
3185		while (fptr->fcip_sendup_thr_initted &&
3186		    fptr->fcip_sendup_head == NULL) {
3187			CALLB_CPR_SAFE_BEGIN(&fptr->fcip_cpr_info);
3188			cv_wait(&fptr->fcip_sendup_cv,
3189			    &fptr->fcip_sendup_mutex);
3190			CALLB_CPR_SAFE_END(&fptr->fcip_cpr_info,
3191			    &fptr->fcip_sendup_mutex);
3192		}
3193
3194		if (fptr->fcip_sendup_thr_initted == 0) {
3195			break;
3196		}
3197
3198		FCIP_TNF_PROBE_1((fcip_sendup_thr, "fcip io", /* CSTYLED */,
3199		    tnf_string, msg, "fcip sendup thr - new msg"));
3200
3201		msg_elem = fptr->fcip_sendup_head;
3202		fptr->fcip_sendup_head = msg_elem->fcipsu_next;
3203		msg_elem->fcipsu_next = NULL;
3204		mutex_exit(&fptr->fcip_sendup_mutex);
3205
3206		if (msg_elem->fcipsu_func == NULL) {
3207			/*
3208			 * Message for ipq. Check to see if the ipq is
3209			 * is still valid. Since the thread is asynchronous,
3210			 * there could have been a close on the stream
3211			 */
3212			mutex_enter(&fptr->fcip_mutex);
3213			if (fptr->fcip_ipq && canputnext(fptr->fcip_ipq)) {
3214				ip4q = fptr->fcip_ipq;
3215				mutex_exit(&fptr->fcip_mutex);
3216				putnext(ip4q, msg_elem->fcipsu_mp);
3217			} else {
3218				mutex_exit(&fptr->fcip_mutex);
3219				freemsg(msg_elem->fcipsu_mp);
3220			}
3221		} else {
3222			fcip_sendup(fptr, msg_elem->fcipsu_mp,
3223			    msg_elem->fcipsu_func);
3224		}
3225
3226#if !defined(FCIP_ESBALLOC)
3227		/*
3228		 * for allocb'ed mblk - decrement upstream count here
3229		 */
3230		mutex_enter(&fptr->fcip_mutex);
3231		ASSERT(fptr->fcip_ub_upstream > 0);
3232		fptr->fcip_ub_upstream--;
3233		mutex_exit(&fptr->fcip_mutex);
3234#endif /* FCIP_ESBALLOC */
3235
3236		kmem_cache_free(fptr->fcip_sendup_cache, (void *)msg_elem);
3237		mutex_enter(&fptr->fcip_sendup_mutex);
3238		fptr->fcip_sendup_cnt--;
3239	}
3240
3241
3242#ifndef	__lock_lint
3243	CALLB_CPR_EXIT(&fptr->fcip_cpr_info);
3244#else
3245	mutex_exit(&fptr->fcip_sendup_mutex);
3246#endif /* __lock_lint */
3247
3248	/* Wake up fcip detach thread by the end */
3249	cv_signal(&fptr->fcip_sendup_cv);
3250
3251	thread_exit();
3252}
3253
3254#ifdef FCIP_ESBALLOC
3255
3256/*
3257 * called from the stream head when it is done using an unsolicited buffer.
3258 * We release this buffer then to the FCA for reuse.
3259 */
3260static void
3261fcip_ubfree(char *arg)
3262{
3263	struct fcip_esballoc_arg *fesb_argp = (struct fcip_esballoc_arg *)arg;
3264	fc_unsol_buf_t	*ubuf;
3265	frtn_t		*frtnp;
3266	fcip_port_info_t		*fport;
3267	struct fcip 			*fptr;
3268
3269
3270	fport = fcip_get_port(fesb_argp->phandle);
3271	fptr = fport->fcipp_fcip;
3272
3273	ASSERT(fesb_argp != NULL);
3274	ubuf = fesb_argp->buf;
3275	frtnp = fesb_argp->frtnp;
3276
3277
3278	FCIP_DEBUG(FCIP_DEBUG_UPSTREAM,
3279	    (CE_WARN, "freeing ubuf after esballoc in fcip_ubfree"));
3280	(void) fc_ulp_ubrelease(fesb_argp->phandle, 1, &ubuf->ub_token);
3281
3282	mutex_enter(&fptr->fcip_mutex);
3283	ASSERT(fptr->fcip_ub_upstream > 0);
3284	fptr->fcip_ub_upstream--;
3285	cv_signal(&fptr->fcip_ub_cv);
3286	mutex_exit(&fptr->fcip_mutex);
3287
3288	kmem_free(frtnp, sizeof (frtn_t));
3289	kmem_free(fesb_argp, sizeof (struct fcip_esballoc_arg));
3290}
3291
3292#endif /* FCIP_ESBALLOC */
3293
3294/*
3295 * handle data other than that of type ETHERTYPE_IP and send it on its
3296 * way upstream to the right streams module to handle
3297 */
3298static void
3299fcip_sendup(struct fcip *fptr, mblk_t *mp, struct fcipstr *(*acceptfunc)())
3300{
3301	struct fcipstr	*slp, *nslp;
3302	la_wwn_t	*dhostp;
3303	mblk_t		*nmp;
3304	uint32_t 	isgroupaddr;
3305	int 		type;
3306	uint32_t	hdrlen;
3307	fcph_network_hdr_t	*nhdr;
3308	llc_snap_hdr_t		*snaphdr;
3309
3310	FCIP_TNF_PROBE_1((fcip_sendup, "fcip io", /* CSTYLED */,
3311		tnf_string, msg, "fcip sendup"));
3312	nhdr = (fcph_network_hdr_t *)mp->b_rptr;
3313	snaphdr =
3314	    (llc_snap_hdr_t *)(mp->b_rptr + sizeof (fcph_network_hdr_t));
3315	dhostp = &nhdr->net_dest_addr;
3316	type = snaphdr->pid;
3317	hdrlen = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t);
3318
3319	/* No group address with fibre channel */
3320	isgroupaddr = 0;
3321
3322	/*
3323	 * While holding a reader lock on the linked list of streams structures,
3324	 * attempt to match the address criteria for each stream
3325	 * and pass up the raw M_DATA ("fastpath") or a DL_UNITDATA_IND.
3326	 */
3327
3328	rw_enter(&fcipstruplock, RW_READER);
3329
3330	if ((slp = (*acceptfunc)(fcipstrup, fptr, type, dhostp)) == NULL) {
3331		rw_exit(&fcipstruplock);
3332		FCIP_TNF_PROBE_1((fcip_sendup, "fcip io", /* CSTYLED */,
3333		    tnf_string, msg, "fcip sendup - no slp"));
3334		freemsg(mp);
3335		return;
3336	}
3337
3338	/*
3339	 * Loop on matching open streams until (*acceptfunc)() returns NULL.
3340	 */
3341	for (; nslp = (*acceptfunc)(slp->sl_nextp, fptr, type, dhostp);
3342	    slp = nslp) {
3343		if (canputnext(slp->sl_rq)) {
3344			if (nmp = dupmsg(mp)) {
3345				if ((slp->sl_flags & FCIP_SLFAST) &&
3346							!isgroupaddr) {
3347					nmp->b_rptr += hdrlen;
3348					putnext(slp->sl_rq, nmp);
3349				} else if (slp->sl_flags & FCIP_SLRAW) {
3350					/* No headers when FCIP_SLRAW is set */
3351					putnext(slp->sl_rq, nmp);
3352				} else if ((nmp = fcip_addudind(fptr, nmp,
3353				    nhdr, type))) {
3354					putnext(slp->sl_rq, nmp);
3355				}
3356			}
3357		}
3358	}
3359
3360	/*
3361	 * Do the last one.
3362	 */
3363	if (canputnext(slp->sl_rq)) {
3364		if (slp->sl_flags & FCIP_SLFAST) {
3365			mp->b_rptr += hdrlen;
3366			putnext(slp->sl_rq, mp);
3367		} else if (slp->sl_flags & FCIP_SLRAW) {
3368			putnext(slp->sl_rq, mp);
3369		} else if ((mp = fcip_addudind(fptr, mp, nhdr, type))) {
3370			putnext(slp->sl_rq, mp);
3371		}
3372	} else {
3373		freemsg(mp);
3374	}
3375	FCIP_TNF_PROBE_1((fcip_sendup, "fcip io", /* CSTYLED */,
3376	    tnf_string, msg, "fcip sendup done"));
3377
3378	rw_exit(&fcipstruplock);
3379}
3380
3381/*
3382 * Match the stream based on type and wwn if necessary.
3383 * Destination wwn dhostp is passed to this routine is reserved
3384 * for future usage. We don't need to use it right now since port
3385 * to fcip instance mapping is unique and wwn is already validated when
3386 * packet comes to fcip.
3387 */
3388/* ARGSUSED */
3389static struct fcipstr *
3390fcip_accept(struct fcipstr *slp, struct fcip *fptr, int type, la_wwn_t *dhostp)
3391{
3392	t_uscalar_t 	sap;
3393
3394	FCIP_TNF_PROBE_1((fcip_accept, "fcip io", /* CSTYLED */,
3395	    tnf_string, msg, "fcip accept"));
3396
3397	for (; slp; slp = slp->sl_nextp) {
3398		sap = slp->sl_sap;
3399		FCIP_DEBUG(FCIP_DEBUG_UPSTREAM, (CE_CONT,
3400		    "fcip_accept: checking next sap = %x, type = %x",
3401		    sap, type));
3402
3403		if ((slp->sl_fcip == fptr) && (type == sap)) {
3404			return (slp);
3405		}
3406	}
3407	return (NULL);
3408}
3409
3410/*
3411 * Handle DL_UNITDATA_IND messages
3412 */
3413static mblk_t *
3414fcip_addudind(struct fcip *fptr, mblk_t *mp, fcph_network_hdr_t *nhdr,
3415    int type)
3416{
3417	dl_unitdata_ind_t	*dludindp;
3418	struct	fcipdladdr	*dlap;
3419	mblk_t	*nmp;
3420	int	size;
3421	uint32_t hdrlen;
3422	struct ether_addr	src_addr;
3423	struct ether_addr	dest_addr;
3424
3425
3426	hdrlen = (sizeof (llc_snap_hdr_t) + sizeof (fcph_network_hdr_t));
3427	mp->b_rptr += hdrlen;
3428
3429	FCIP_TNF_PROBE_1((fcip_addudind, "fcip io", /* CSTYLED */,
3430	    tnf_string, msg, "fcip addudind"));
3431
3432	/*
3433	 * Allocate an M_PROTO mblk for the DL_UNITDATA_IND.
3434	 */
3435	size = sizeof (dl_unitdata_ind_t) + FCIPADDRL + FCIPADDRL;
3436	if ((nmp = allocb(size, BPRI_LO)) == NULL) {
3437		fptr->fcip_allocbfail++;
3438		freemsg(mp);
3439		return (NULL);
3440	}
3441	DB_TYPE(nmp) = M_PROTO;
3442	nmp->b_wptr = nmp->b_datap->db_lim;
3443	nmp->b_rptr = nmp->b_wptr - size;
3444
3445	/*
3446	 * Construct a DL_UNITDATA_IND primitive.
3447	 */
3448	dludindp = (dl_unitdata_ind_t *)nmp->b_rptr;
3449	dludindp->dl_primitive = DL_UNITDATA_IND;
3450	dludindp->dl_dest_addr_length = FCIPADDRL;
3451	dludindp->dl_dest_addr_offset = sizeof (dl_unitdata_ind_t);
3452	dludindp->dl_src_addr_length = FCIPADDRL;
3453	dludindp->dl_src_addr_offset = sizeof (dl_unitdata_ind_t) + FCIPADDRL;
3454	dludindp->dl_group_address = 0;		/* not DL_MULTI */
3455
3456	dlap = (struct fcipdladdr *)(nmp->b_rptr + sizeof (dl_unitdata_ind_t));
3457	wwn_to_ether(&nhdr->net_dest_addr, &dest_addr);
3458	ether_bcopy(&dest_addr, &dlap->dl_phys);
3459	dlap->dl_sap = (uint16_t)type;
3460
3461	dlap = (struct fcipdladdr *)(nmp->b_rptr + sizeof (dl_unitdata_ind_t)
3462		+ FCIPADDRL);
3463	wwn_to_ether(&nhdr->net_src_addr, &src_addr);
3464	ether_bcopy(&src_addr, &dlap->dl_phys);
3465	dlap->dl_sap = (uint16_t)type;
3466
3467	/*
3468	 * Link the M_PROTO and M_DATA together.
3469	 */
3470	nmp->b_cont = mp;
3471	return (nmp);
3472}
3473
3474
3475/*
3476 * The open routine. For clone opens, we return the next available minor
3477 * no. for the stream to use
3478 */
3479/* ARGSUSED */
3480static int
3481fcip_open(queue_t *rq, dev_t *devp, int flag, int sflag, cred_t *credp)
3482{
3483	struct fcipstr	*slp;
3484	struct fcipstr	**prevslp;
3485	minor_t	minor;
3486
3487	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcip_open"));
3488	FCIP_TNF_PROBE_1((fcip_open, "fcip io", /* CSTYLED */,
3489		tnf_string, msg, "enter"));
3490	/*
3491	 * We need to ensure that the port driver is loaded before
3492	 * we proceed
3493	 */
3494	if (ddi_hold_installed_driver(ddi_name_to_major(PORT_DRIVER)) == NULL) {
3495		/* no port driver instances found */
3496		FCIP_DEBUG(FCIP_DEBUG_STARTUP, (CE_WARN,
3497		    "!ddi_hold_installed_driver of fp failed\n"));
3498		return (ENXIO);
3499	}
3500	/* serialize opens */
3501	rw_enter(&fcipstruplock, RW_WRITER);
3502
3503	prevslp = &fcipstrup;
3504	if (sflag == CLONEOPEN) {
3505		minor = 0;
3506		for (; (slp = *prevslp) != NULL; prevslp = &slp->sl_nextp) {
3507			if (minor < slp->sl_minor) {
3508				break;
3509			}
3510			minor ++;
3511		}
3512		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
3513		    "getmajor returns 0x%x", getmajor(*devp)));
3514		*devp = makedevice(getmajor(*devp), minor);
3515	} else {
3516		minor = getminor(*devp);
3517	}
3518
3519	/*
3520	 * check if our qp's private area is already initialized. If yes
3521	 * the stream is already open - just return
3522	 */
3523	if (rq->q_ptr) {
3524		goto done;
3525	}
3526
3527	slp = GETSTRUCT(struct fcipstr, 1);
3528	slp->sl_minor = minor;
3529	slp->sl_rq = rq;
3530	slp->sl_sap = 0;
3531	slp->sl_flags = 0;
3532	slp->sl_state = DL_UNATTACHED;
3533	slp->sl_fcip = NULL;
3534
3535	mutex_init(&slp->sl_lock, NULL, MUTEX_DRIVER, NULL);
3536
3537	/*
3538	 * link this new stream entry into list of active streams
3539	 */
3540	slp->sl_nextp = *prevslp;
3541	*prevslp = slp;
3542
3543	rq->q_ptr = WR(rq)->q_ptr = (char *)slp;
3544
3545	/*
3546	 * Disable automatic enabling of our write service procedures
3547	 * we need to control this explicitly. This will prevent
3548	 * anyone scheduling of our write service procedures.
3549	 */
3550	noenable(WR(rq));
3551
3552done:
3553	rw_exit(&fcipstruplock);
3554	/*
3555	 * enable our put and service routines on the read side
3556	 */
3557	qprocson(rq);
3558
3559	/*
3560	 * There is only one instance of fcip (instance = 0)
3561	 * for multiple instances of hardware
3562	 */
3563	(void) qassociate(rq, 0);	/* don't allow drcompat to be pushed */
3564	return (0);
3565}
3566
3567/*
3568 * close an opened stream. The minor no. will then be available for
3569 * future opens.
3570 */
3571/* ARGSUSED */
3572static int
3573fcip_close(queue_t *rq, int flag, int otyp, cred_t *credp)
3574{
3575	struct fcipstr *slp;
3576	struct fcipstr **prevslp;
3577
3578	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcip_close"));
3579	FCIP_TNF_PROBE_1((fcip_close, "fcip io", /* CSTYLED */,
3580		tnf_string, msg, "enter"));
3581	ASSERT(rq);
3582	/* we should also have the active stream pointer in q_ptr */
3583	ASSERT(rq->q_ptr);
3584
3585	ddi_rele_driver(ddi_name_to_major(PORT_DRIVER));
3586	/*
3587	 * disable our put and service procedures. We had enabled them
3588	 * on open
3589	 */
3590	qprocsoff(rq);
3591	slp = (struct fcipstr *)rq->q_ptr;
3592
3593	/*
3594	 * Implicitly detach stream  a stream from an interface.
3595	 */
3596	if (slp->sl_fcip) {
3597		fcip_dodetach(slp);
3598	}
3599
3600	(void) qassociate(rq, -1);	/* undo association in open */
3601
3602	rw_enter(&fcipstruplock, RW_WRITER);
3603
3604	/*
3605	 * unlink this stream from the active stream list and free it
3606	 */
3607	for (prevslp = &fcipstrup; (slp = *prevslp) != NULL;
3608	    prevslp = &slp->sl_nextp) {
3609		if (slp == (struct fcipstr *)rq->q_ptr) {
3610			break;
3611		}
3612	}
3613
3614	/* we should have found slp */
3615	ASSERT(slp);
3616
3617	*prevslp = slp->sl_nextp;
3618	mutex_destroy(&slp->sl_lock);
3619	kmem_free(slp, sizeof (struct fcipstr));
3620	rq->q_ptr = WR(rq)->q_ptr = NULL;
3621
3622	rw_exit(&fcipstruplock);
3623	return (0);
3624}
3625
3626/*
3627 * This is not an extension of the DDI_DETACH request. This routine
3628 * only detaches a stream from an interface
3629 */
3630static void
3631fcip_dodetach(struct fcipstr *slp)
3632{
3633	struct fcipstr	*tslp;
3634	struct fcip	*fptr;
3635
3636	FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_NOTE, "in fcip_dodetach"));
3637	FCIP_TNF_PROBE_1((fcip_dodetach, "fcip io", /* CSTYLED */,
3638		tnf_string, msg, "enter"));
3639	ASSERT(slp->sl_fcip != NULL);
3640
3641	fptr = slp->sl_fcip;
3642	slp->sl_fcip = NULL;
3643
3644	/*
3645	 * we don't support promiscuous mode currently but check
3646	 * for and disable any promiscuous mode operation
3647	 */
3648	if (slp->sl_flags & SLALLPHYS) {
3649		slp->sl_flags &= ~SLALLPHYS;
3650	}
3651
3652	/*
3653	 * disable ALLMULTI mode if all mulitcast addr are ON
3654	 */
3655	if (slp->sl_flags & SLALLMULTI) {
3656		slp->sl_flags &= ~SLALLMULTI;
3657	}
3658
3659	/*
3660	 * we are most likely going to perform multicast by
3661	 * broadcasting to the well known addr (D_ID) 0xFFFFFF or
3662	 * ALPA 0x00 in case of public loops
3663	 */
3664
3665
3666	/*
3667	 * detach unit from device structure.
3668	 */
3669	for (tslp = fcipstrup; tslp != NULL; tslp = tslp->sl_nextp) {
3670		if (tslp->sl_fcip == fptr) {
3671			break;
3672		}
3673	}
3674	if (tslp == NULL) {
3675		FCIP_DEBUG(FCIP_DEBUG_DETACH, (CE_WARN,
3676		"fcip_dodeatch - active stream struct not found"));
3677
3678		/* unregister with Fabric nameserver?? */
3679	}
3680	slp->sl_state = DL_UNATTACHED;
3681
3682	fcip_setipq(fptr);
3683}
3684
3685
3686/*
3687 * Set or clear device ipq pointer.
3688 * Walk thru all the streams on this device, if a ETHERTYPE_IP
3689 * stream is found, assign device ipq to its sl_rq.
3690 */
3691static void
3692fcip_setipq(struct fcip *fptr)
3693{
3694	struct fcipstr	*slp;
3695	int		ok = 1;
3696	queue_t		*ipq = NULL;
3697
3698	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "entered fcip_setipq"));
3699
3700	rw_enter(&fcipstruplock, RW_READER);
3701
3702	for (slp = fcipstrup; slp != NULL; slp = slp->sl_nextp) {
3703		if (slp->sl_fcip == fptr) {
3704			if (slp->sl_flags & (SLALLPHYS|SLALLSAP)) {
3705				ok = 0;
3706			}
3707			if (slp->sl_sap == ETHERTYPE_IP) {
3708				if (ipq == NULL) {
3709					ipq = slp->sl_rq;
3710				} else {
3711					ok = 0;
3712				}
3713			}
3714		}
3715	}
3716
3717	rw_exit(&fcipstruplock);
3718
3719	if (fcip_check_port_exists(fptr)) {
3720		/* fptr passed to us is stale */
3721		return;
3722	}
3723
3724	mutex_enter(&fptr->fcip_mutex);
3725	if (ok) {
3726		fptr->fcip_ipq = ipq;
3727	} else {
3728		fptr->fcip_ipq = NULL;
3729	}
3730	mutex_exit(&fptr->fcip_mutex);
3731}
3732
3733
3734/* ARGSUSED */
3735static void
3736fcip_ioctl(queue_t *wq, mblk_t *mp)
3737{
3738	struct iocblk		*iocp = (struct iocblk *)mp->b_rptr;
3739	struct fcipstr		*slp = (struct fcipstr *)wq->q_ptr;
3740
3741	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
3742	    (CE_NOTE, "in fcip ioctl : %d", iocp->ioc_cmd));
3743	FCIP_TNF_PROBE_1((fcip_ioctl, "fcip io", /* CSTYLED */,
3744		tnf_string, msg, "enter"));
3745
3746	switch (iocp->ioc_cmd) {
3747	case DLIOCRAW:
3748		slp->sl_flags |= FCIP_SLRAW;
3749		miocack(wq, mp, 0, 0);
3750		break;
3751
3752	case DL_IOC_HDR_INFO:
3753		fcip_dl_ioc_hdr_info(wq, mp);
3754		break;
3755
3756	default:
3757		miocnak(wq, mp, 0, EINVAL);
3758		break;
3759	}
3760}
3761
3762/*
3763 * The streams 'Put' routine.
3764 */
3765/* ARGSUSED */
3766static int
3767fcip_wput(queue_t *wq, mblk_t *mp)
3768{
3769	struct fcipstr *slp = (struct fcipstr *)wq->q_ptr;
3770	struct fcip *fptr;
3771	struct fcip_dest *fdestp;
3772	fcph_network_hdr_t *headerp;
3773
3774	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
3775	    (CE_NOTE, "in fcip_wput :: type:%x", DB_TYPE(mp)));
3776
3777	switch (DB_TYPE(mp)) {
3778	case M_DATA: {
3779
3780		fptr = slp->sl_fcip;
3781
3782		if (((slp->sl_flags & (FCIP_SLFAST|FCIP_SLRAW)) == 0) ||
3783		    (slp->sl_state != DL_IDLE) ||
3784		    (fptr == NULL)) {
3785			/*
3786			 * set error in the message block and send a reply
3787			 * back upstream. Sun's merror routine does this
3788			 * for us more cleanly.
3789			 */
3790			merror(wq, mp, EPROTO);
3791			break;
3792		}
3793
3794		/*
3795		 * if any messages are already enqueued or if the interface
3796		 * is in promiscuous mode, causing the packets to loop back
3797		 * up, then enqueue the message. Otherwise just transmit
3798		 * the message. putq() puts the message on fcip's
3799		 * write queue and qenable() puts the queue (wq) on
3800		 * the list of queues to be called by the streams scheduler.
3801		 */
3802		if (wq->q_first) {
3803			(void) putq(wq, mp);
3804			fptr->fcip_wantw = 1;
3805			qenable(wq);
3806		} else if (fptr->fcip_flags & FCIP_PROMISC) {
3807			/*
3808			 * Promiscous mode not supported but add this code in
3809			 * case it will be supported in future.
3810			 */
3811			(void) putq(wq, mp);
3812			qenable(wq);
3813		} else {
3814
3815			headerp = (fcph_network_hdr_t *)mp->b_rptr;
3816			fdestp = fcip_get_dest(fptr, &headerp->net_dest_addr);
3817
3818			if (fdestp == NULL) {
3819				merror(wq, mp, EPROTO);
3820				break;
3821			}
3822
3823			ASSERT(fdestp != NULL);
3824
3825			(void) fcip_start(wq, mp, fptr, fdestp, KM_SLEEP);
3826		}
3827		break;
3828	}
3829	case M_PROTO:
3830	case M_PCPROTO:
3831		/*
3832		 * to prevent recursive calls into fcip_proto
3833		 * (PROTO and PCPROTO messages are handled by fcip_proto)
3834		 * let the service procedure handle these messages by
3835		 * calling putq here.
3836		 */
3837		(void) putq(wq, mp);
3838		qenable(wq);
3839		break;
3840
3841	case M_IOCTL:
3842		fcip_ioctl(wq, mp);
3843		break;
3844
3845	case M_FLUSH:
3846		if (*mp->b_rptr & FLUSHW) {
3847			flushq(wq, FLUSHALL);
3848			*mp->b_rptr &= ~FLUSHW;
3849		}
3850		/*
3851		 * we have both FLUSHW and FLUSHR set with FLUSHRW
3852		 */
3853		if (*mp->b_rptr & FLUSHR) {
3854			/*
3855			 * send msg back upstream. qreply() takes care
3856			 * of using the RD(wq) queue on its reply
3857			 */
3858			qreply(wq, mp);
3859		} else {
3860			freemsg(mp);
3861		}
3862		break;
3863
3864	default:
3865		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
3866		    (CE_NOTE, "default msg type: %x", DB_TYPE(mp)));
3867		freemsg(mp);
3868		break;
3869	}
3870	return (0);
3871}
3872
3873
3874/*
3875 * Handle M_PROTO and M_PCPROTO messages
3876 */
3877/* ARGSUSED */
3878static void
3879fcip_proto(queue_t *wq, mblk_t *mp)
3880{
3881	union DL_primitives	*dlp;
3882	struct fcipstr		*slp;
3883	t_uscalar_t		prim;
3884
3885	slp = (struct fcipstr *)wq->q_ptr;
3886	dlp = (union DL_primitives *)mp->b_rptr;
3887	prim = dlp->dl_primitive;		/* the DLPI command */
3888
3889	FCIP_TNF_PROBE_5((fcip_proto, "fcip io", /* CSTYLED */,
3890		tnf_string, msg, "enter",
3891		tnf_opaque, wq, wq,
3892		tnf_opaque, mp, mp,
3893		tnf_opaque, MP_DB_TYPE, DB_TYPE(mp),
3894		tnf_opaque, dl_primitive, dlp->dl_primitive));
3895
3896	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "dl_primitve : %x", prim));
3897
3898	mutex_enter(&slp->sl_lock);
3899
3900	switch (prim) {
3901	case DL_UNITDATA_REQ:
3902		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3903			tnf_string, msg, "unit data request"));
3904		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "unit data request"));
3905		fcip_udreq(wq, mp);
3906		break;
3907
3908	case DL_ATTACH_REQ:
3909		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3910			tnf_string, msg, "Attach request"));
3911		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Attach request"));
3912		fcip_areq(wq, mp);
3913		break;
3914
3915	case DL_DETACH_REQ:
3916		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3917			tnf_string, msg, "Detach request"));
3918		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Detach request"));
3919		fcip_dreq(wq, mp);
3920		break;
3921
3922	case DL_BIND_REQ:
3923		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Bind request"));
3924		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3925			tnf_string, msg, "Bind request"));
3926		fcip_breq(wq, mp);
3927		break;
3928
3929	case DL_UNBIND_REQ:
3930		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3931			tnf_string, msg, "unbind request"));
3932		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "unbind request"));
3933		fcip_ubreq(wq, mp);
3934		break;
3935
3936	case DL_INFO_REQ:
3937		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3938			tnf_string, msg, "Info request"));
3939		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "Info request"));
3940		fcip_ireq(wq, mp);
3941		break;
3942
3943	case DL_SET_PHYS_ADDR_REQ:
3944		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3945			tnf_string, msg, "set phy addr request"));
3946		FCIP_DEBUG(FCIP_DEBUG_DLPI,
3947		    (CE_NOTE, "set phy addr request"));
3948		fcip_spareq(wq, mp);
3949		break;
3950
3951	case DL_PHYS_ADDR_REQ:
3952		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3953			tnf_string, msg, "phy addr request"));
3954		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "phy addr request"));
3955		fcip_pareq(wq, mp);
3956		break;
3957
3958	case DL_ENABMULTI_REQ:
3959		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3960			tnf_string, msg, "Enable Multicast request"));
3961		FCIP_DEBUG(FCIP_DEBUG_DLPI,
3962		    (CE_NOTE, "Enable Multicast request"));
3963		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
3964		break;
3965
3966	case DL_DISABMULTI_REQ:
3967		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3968			tnf_string, msg, "Disable Multicast request"));
3969		FCIP_DEBUG(FCIP_DEBUG_DLPI,
3970		    (CE_NOTE, "Disable Multicast request"));
3971		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
3972		break;
3973
3974	case DL_PROMISCON_REQ:
3975		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3976			tnf_string, msg, "Promiscuous mode ON request"));
3977		FCIP_DEBUG(FCIP_DEBUG_DLPI,
3978		    (CE_NOTE, "Promiscuous mode ON request"));
3979		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
3980		break;
3981
3982	case DL_PROMISCOFF_REQ:
3983		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3984			tnf_string, msg, "Promiscuous mode OFF request"));
3985		FCIP_DEBUG(FCIP_DEBUG_DLPI,
3986		    (CE_NOTE, "Promiscuous mode OFF request"));
3987		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
3988		break;
3989
3990	default:
3991		FCIP_TNF_PROBE_1((fcip_proto, "fcip io", /* CSTYLED */,
3992			tnf_string, msg, "Unsupported request"));
3993		dlerrorack(wq, mp, prim, DL_UNSUPPORTED, 0);
3994		break;
3995	}
3996	mutex_exit(&slp->sl_lock);
3997}
3998
3999/*
4000 * Always enqueue M_PROTO and M_PCPROTO messages pn the wq and M_DATA
4001 * messages sometimes. Processing of M_PROTO and M_PCPROTO messages
4002 * require us to hold fcip's internal locks across (upstream) putnext
4003 * calls. Specifically fcip_intr could hold fcip_intrlock and fcipstruplock
4004 * when it calls putnext(). That thread could loop back around to call
4005 * fcip_wput and eventually fcip_init() to cause a recursive mutex panic
4006 *
4007 * M_DATA messages are enqueued only if we are out of xmit resources. Once
4008 * the transmit resources are available the service procedure is enabled
4009 * and an attempt is made to xmit all messages on the wq.
4010 */
4011/* ARGSUSED */
4012static int
4013fcip_wsrv(queue_t *wq)
4014{
4015	mblk_t		*mp;
4016	struct fcipstr	*slp;
4017	struct fcip	*fptr;
4018	struct fcip_dest *fdestp;
4019	fcph_network_hdr_t *headerp;
4020
4021	slp = (struct fcipstr *)wq->q_ptr;
4022	fptr = slp->sl_fcip;
4023
4024	FCIP_TNF_PROBE_2((fcip_wsrv, "fcip io", /* CSTYLED */,
4025		tnf_string, msg, "enter",
4026		tnf_opaque, wq, wq));
4027	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "fcip wsrv"));
4028
4029	while (mp = getq(wq)) {
4030		switch (DB_TYPE(mp)) {
4031		case M_DATA:
4032			if (fptr && mp) {
4033				headerp = (fcph_network_hdr_t *)mp->b_rptr;
4034				fdestp = fcip_get_dest(fptr,
4035				    &headerp->net_dest_addr);
4036				if (fdestp == NULL) {
4037					freemsg(mp);
4038					goto done;
4039				}
4040				if (fcip_start(wq, mp, fptr, fdestp,
4041				    KM_SLEEP)) {
4042					goto done;
4043				}
4044			} else {
4045				freemsg(mp);
4046			}
4047			break;
4048
4049		case M_PROTO:
4050		case M_PCPROTO:
4051			FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
4052			    (CE_NOTE, "PROT msg in wsrv"));
4053			fcip_proto(wq, mp);
4054			break;
4055		default:
4056			break;
4057		}
4058	}
4059done:
4060	return (0);
4061}
4062
4063
4064/*
4065 * This routine is called from fcip_wsrv to send a message downstream
4066 * on the fibre towards its destination. This routine performs the
4067 * actual WWN to D_ID mapping by looking up the routing and destination
4068 * tables.
4069 */
/*
 * Returns 0 when the message has been consumed (handed to the transport
 * or deliberately dropped), 1 on failure. On retryable failures the
 * mblk is put back on the write queue so fcip_wsrv can try again later.
 */
/* ARGSUSED */
static int
fcip_start(queue_t *wq, mblk_t *mp, struct fcip *fptr,
    struct fcip_dest *fdestp, int flags)
{
	int			rval;
	int			free;
	fcip_pkt_t		*fcip_pkt;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	size_t			datalen;

	FCIP_TNF_PROBE_4((fcip_start, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "enter", tnf_opaque, wq, wq,
	    tnf_opaque, mp, mp,
	    tnf_opaque, MP_DB_TYPE, DB_TYPE(mp)));
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "in fcipstart"));

	ASSERT(fdestp != NULL);

	/*
	 * Only return if port has gone offline and not come back online
	 * in a while. The message is dropped (freed) and 0 returned so
	 * the caller keeps draining its queue.
	 */
	if (fptr->fcip_flags & FCIP_LINK_DOWN) {
		freemsg(mp);
		return (0);
	}

	/*
	 * The message block coming in here already has the network and
	 * llc_snap hdr stuffed in
	 */
	/*
	 * Traditionally ethernet drivers at sun handle 3 cases here -
	 * 1. messages with one mblk
	 * 2. messages with 2 mblks
	 * 3. messages with >2 mblks
	 * For now lets handle all the 3 cases in a single case where we
	 * put them together in one mblk that has all the data
	 */

	if (mp->b_cont != NULL) {
		if (!pullupmsg(mp, -1)) {
			FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
			    (CE_WARN, "failed to concat message"));
			freemsg(mp);
			return (1);
		}
	}

	datalen = msgsize(mp);

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
	    "msgsize with nhdr & llcsnap hdr in fcip_pkt_alloc 0x%lx",
	    datalen));

	/*
	 * We cannot have requests larger than FCIPMTU+Headers
	 */
	if (datalen > (FCIPMTU + sizeof (llc_snap_hdr_t) +
		sizeof (fcph_network_hdr_t))) {
		freemsg(mp);
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
		    "fcip_pkt_alloc: datalen is larger than "
		    "max possible size."));
		return (1);
	}

	/*
	 * Out of packet resources: leave the message on the queue so
	 * the service procedure retries it when resources free up.
	 */
	fcip_pkt = fcip_pkt_alloc(fptr, mp, flags, datalen);
	if (fcip_pkt == NULL) {
		(void) putbq(wq, mp);
		return (1);
	}

	fcip_pkt->fcip_pkt_mp = mp;
	fcip_pkt->fcip_pkt_wq = wq;
	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	mutex_enter(&fdestp->fcipd_mutex);
	/*
	 * If the device dynamically disappeared, just fail the request.
	 */
	if (fdestp->fcipd_rtable == NULL) {
		mutex_exit(&fdestp->fcipd_mutex);
		fcip_pkt_free(fcip_pkt, 1);
		return (1);
	}

	/*
	 * Now that we've assigned pkt_pd, we can call fc_ulp_init_packet
	 */

	fc_pkt->pkt_pd = fdestp->fcipd_pd;

	if (fc_ulp_init_packet((opaque_t)fport->fcipp_handle,
	    fc_pkt, flags) != FC_SUCCESS) {
		mutex_exit(&fdestp->fcipd_mutex);
		fcip_pkt_free(fcip_pkt, 1);
		return (1);
	}

	/*
	 * Queue the packet on the destination's outstanding list (still
	 * under fcipd_mutex) so fcip_pkt_timeout can reap it if the
	 * FCA never responds.
	 */
	fcip_fdestp_enqueue_pkt(fdestp, fcip_pkt);
	fcip_pkt->fcip_pkt_dest = fdestp;
	fc_pkt->pkt_fca_device = fdestp->fcipd_fca_dev;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE,
	    "setting cmdlen to 0x%x: rsp 0x%x : data 0x%x",
	    fc_pkt->pkt_cmdlen, fc_pkt->pkt_rsplen, fc_pkt->pkt_datalen));

	fcip_init_unicast_pkt(fcip_pkt, fport->fcipp_sid,
	    fdestp->fcipd_did, fcip_pkt_callback);

	fdestp->fcipd_ncmds++;

	mutex_exit(&fdestp->fcipd_mutex);
	if ((rval = fcip_transport(fcip_pkt)) == FC_SUCCESS) {
		fptr->fcip_opackets++;
		return (0);
	}

	/*
	 * Transient transport errors (state change, offline, busy) keep
	 * the mblk for a retry; any other failure frees it for good.
	 */
	free = (rval == FC_STATEC_BUSY || rval == FC_OFFLINE ||
	    rval == FC_TRAN_BUSY) ? 0 : 1;

	mutex_enter(&fdestp->fcipd_mutex);
	rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);

	if (!rval) {
		/*
		 * Someone else (e.g. the timeout path) already pulled
		 * the packet off the list and owns its cleanup.
		 */
		fcip_pkt = NULL;
	} else {
		fdestp->fcipd_ncmds--;
	}
	mutex_exit(&fdestp->fcipd_mutex);

	if (fcip_pkt != NULL) {
		fcip_pkt_free(fcip_pkt, free);
	}

	if (!free) {
		/* retryable failure - park the mblk back on the queue */
		(void) putbq(wq, mp);
	}

	return (1);
}
4214
4215
4216/*
 * This routine enqueues a packet marked to be issued to the
4218 * transport in the dest structure. This enables us to timeout any
4219 * request stuck with the FCA/transport for long periods of time
4220 * without a response. fcip_pkt_timeout will attempt to clean up
4221 * any packets hung in this state of limbo.
4222 */
4223static void
4224fcip_fdestp_enqueue_pkt(struct fcip_dest *fdestp, fcip_pkt_t *fcip_pkt)
4225{
4226	ASSERT(mutex_owned(&fdestp->fcipd_mutex));
4227	FCIP_TNF_PROBE_1((fcip_fdestp_enqueue_pkt, "fcip io", /* CSTYLED */,
4228		tnf_string, msg, "destp enq pkt"));
4229
4230	/*
4231	 * Just hang it off the head of packet list
4232	 */
4233	fcip_pkt->fcip_pkt_next = fdestp->fcipd_head;
4234	fcip_pkt->fcip_pkt_prev = NULL;
4235	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST;
4236
4237	if (fdestp->fcipd_head != NULL) {
4238		ASSERT(fdestp->fcipd_head->fcip_pkt_prev == NULL);
4239		fdestp->fcipd_head->fcip_pkt_prev = fcip_pkt;
4240	}
4241
4242	fdestp->fcipd_head = fcip_pkt;
4243}
4244
4245
4246/*
4247 * dequeues any packets after the transport/FCA tells us it has
4248 * been successfully sent on its way. Ofcourse it doesn't mean that
4249 * the packet will actually reach its destination but its atleast
4250 * a step closer in that direction
4251 */
/*
 * Unlink fcip_pkt from fdestp's outstanding-packet list. Returns 1 if
 * the packet was (still) on the list and has been removed, 0 otherwise.
 * Caller must hold fdestp->fcipd_mutex.
 */
static int
fcip_fdestp_dequeue_pkt(struct fcip_dest *fdestp, fcip_pkt_t *fcip_pkt)
{
	fcip_pkt_t	*fcipd_pkt;

	ASSERT(mutex_owned(&fdestp->fcipd_mutex));
	if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_TIMEOUT) {
		/*
		 * Timeout path: walk the whole list and only unlink the
		 * packet if it is actually found there - presumably to
		 * guard against a race with the completion callback
		 * (NOTE(review): confirm against fcip_pkt_timeout).
		 */
		fcipd_pkt = fdestp->fcipd_head;
		while (fcipd_pkt) {
			if (fcipd_pkt == fcip_pkt) {
				fcip_pkt_t	*pptr = NULL;

				if (fcipd_pkt == fdestp->fcipd_head) {
					ASSERT(fcipd_pkt->fcip_pkt_prev ==
					    NULL);
					fdestp->fcipd_head =
					    fcipd_pkt->fcip_pkt_next;
				} else {
					pptr = fcipd_pkt->fcip_pkt_prev;
					ASSERT(pptr != NULL);
					pptr->fcip_pkt_next =
					    fcipd_pkt->fcip_pkt_next;
				}
				if (fcipd_pkt->fcip_pkt_next) {
					pptr = fcipd_pkt->fcip_pkt_next;
					pptr->fcip_pkt_prev =
					    fcipd_pkt->fcip_pkt_prev;
				}
				fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
				break;
			}
			fcipd_pkt = fcipd_pkt->fcip_pkt_next;
		}
	} else {
		/*
		 * Normal completion path: the packet is trusted to be
		 * on the list, so unlink it directly via its own
		 * prev/next pointers without a search.
		 */
		if (fcip_pkt->fcip_pkt_prev == NULL) {
			ASSERT(fdestp->fcipd_head == fcip_pkt);
			fdestp->fcipd_head = fcip_pkt->fcip_pkt_next;
		} else {
			fcip_pkt->fcip_pkt_prev->fcip_pkt_next =
			    fcip_pkt->fcip_pkt_next;
		}

		if (fcip_pkt->fcip_pkt_next) {
			fcip_pkt->fcip_pkt_next->fcip_pkt_prev =
			    fcip_pkt->fcip_pkt_prev;
		}

		fcipd_pkt = fcip_pkt;
		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
	}

	/* nonzero only when the packet was located and unlinked */
	return (fcipd_pkt == fcip_pkt);
}
4305
4306/*
4307 * The transport routine - this is the routine that actually calls
 * into the FCA driver (through the transport of course) to transmit a
 * datagram on the fibre. The dest struct associated with the port to
4310 * which the data is intended is already bound to the packet, this routine
4311 * only takes care of marking the packet a broadcast packet if it is
4312 * intended to be a broadcast request. This permits the transport to send
4313 * the packet down on the wire even if it doesn't have an entry for the
4314 * D_ID in its d_id hash tables.
4315 */
4316static int
4317fcip_transport(fcip_pkt_t *fcip_pkt)
4318{
4319	struct fcip		*fptr;
4320	fc_packet_t		*fc_pkt;
4321	fcip_port_info_t	*fport;
4322	struct fcip_dest	*fdestp;
4323	uint32_t		did;
4324	int			rval = FC_FAILURE;
4325	struct fcip_routing_table *frp = NULL;
4326
4327	FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */,
4328		tnf_string, msg, "enter"));
4329
4330	fptr = fcip_pkt->fcip_pkt_fptr;
4331	fport = fptr->fcip_port_info;
4332	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
4333	fdestp = fcip_pkt->fcip_pkt_dest;
4334	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN, "fcip_transport called"));
4335
4336	did = fptr->fcip_broadcast_did;
4337	if (fc_pkt->pkt_cmd_fhdr.d_id == did &&
4338	    fc_pkt->pkt_tran_type != FC_PKT_BROADCAST) {
4339		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
4340		    (CE_NOTE, "trantype set to BROADCAST"));
4341		fc_pkt->pkt_tran_type = FC_PKT_BROADCAST;
4342	}
4343
4344	mutex_enter(&fptr->fcip_mutex);
4345	if ((fc_pkt->pkt_tran_type != FC_PKT_BROADCAST) &&
4346	    (fc_pkt->pkt_pd == NULL)) {
4347		mutex_exit(&fptr->fcip_mutex);
4348		FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */,
4349		    tnf_string, msg, "fcip transport no pd"));
4350		return (rval);
4351	} else if (fptr->fcip_port_state == FCIP_PORT_OFFLINE) {
4352		mutex_exit(&fptr->fcip_mutex);
4353		FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */,
4354		    tnf_string, msg, "fcip transport port offline"));
4355		return (FC_TRAN_BUSY);
4356	}
4357	mutex_exit(&fptr->fcip_mutex);
4358
4359	if (fdestp) {
4360		struct fcip_routing_table 	*frp;
4361
4362		frp = fdestp->fcipd_rtable;
4363		mutex_enter(&fptr->fcip_rt_mutex);
4364		mutex_enter(&fdestp->fcipd_mutex);
4365		if (fc_pkt->pkt_pd != NULL) {
4366			if ((frp == NULL) ||
4367			    (frp && FCIP_RTE_UNAVAIL(frp->fcipr_state))) {
4368				mutex_exit(&fdestp->fcipd_mutex);
4369				mutex_exit(&fptr->fcip_rt_mutex);
4370				if (frp &&
4371				    (frp->fcipr_state == FCIP_RT_INVALID)) {
4372					FCIP_TNF_PROBE_1((fcip_transport,
4373					    "fcip io", /* CSTYLED */,
4374					    tnf_string, msg,
4375					    "fcip transport - TRANBUSY"));
4376					return (FC_TRAN_BUSY);
4377				} else {
4378					FCIP_TNF_PROBE_1((fcip_transport,
4379					    "fcip io", /* CSTYLED */,
4380					    tnf_string, msg,
4381					    "fcip transport: frp unavailable"));
4382					return (rval);
4383				}
4384			}
4385		}
4386		mutex_exit(&fdestp->fcipd_mutex);
4387		mutex_exit(&fptr->fcip_rt_mutex);
4388		ASSERT(fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_LIST);
4389	}
4390
4391	/* Explicitly invalidate this field till fcip decides to use it */
4392	fc_pkt->pkt_ulp_rscn_infop = NULL;
4393
4394	rval = fc_ulp_transport(fport->fcipp_handle, fc_pkt);
4395	if (rval == FC_STATEC_BUSY || rval == FC_OFFLINE) {
4396		/*
4397		 * Need to queue up the command for retry
4398		 */
4399		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
4400		    (CE_WARN, "ulp_transport failed: 0x%x", rval));
4401	} else if (rval == FC_LOGINREQ && (frp != NULL)) {
4402		(void) fcip_do_plogi(fptr, frp);
4403	} else if (rval == FC_BADPACKET && (frp != NULL)) {
4404		/*
4405		 * There is a distinct possiblity in our scheme of things
4406		 * that we have a routing table entry with a NULL pd struct.
4407		 * Mark the routing table entry for removal if it is not a
4408		 * broadcast entry
4409		 */
4410		if ((frp->fcipr_d_id.port_id != 0x0) &&
4411		    (frp->fcipr_d_id.port_id != 0xffffff)) {
4412			mutex_enter(&fptr->fcip_rt_mutex);
4413			frp->fcipr_pd = NULL;
4414			frp->fcipr_state = PORT_DEVICE_INVALID;
4415			mutex_exit(&fptr->fcip_rt_mutex);
4416		}
4417	}
4418
4419	FCIP_TNF_PROBE_1((fcip_transport, "fcip io", /* CSTYLED */,
4420	    tnf_string, msg, "fcip transport done"));
4421	return (rval);
4422}
4423
4424/*
4425 * Call back routine. Called by the FCA/transport when the messages
4426 * has been put onto the wire towards its intended destination. We can
4427 * now free the fc_packet associated with the message
4428 */
static void
fcip_pkt_callback(fc_packet_t *fc_pkt)
{
	int			rval;
	fcip_pkt_t		*fcip_pkt;
	struct fcip_dest	*fdestp;

	/* recover our per-packet wrapper from the transport packet */
	fcip_pkt = (fcip_pkt_t *)fc_pkt->pkt_ulp_private;
	fdestp = fcip_pkt->fcip_pkt_dest;

	/*
	 * take the lock early so that we don't have a race condition
	 * with fcip_timeout
	 *
	 * fdestp->fcipd_mutex isn't really intended to lock per
	 * packet struct - see bug 5105592 for permanent solution
	 */
	mutex_enter(&fdestp->fcipd_mutex);

	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_RETURNED;
	fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT;
	if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_TIMEOUT) {
		/*
		 * The timeout handler has claimed this packet; leave
		 * the dequeue and free to it.
		 */
		mutex_exit(&fdestp->fcipd_mutex);
		return;
	}

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "pkt callback"));

	ASSERT(fdestp->fcipd_rtable != NULL);
	ASSERT(fcip_pkt->fcip_pkt_flags & FCIP_PKT_IN_LIST);
	rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);
	fdestp->fcipd_ncmds--;
	mutex_exit(&fdestp->fcipd_mutex);

	/* free the packet only if we were the ones to unlink it */
	if (rval) {
		fcip_pkt_free(fcip_pkt, 1);
	}

	FCIP_TNF_PROBE_1((fcip_pkt_callback, "fcip io", /* CSTYLED */,
		tnf_string, msg, "pkt callback done"));
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_NOTE, "pkt callback done"));
}
4471
4472/*
4473 * Return 1 if the topology is supported, else return 0.
4474 * Topology support is consistent with what the whole
4475 * stack supports together.
4476 */
4477static int
4478fcip_is_supported_fc_topology(int fc_topology)
4479{
4480	switch (fc_topology) {
4481
4482	case FC_TOP_PRIVATE_LOOP :
4483	case FC_TOP_PUBLIC_LOOP :
4484	case FC_TOP_FABRIC :
4485	case FC_TOP_NO_NS :
4486		return (1);
4487	default :
4488		return (0);
4489	}
4490}
4491
4492/*
4493 * handle any topology specific initializations here
4494 * this routine must be called while holding fcip_mutex
4495 */
/* ARGSUSED */
static void
fcip_handle_topology(struct fcip *fptr)
{

	fcip_port_info_t	*fport = fptr->fcip_port_info;

	ASSERT(mutex_owned(&fptr->fcip_mutex));

	/*
	 * Since we know the port's topology - handle topology
	 * specific details here. In Point to Point and Private Loop
	 * topologies - we would probably not have a name server
	 */

	FCIP_TNF_PROBE_3((fcip_handle_topology, "fcip io", /* CSTYLED */,
		tnf_string, msg, "enter",
		tnf_uint, port_state, fport->fcipp_pstate,
		tnf_uint, topology, fport->fcipp_topology));
	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "port state: %x, topology %x",
		fport->fcipp_pstate, fport->fcipp_topology));

	fptr->fcip_broadcast_did = fcip_get_broadcast_did(fptr);
	/*
	 * fcip_mutex must be dropped across the broadcast entry add
	 * (and across fcip_rt_update below) to preserve lock ordering.
	 */
	mutex_exit(&fptr->fcip_mutex);
	(void) fcip_dest_add_broadcast_entry(fptr, 0);
	mutex_enter(&fptr->fcip_mutex);

	if (!fcip_is_supported_fc_topology(fport->fcipp_topology)) {
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(0x%x): Unsupported port topology (0x%x)",
		    fptr->fcip_instance, fport->fcipp_topology));
		return;
	}

	switch (fport->fcipp_topology) {
	case FC_TOP_PRIVATE_LOOP: {

		fc_portmap_t		*port_map;
		uint32_t		listlen, alloclen;
		/*
		 * we may have to maintain routing. Get a list of
		 * all devices on this port that the transport layer is
		 * aware of. Check if any of them is a IS8802 type port,
		 * if yes get its WWN and DID mapping and cache it in
		 * the purport routing table. Since there is no
		 * State Change notification for private loop/point_point
		 * topologies - this table may not be accurate. The static
		 * routing table is updated on a state change callback.
		 */
		FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN, "port state valid!!"));
		fptr->fcip_port_state = FCIP_PORT_ONLINE;
		listlen = alloclen = FCIP_MAX_PORTS;
		port_map = (fc_portmap_t *)
		    kmem_zalloc((FCIP_MAX_PORTS * sizeof (fc_portmap_t)),
		    KM_SLEEP);
		if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
		    &listlen, FC_ULP_PLOGI_PRESERVE) == FC_SUCCESS) {
			mutex_exit(&fptr->fcip_mutex);
			fcip_rt_update(fptr, port_map, listlen);
			mutex_enter(&fptr->fcip_mutex);
		}
		/*
		 * the transport may have grown the map; free whatever
		 * was actually allocated
		 */
		if (listlen > alloclen) {
			alloclen = listlen;
		}
		kmem_free(port_map, (alloclen * sizeof (fc_portmap_t)));
		/*
		 * Now fall through and register with the transport
		 * that this port is IP capable
		 */
	}
	/* FALLTHROUGH */
	case FC_TOP_NO_NS:
		/*
		 * If we don't have a nameserver, lets wait until we
		 * have to send out a packet to a remote port and then
		 * try and discover the port using ARP/FARP.
		 */
	/* FALLTHROUGH */
	case FC_TOP_PUBLIC_LOOP:
	case FC_TOP_FABRIC: {
		fc_portmap_t	*port_map;
		uint32_t	listlen, alloclen;

		/* FC_TYPE of 0x05 goes to word 0, LSB */
		fptr->fcip_port_state = FCIP_PORT_ONLINE;

		/*
		 * Kick off name server registration on the taskq; the
		 * REG_INPROGRESS flag is cleared by fcip_port_ns (or
		 * here if the dispatch itself fails).
		 */
		if (!(fptr->fcip_flags & FCIP_REG_INPROGRESS)) {
			fptr->fcip_flags |= FCIP_REG_INPROGRESS;
			if (taskq_dispatch(fptr->fcip_tq, fcip_port_ns,
			    fptr, KM_NOSLEEP) == 0) {
				fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
			}
		}

		/*
		 * If fcip_create_nodes_on_demand is overridden to force
		 * discovery of all nodes in Fabric/Public loop topologies
		 * we need to query for and obtain all nodes and log into
		 * them as with private loop devices
		 */
		if (!fcip_create_nodes_on_demand) {
			fptr->fcip_port_state = FCIP_PORT_ONLINE;
			listlen = alloclen = FCIP_MAX_PORTS;
			port_map = (fc_portmap_t *)
			    kmem_zalloc((FCIP_MAX_PORTS *
			    sizeof (fc_portmap_t)), KM_SLEEP);
			if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
			    &listlen, FC_ULP_PLOGI_PRESERVE) == FC_SUCCESS) {
				mutex_exit(&fptr->fcip_mutex);
				fcip_rt_update(fptr, port_map, listlen);
				mutex_enter(&fptr->fcip_mutex);
			}
			if (listlen > alloclen) {
				alloclen = listlen;
			}
			kmem_free(port_map,
			    (alloclen * sizeof (fc_portmap_t)));
		}
		break;
	}

	default:
		break;
	}
}
4621
4622static void
4623fcip_port_ns(void *arg)
4624{
4625	struct	fcip		*fptr = (struct fcip *)arg;
4626	fcip_port_info_t	*fport = fptr->fcip_port_info;
4627	fc_ns_cmd_t		ns_cmd;
4628	uint32_t		types[8];
4629	ns_rfc_type_t		rfc;
4630
4631	mutex_enter(&fptr->fcip_mutex);
4632	if ((fptr->fcip_flags & (FCIP_DETACHING | FCIP_DETACHED)) ||
4633	    (fptr->fcip_flags & (FCIP_SUSPENDED | FCIP_POWER_DOWN))) {
4634		fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
4635		mutex_exit(&fptr->fcip_mutex);
4636		return;
4637	}
4638	mutex_exit(&fptr->fcip_mutex);
4639
4640	/*
4641	 * Prepare the Name server structure to
4642	 * register with the transport in case of
4643	 * Fabric configuration.
4644	 */
4645	bzero(&rfc, sizeof (rfc));
4646	bzero(types, sizeof (types));
4647
4648	types[FC4_TYPE_WORD_POS(FC_TYPE_IS8802_SNAP)] = (1 <<
4649	    FC4_TYPE_BIT_POS(FC_TYPE_IS8802_SNAP));
4650
4651	rfc.rfc_port_id.port_id = fport->fcipp_sid.port_id;
4652	bcopy(types, rfc.rfc_types, sizeof (types));
4653
4654	ns_cmd.ns_flags = 0;
4655	ns_cmd.ns_cmd = NS_RFT_ID;
4656	ns_cmd.ns_req_len = sizeof (rfc);
4657	ns_cmd.ns_req_payload = (caddr_t)&rfc;
4658	ns_cmd.ns_resp_len = 0;
4659	ns_cmd.ns_resp_payload = NULL;
4660
4661	/*
4662	 * Perform the Name Server Registration for FC IS8802_SNAP Type.
4663	 * We don't expect a reply for registering port type
4664	 */
4665	(void) fc_ulp_port_ns(fptr->fcip_port_info->fcipp_handle,
4666		(opaque_t)0, &ns_cmd);
4667
4668	mutex_enter(&fptr->fcip_mutex);
4669	fptr->fcip_flags &= ~FCIP_REG_INPROGRESS;
4670	mutex_exit(&fptr->fcip_mutex);
4671}
4672
4673/*
4674 * setup this instance of fcip. This routine inits kstats, allocates
 * unsolicited buffers, determines this port's siblings and handles
4676 * topology specific details which includes registering with the name
4677 * server and also setting up the routing table for this port for
4678 * private loops and point to point topologies
4679 */
static int
fcip_init_port(struct fcip *fptr)
{
	int rval = FC_SUCCESS;
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	/* NOTE(review): static buffer - assumes attach is serialized */
	static char buf[64];
	size_t	tok_buf_size;

	ASSERT(fport != NULL);

	FCIP_TNF_PROBE_1((fcip_init_port, "fcip io", /* CSTYLED */,
		tnf_string, msg, "enter"));
	/* held across init; the done: cleanup path expects it held */
	mutex_enter(&fptr->fcip_mutex);

	/*
	 * setup mac address for this port. Don't be too worried if
	 * the WWN is zero, there is probably nothing attached to
	 * to the port. There is no point allocating unsolicited buffers
	 * for an unused port so return success if we don't have a MAC
	 * address. Do the port init on a state change notification.
	 */
	if (fcip_setup_mac_addr(fptr) == FCIP_INVALID_WWN) {
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		rval = FC_SUCCESS;
		goto done;
	}

	/*
	 * clear routing table hash list for this port
	 */
	fcip_rt_flush(fptr);

	/*
	 * init kstats for this instance
	 */
	fcip_kstat_init(fptr);

	/*
	 * Allocate unsolicited buffers
	 */
	fptr->fcip_ub_nbufs = fcip_ub_nbufs;
	tok_buf_size = sizeof (*fptr->fcip_ub_tokens) * fcip_ub_nbufs;

	FCIP_TNF_PROBE_2((fcip_init_port, "fcip io", /* CSTYLED */,
		tnf_string, msg, "debug",
		tnf_int, tokBufsize, tok_buf_size));

	FCIP_DEBUG(FCIP_DEBUG_INIT,
	    (CE_WARN, "tokBufsize: 0x%lx", tok_buf_size));

	fptr->fcip_ub_tokens = kmem_zalloc(tok_buf_size, KM_SLEEP);

	/* defensive: KM_SLEEP allocations do not fail in practice */
	if (fptr->fcip_ub_tokens == NULL) {
		rval = FC_FAILURE;
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(%d): failed to allocate unsol buf",
		    fptr->fcip_instance));
		goto done;
	}
	rval = fc_ulp_uballoc(fport->fcipp_handle, &fptr->fcip_ub_nbufs,
		fcip_ub_size, FC_TYPE_IS8802_SNAP, fptr->fcip_ub_tokens);

	if (rval != FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(%d): fc_ulp_uballoc failed with 0x%x!!",
		    fptr->fcip_instance, rval));
	}

	switch (rval) {
	case FC_SUCCESS:
		break;

	case FC_OFFLINE:
		fptr->fcip_port_state = FCIP_PORT_OFFLINE;
		rval = FC_FAILURE;
		goto done;

	case FC_UB_ERROR:
		FCIP_TNF_PROBE_1((fcip_init_port, "fcip io", /* CSTYLED */,
			tnf_string, msg, "invalid ub alloc request"));
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "invalid ub alloc request !!"));
		rval = FC_FAILURE;
		goto done;

	case FC_FAILURE:
		/*
		 * requested bytes could not be alloced
		 */
		if (fptr->fcip_ub_nbufs != fcip_ub_nbufs) {
			cmn_err(CE_WARN,
			    "!fcip(0x%x): Failed to alloc unsolicited bufs",
			    ddi_get_instance(fport->fcipp_dip));
			rval = FC_FAILURE;
			goto done;
		}
		break;

	default:
		rval = FC_FAILURE;
		break;
	}

	/*
	 * Preallocate a Cache of fcip packets for transmit and receive
	 * We don't want to be holding on to unsolicited buffers while
	 * we transmit the message upstream
	 */
	FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE, "allocating fcip_pkt cache"));

	(void) sprintf(buf, "fcip%d_cache", fptr->fcip_instance);
	fptr->fcip_xmit_cache = kmem_cache_create(buf,
		(fport->fcipp_fca_pkt_size + sizeof (fcip_pkt_t)),
		8, fcip_cache_constructor, fcip_cache_destructor,
		NULL, (void *)fport, NULL, 0);

	(void) sprintf(buf, "fcip%d_sendup_cache", fptr->fcip_instance);
	fptr->fcip_sendup_cache = kmem_cache_create(buf,
		sizeof (struct fcip_sendup_elem),
		8, fcip_sendup_constructor, NULL, NULL, (void *)fport, NULL, 0);

	if (fptr->fcip_xmit_cache == NULL) {
		FCIP_TNF_PROBE_2((fcip_init_port, "fcip io", /* CSTYLED */,
			tnf_string, msg, "unable to allocate xmit cache",
			tnf_int, instance, fptr->fcip_instance));
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip%d unable to allocate xmit cache",
		    fptr->fcip_instance));
		rval = FC_FAILURE;
		goto done;
	}

	/*
	 * We may need to handle routing tables for point to point and
	 * fcal topologies and register with NameServer for Fabric
	 * topologies.
	 */
	fcip_handle_topology(fptr);
	mutex_exit(&fptr->fcip_mutex);
	if (fcip_dest_add_broadcast_entry(fptr, 1) != FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_INIT,
		    (CE_WARN, "fcip(0x%x):add broadcast entry failed!!",
		    fptr->fcip_instance));
		/* re-take the mutex; the done path drops it */
		mutex_enter(&fptr->fcip_mutex);
		rval = FC_FAILURE;
		goto done;
	}

	rval = FC_SUCCESS;
	return (rval);

done:
	/*
	 * we don't always come here from port_attach - so cleanup
	 * anything done in the init_port routine
	 */
	if (fptr->fcip_kstatp) {
		kstat_delete(fptr->fcip_kstatp);
		fptr->fcip_kstatp = NULL;
	}

	if (fptr->fcip_xmit_cache) {
		kmem_cache_destroy(fptr->fcip_xmit_cache);
		fptr->fcip_xmit_cache = NULL;
	}

	if (fptr->fcip_sendup_cache) {
		kmem_cache_destroy(fptr->fcip_sendup_cache);
		fptr->fcip_sendup_cache = NULL;
	}

	/* release unsolicited buffers */
	if (fptr->fcip_ub_tokens) {
		uint64_t	*tokens = fptr->fcip_ub_tokens;
		fptr->fcip_ub_tokens = NULL;

		/* tok_buf_size is set whenever tokens were allocated */
		mutex_exit(&fptr->fcip_mutex);
		(void) fc_ulp_ubfree(fport->fcipp_handle, fptr->fcip_ub_nbufs,
			tokens);
		kmem_free(tokens, tok_buf_size);

	} else {
		mutex_exit(&fptr->fcip_mutex);
	}

	return (rval);
}
4867
4868/*
4869 * Sets up a port's MAC address from its WWN
4870 */
4871static int
4872fcip_setup_mac_addr(struct fcip *fptr)
4873{
4874	fcip_port_info_t	*fport = fptr->fcip_port_info;
4875
4876	ASSERT(mutex_owned(&fptr->fcip_mutex));
4877
4878	fptr->fcip_addrflags = 0;
4879
4880	/*
4881	 * we cannot choose a MAC address for our interface - we have
4882	 * to live with whatever node WWN we get (minus the top two
4883	 * MSbytes for the MAC address) from the transport layer. We will
4884	 * treat the WWN as our factory MAC address.
4885	 */
4886
4887	if ((fport->fcipp_nwwn.w.wwn_hi != 0) ||
4888	    (fport->fcipp_nwwn.w.wwn_lo != 0)) {
4889		char		etherstr[ETHERSTRL];
4890
4891		wwn_to_ether(&fport->fcipp_nwwn, &fptr->fcip_macaddr);
4892		fcip_ether_to_str(&fptr->fcip_macaddr, etherstr);
4893		FCIP_DEBUG(FCIP_DEBUG_INIT,
4894		    (CE_NOTE, "setupmacaddr ouraddr %s", etherstr));
4895
4896		fptr->fcip_addrflags = (FCIP_FACTADDR_PRESENT |
4897						FCIP_FACTADDR_USE);
4898	} else {
4899		/*
4900		 * No WWN - just return failure - there's not much
4901		 * we can do since we cannot set the WWN.
4902		 */
4903		FCIP_DEBUG(FCIP_DEBUG_INIT,
4904		    (CE_WARN, "Port does not have a valid WWN"));
4905		return (FCIP_INVALID_WWN);
4906	}
4907	return (FC_SUCCESS);
4908}
4909
4910
4911/*
4912 * flush routing table entries
4913 */
4914static void
4915fcip_rt_flush(struct fcip *fptr)
4916{
4917	int index;
4918
4919	mutex_enter(&fptr->fcip_rt_mutex);
4920	for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
4921		struct fcip_routing_table 	*frtp, *frtp_next;
4922		frtp = fptr->fcip_rtable[index];
4923		while (frtp) {
4924			frtp_next = frtp->fcipr_next;
4925			kmem_free(frtp, sizeof (struct fcip_routing_table));
4926			frtp = frtp_next;
4927		}
4928		fptr->fcip_rtable[index] = NULL;
4929	}
4930	mutex_exit(&fptr->fcip_rt_mutex);
4931}
4932
4933/*
4934 * Free up the fcip softstate and all allocated resources for the
 * fcip instance associated with a given port driver instance
4936 *
4937 * Given that the list of structures pointed to by fcip_port_head,
4938 * this function is called from multiple sources, and the
4939 * fcip_global_mutex that protects fcip_port_head must be dropped,
4940 * our best solution is to return a value that indicates the next
4941 * port in the list.  This way the caller doesn't need to worry
4942 * about the race condition where he saves off a pointer to the
4943 * next structure in the list and by the time this routine returns,
4944 * that next structure has already been freed.
4945 */
static fcip_port_info_t *
fcip_softstate_free(fcip_port_info_t *fport)
{
	struct fcip		*fptr = NULL;
	int 			instance;
	timeout_id_t		tid;
	opaque_t		phandle = NULL;
	fcip_port_info_t	*prev_fport, *cur_fport, *next_fport = NULL;

	ASSERT(MUTEX_HELD(&fcip_global_mutex));

	if (fport) {
		phandle = fport->fcipp_handle;
		fptr = fport->fcipp_fcip;
	} else {
		/* nothing to free; no next port to report either */
		return (next_fport);
	}

	if (fptr) {
		mutex_enter(&fptr->fcip_mutex);
		instance = ddi_get_instance(fptr->fcip_dip);

		/*
		 * dismantle timeout thread for this instance of fcip
		 */
		tid = fptr->fcip_timeout_id;
		fptr->fcip_timeout_id = NULL;

		/* drop the lock while waiting for the timeout to drain */
		mutex_exit(&fptr->fcip_mutex);
		(void) untimeout(tid);
		mutex_enter(&fptr->fcip_mutex);

		ASSERT(fcip_num_instances >= 0);
		fcip_num_instances--;

		/*
		 * stop sendup thread
		 */
		mutex_enter(&fptr->fcip_sendup_mutex);
		if (fptr->fcip_sendup_thr_initted) {
			/*
			 * signal the thread to exit, then wait for it
			 * to signal back on the same cv
			 */
			fptr->fcip_sendup_thr_initted = 0;
			cv_signal(&fptr->fcip_sendup_cv);
			cv_wait(&fptr->fcip_sendup_cv,
			    &fptr->fcip_sendup_mutex);
		}
		ASSERT(fptr->fcip_sendup_head == NULL);
		fptr->fcip_sendup_head = fptr->fcip_sendup_tail = NULL;
		mutex_exit(&fptr->fcip_sendup_mutex);

		/*
		 * dismantle taskq
		 */
		if (fptr->fcip_tq) {
			taskq_t	*tq = fptr->fcip_tq;

			fptr->fcip_tq = NULL;

			/* taskq_destroy blocks; do not hold our lock */
			mutex_exit(&fptr->fcip_mutex);
			taskq_destroy(tq);
			mutex_enter(&fptr->fcip_mutex);
		}

		if (fptr->fcip_kstatp) {
			kstat_delete(fptr->fcip_kstatp);
			fptr->fcip_kstatp = NULL;
		}

		/* flush the routing table entries */
		fcip_rt_flush(fptr);

		if (fptr->fcip_xmit_cache) {
			kmem_cache_destroy(fptr->fcip_xmit_cache);
			fptr->fcip_xmit_cache = NULL;
		}

		if (fptr->fcip_sendup_cache) {
			kmem_cache_destroy(fptr->fcip_sendup_cache);
			fptr->fcip_sendup_cache = NULL;
		}

		fcip_cleanup_dest(fptr);

		/* release unsolicited buffers */
		if (fptr->fcip_ub_tokens) {
			uint64_t	*tokens = fptr->fcip_ub_tokens;

			fptr->fcip_ub_tokens = NULL;
			mutex_exit(&fptr->fcip_mutex);
			if (phandle) {
				/*
				 * release the global mutex here to
				 * permit any data pending callbacks to
				 * complete. Else we will deadlock in the
				 * FCA waiting for all unsol buffers to be
				 * returned.
				 */
				mutex_exit(&fcip_global_mutex);
				(void) fc_ulp_ubfree(phandle,
				    fptr->fcip_ub_nbufs, tokens);
				mutex_enter(&fcip_global_mutex);
			}
			/* sized from the global, matching the alloc */
			kmem_free(tokens, (sizeof (*tokens) * fcip_ub_nbufs));
		} else {
			mutex_exit(&fptr->fcip_mutex);
		}

		mutex_destroy(&fptr->fcip_mutex);
		mutex_destroy(&fptr->fcip_ub_mutex);
		mutex_destroy(&fptr->fcip_rt_mutex);
		mutex_destroy(&fptr->fcip_dest_mutex);
		mutex_destroy(&fptr->fcip_sendup_mutex);
		cv_destroy(&fptr->fcip_farp_cv);
		cv_destroy(&fptr->fcip_sendup_cv);
		cv_destroy(&fptr->fcip_ub_cv);

		ddi_soft_state_free(fcip_softp, instance);
	}

	/*
	 * Now dequeue the fcip_port_info from the port list
	 */
	cur_fport = fcip_port_head;
	prev_fport = NULL;
	while (cur_fport != NULL) {
		if (cur_fport == fport) {
			break;
		}
		prev_fport = cur_fport;
		cur_fport = cur_fport->fcipp_next;
	}

	/*
	 * Assert that we found a port in our port list
	 */
	ASSERT(cur_fport == fport);

	if (prev_fport) {
		/*
		 * Not the first port in the port list
		 */
		prev_fport->fcipp_next = fport->fcipp_next;
	} else {
		/*
		 * first port
		 */
		fcip_port_head = fport->fcipp_next;
	}
	next_fport = fport->fcipp_next;
	kmem_free(fport, sizeof (fcip_port_info_t));

	return (next_fport);
}
5098
5099
5100/*
5101 * This is called by transport for any ioctl operations performed
5102 * on the devctl or other transport minor nodes. It is currently
5103 * unused for fcip
5104 */
5105/* ARGSUSED */
static int
fcip_port_ioctl(opaque_t ulp_handle,  opaque_t port_handle, dev_t dev,
	int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
	uint32_t claimed)
{
	/* fcip services no transport ioctls; decline them all */
	return (FC_UNCLAIMED);
}
5113
5114/*
5115 * DL_INFO_REQ - returns information about the DLPI stream to the DLS user
5116 * requesting information about this interface
5117 */
static void
fcip_ireq(queue_t *wq, mblk_t *mp)
{
	struct fcipstr		*slp;
	struct fcip		*fptr;
	dl_info_ack_t		*dlip;
	struct fcipdladdr	*dlap;
	la_wwn_t		*ep;
	int 			size;
	char			etherstr[ETHERSTRL];

	slp = (struct fcipstr *)wq->q_ptr;

	fptr = slp->sl_fcip;

	FCIP_DEBUG(FCIP_DEBUG_DLPI,
	    (CE_NOTE, "fcip_ireq: info request req rcvd"));

	FCIP_TNF_PROBE_1((fcip_ireq, "fcip io", /* CSTYLED */,
	    tnf_string, msg, "fcip ireq entered"));

	/* reject a primitive that is too short to be a DL_INFO_REQ */
	if (MBLKL(mp) < DL_INFO_REQ_SIZE) {
		dlerrorack(wq, mp, DL_INFO_REQ, DL_BADPRIM, 0);
		return;
	}

	/*
	 * Exchange current message for a DL_INFO_ACK.
	 * On failure mexchange has already disposed of the message,
	 * so we just return.
	 */
	size = sizeof (dl_info_ack_t) + FCIPADDRL + ETHERADDRL;
	if ((mp = mexchange(wq, mp, size, M_PCPROTO, DL_INFO_ACK)) == NULL) {
		return;
	}

	/*
	 * FILL in the DL_INFO_ACK fields and reply
	 */
	dlip = (dl_info_ack_t *)mp->b_rptr;
	*dlip = fcip_infoack;	/* template ack; patched up below */
	dlip->dl_current_state = slp->sl_state;
	dlap = (struct fcipdladdr *)(mp->b_rptr + dlip->dl_addr_offset);
	dlap->dl_sap = slp->sl_sap;


	/*
	 * Report the port's current MAC address when the stream is
	 * attached; an unattached stream gets an all-zero address.
	 */
	if (fptr) {
		fcip_ether_to_str(&fptr->fcip_macaddr, etherstr);
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "ireq - our mac: %s", etherstr));
		ether_bcopy(&fptr->fcip_macaddr, &dlap->dl_phys);
	} else {
		bzero((caddr_t)&dlap->dl_phys, ETHERADDRL);
	}

	/* broadcast address used for ARP over FC */
	ep = (la_wwn_t *)(mp->b_rptr + dlip->dl_brdcst_addr_offset);
	ether_bcopy(&fcip_arpbroadcast_addr, ep);

	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "sending back info req.."));
	qreply(wq, mp);
}
5177
5178
5179/*
5180 * To handle DL_UNITDATA_REQ requests.
5181 */
5182
5183static void
5184fcip_udreq(queue_t *wq, mblk_t *mp)
5185{
5186	struct fcipstr		*slp;
5187	struct fcip		*fptr;
5188	fcip_port_info_t	*fport;
5189	dl_unitdata_req_t	*dludp;
5190	mblk_t			*nmp;
5191	struct fcipdladdr	*dlap;
5192	fcph_network_hdr_t 	*headerp;
5193	llc_snap_hdr_t		*lsnap;
5194	t_uscalar_t		off, len;
5195	struct fcip_dest	*fdestp;
5196	la_wwn_t		wwn;
5197	int			hdr_size;
5198
5199	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "inside fcip_udreq"));
5200
5201	FCIP_TNF_PROBE_1((fcip_udreq, "fcip io", /* CSTYLED */,
5202	    tnf_string, msg, "fcip udreq entered"));
5203
5204	slp = (struct fcipstr *)wq->q_ptr;
5205
5206	if (slp->sl_state != DL_IDLE) {
5207		dlerrorack(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0);
5208		return;
5209	}
5210
5211	fptr = slp->sl_fcip;
5212
5213	if (fptr == NULL) {
5214		dlerrorack(wq, mp, DL_UNITDATA_REQ, DL_OUTSTATE, 0);
5215		return;
5216	}
5217
5218	fport = fptr->fcip_port_info;
5219
5220	dludp = (dl_unitdata_req_t *)mp->b_rptr;
5221	off = dludp->dl_dest_addr_offset;
5222	len = dludp->dl_dest_addr_length;
5223
5224	/*
5225	 * Validate destination address format
5226	 */
5227	if (!MBLKIN(mp, off, len) || (len != FCIPADDRL)) {
5228		dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADADDR, 0);
5229		return;
5230	}
5231
5232	/*
5233	 * Error if no M_DATA follows
5234	 */
5235	nmp = mp->b_cont;
5236	if (nmp == NULL) {
5237		dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADDATA, 0);
5238		return;
5239	}
5240	dlap = (struct fcipdladdr *)(mp->b_rptr + off);
5241
5242	/*
5243	 * Now get the destination structure for the remote NPORT
5244	 */
5245	ether_to_wwn(&dlap->dl_phys, &wwn);
5246	fdestp = fcip_get_dest(fptr, &wwn);
5247
5248	if (fdestp == NULL) {
5249		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE,
5250		    "udreq - couldn't find dest struct for remote port");
5251		dluderrorind(wq, mp, (mp->b_rptr + off), len, DL_BADDATA, 0));
5252		return;
5253	}
5254
5255	/*
5256	 * Network header + SAP
5257	 */
5258	hdr_size = sizeof (fcph_network_hdr_t) + sizeof (llc_snap_hdr_t);
5259
5260	/* DB_REF gives the no. of msgs pointing to this block */
5261	if ((DB_REF(nmp) == 1) &&
5262	    (MBLKHEAD(nmp) >= hdr_size) &&
5263	    (((uintptr_t)mp->b_rptr & 0x1) == 0)) {
5264		la_wwn_t wwn;
5265		nmp->b_rptr -= hdr_size;
5266
5267		/* first put the network header */
5268		headerp = (fcph_network_hdr_t *)nmp->b_rptr;
5269		if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
5270			ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
5271		} else {
5272			ether_to_wwn(&dlap->dl_phys, &wwn);
5273		}
5274		bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
5275		ether_to_wwn(&fptr->fcip_macaddr, &wwn);
5276		bcopy(&wwn, &headerp->net_src_addr, sizeof (la_wwn_t));
5277
5278		/* Now the snap header */
5279		lsnap = (llc_snap_hdr_t *)(nmp->b_rptr +
5280		    sizeof (fcph_network_hdr_t));
5281		lsnap->dsap = 0xAA;
5282		lsnap->ssap = 0xAA;
5283		lsnap->ctrl = 0x03;
5284		lsnap->oui[0] = 0x00;
5285		lsnap->oui[1] = 0x00; 	/* 80 */
5286		lsnap->oui[2] = 0x00;	/* C2 */
5287		lsnap->pid = BE_16((dlap->dl_sap));
5288
5289		freeb(mp);
5290		mp = nmp;
5291
5292	} else {
5293		la_wwn_t wwn;
5294
5295		DB_TYPE(mp) = M_DATA;
5296		headerp = (fcph_network_hdr_t *)mp->b_rptr;
5297
5298		/*
5299		 * Only fill in the low 48bits of WWN for now - we can
5300		 * fill in the NAA_ID after we find the port in the
5301		 * routing tables
5302		 */
5303		if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
5304			ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
5305		} else {
5306			ether_to_wwn(&dlap->dl_phys, &wwn);
5307		}
5308		bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
5309		/* need to send our PWWN */
5310		bcopy(&fport->fcipp_pwwn, &headerp->net_src_addr,
5311		    sizeof (la_wwn_t));
5312
5313		lsnap = (llc_snap_hdr_t *)(nmp->b_rptr +
5314		    sizeof (fcph_network_hdr_t));
5315		lsnap->dsap = 0xAA;
5316		lsnap->ssap = 0xAA;
5317		lsnap->ctrl = 0x03;
5318		lsnap->oui[0] = 0x00;
5319		lsnap->oui[1] = 0x00;
5320		lsnap->oui[2] = 0x00;
5321		lsnap->pid = BE_16(dlap->dl_sap);
5322
5323		mp->b_wptr = mp->b_rptr + hdr_size;
5324	}
5325
5326	/*
5327	 * Ethernet drivers have a lot of gunk here to put the Type
5328	 * information (for Ethernet encapsulation (RFC 894) or the
5329	 * Length (for 802.2/802.3) - I guess we'll just ignore that
5330	 * here.
5331	 */
5332
5333	/*
5334	 * Start the I/O on this port. If fcip_start failed for some reason
5335	 * we call putbq in fcip_start so we don't need to check the
5336	 * return value from fcip_start
5337	 */
5338	(void) fcip_start(wq, mp, fptr, fdestp, KM_SLEEP);
5339}
5340
5341/*
 * DL_ATTACH_REQ: attaches a PPA with a stream. ATTACH requests are needed
5343 * for style 2 DLS providers to identify the physical medium through which
5344 * the streams communication will happen
5345 */
static void
fcip_areq(queue_t *wq, mblk_t *mp)
{
	struct fcipstr		*slp;
	union DL_primitives	*dlp;
	fcip_port_info_t	*fport;
	struct fcip		*fptr;
	int			ppa;

	slp = (struct fcipstr *)wq->q_ptr;
	dlp = (union DL_primitives *)mp->b_rptr;

	if (MBLKL(mp) < DL_ATTACH_REQ_SIZE) {
		dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPRIM, 0);
		return;
	}

	/* attach is only legal on a stream not yet attached to a PPA */
	if (slp->sl_state != DL_UNATTACHED) {
		dlerrorack(wq, mp, DL_ATTACH_REQ, DL_OUTSTATE, 0);
		return;
	}

	ppa = dlp->attach_req.dl_ppa;
	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "attach req: ppa %x", ppa));

	/*
	 * check if the PPA is valid
	 */

	mutex_enter(&fcip_global_mutex);

	/* the PPA is the DDI instance number of the matching fcip port */
	for (fport = fcip_port_head; fport; fport = fport->fcipp_next) {
		if ((fptr = fport->fcipp_fcip) == NULL) {
			continue;
		}
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "ppa %x, inst %x", ppa,
		    ddi_get_instance(fptr->fcip_dip)));

		if (ppa == ddi_get_instance(fptr->fcip_dip)) {
			FCIP_DEBUG(FCIP_DEBUG_DLPI,
			    (CE_NOTE, "ppa found %x", ppa));
			break;
		}
	}

	if (fport == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DLPI,
		    (CE_NOTE, "dlerrorack coz fport==NULL"));

		mutex_exit(&fcip_global_mutex);

		/*
		 * No port instance yet.  If the transport does not know
		 * the port either, the PPA is simply bad.
		 */
		if (fc_ulp_get_port_handle(ppa) == NULL) {
			dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPPA, 0);
			return;
		}

		/*
		 * Wait for Port attach callback to trigger.  If port_detach
		 * got in while we were waiting, then ddi_get_soft_state
		 * will return NULL, and we'll return error.
		 */

		delay(drv_usectohz(FCIP_INIT_DELAY));
		mutex_enter(&fcip_global_mutex);

		fptr = ddi_get_soft_state(fcip_softp, ppa);
		if (fptr == NULL) {
			mutex_exit(&fcip_global_mutex);
			dlerrorack(wq, mp, DL_ATTACH_REQ, DL_BADPPA, 0);
			return;
		}
	}

	/*
	 * set link to device and update our state
	 */
	slp->sl_fcip = fptr;
	slp->sl_state = DL_UNBOUND;

	mutex_exit(&fcip_global_mutex);

#ifdef DEBUG
	mutex_enter(&fptr->fcip_mutex);
	if (fptr->fcip_flags & FCIP_LINK_DOWN) {
		FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_WARN, "port not online yet"));
	}
	mutex_exit(&fptr->fcip_mutex);
#endif

	dlokack(wq, mp, DL_ATTACH_REQ);
}
5437
5438
5439/*
5440 * DL_DETACH request - detaches a PPA from a stream
5441 */
5442static void
5443fcip_dreq(queue_t *wq, mblk_t *mp)
5444{
5445	struct fcipstr		*slp;
5446
5447	slp = (struct fcipstr *)wq->q_ptr;
5448
5449	if (MBLKL(mp) < DL_DETACH_REQ_SIZE) {
5450		dlerrorack(wq, mp, DL_DETACH_REQ, DL_BADPRIM, 0);
5451		return;
5452	}
5453
5454	if (slp->sl_state != DL_UNBOUND) {
5455		dlerrorack(wq, mp, DL_DETACH_REQ, DL_OUTSTATE, 0);
5456		return;
5457	}
5458
5459	fcip_dodetach(slp);
5460	dlokack(wq, mp, DL_DETACH_REQ);
5461}
5462
5463/*
5464 * DL_BIND request: requests a DLS provider to bind a DLSAP to the stream.
5465 * DLS users communicate with a physical interface through DLSAPs. Multiple
5466 * DLSAPs can be bound to the same stream (PPA)
5467 */
5468static void
5469fcip_breq(queue_t *wq, mblk_t *mp)
5470{
5471	struct fcipstr		*slp;
5472	union DL_primitives	*dlp;
5473	struct fcip		*fptr;
5474	struct fcipdladdr	fcipaddr;
5475	t_uscalar_t		sap;
5476	int			xidtest;
5477
5478	slp = (struct fcipstr *)wq->q_ptr;
5479
5480	if (MBLKL(mp) < DL_BIND_REQ_SIZE) {
5481		dlerrorack(wq, mp, DL_BIND_REQ, DL_BADPRIM, 0);
5482		return;
5483	}
5484
5485	if (slp->sl_state != DL_UNBOUND) {
5486		dlerrorack(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0);
5487		return;
5488	}
5489
5490	dlp = (union DL_primitives *)mp->b_rptr;
5491	fptr = slp->sl_fcip;
5492
5493	if (fptr == NULL) {
5494		dlerrorack(wq, mp, DL_BIND_REQ, DL_OUTSTATE, 0);
5495		return;
5496	}
5497
5498	sap = dlp->bind_req.dl_sap;
5499	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "fcip_breq - sap: %x", sap));
5500	xidtest = dlp->bind_req.dl_xidtest_flg;
5501
5502	if (xidtest) {
5503		dlerrorack(wq, mp, DL_BIND_REQ, DL_NOAUTO, 0);
5504		return;
5505	}
5506
5507	FCIP_DEBUG(FCIP_DEBUG_DLPI, (CE_NOTE, "DLBIND: sap : %x", sap));
5508
5509	if (sap > ETHERTYPE_MAX) {
5510		dlerrorack(wq, mp, dlp->dl_primitive, DL_BADSAP, 0);
5511		return;
5512	}
5513	/*
5514	 * save SAP for this stream and change the link state
5515	 */
5516	slp->sl_sap = sap;
5517	slp->sl_state = DL_IDLE;
5518
5519	fcipaddr.dl_sap = sap;
5520	ether_bcopy(&fptr->fcip_macaddr, &fcipaddr.dl_phys);
5521	dlbindack(wq, mp, sap, &fcipaddr, FCIPADDRL, 0, 0);
5522
5523	fcip_setipq(fptr);
5524}
5525
5526/*
5527 * DL_UNBIND request to unbind a previously bound DLSAP, from this stream
5528 */
5529static void
5530fcip_ubreq(queue_t *wq, mblk_t *mp)
5531{
5532	struct fcipstr	*slp;
5533
5534	slp = (struct fcipstr *)wq->q_ptr;
5535
5536	if (MBLKL(mp) < DL_UNBIND_REQ_SIZE) {
5537		dlerrorack(wq, mp, DL_UNBIND_REQ, DL_BADPRIM, 0);
5538		return;
5539	}
5540
5541	if (slp->sl_state != DL_IDLE) {
5542		dlerrorack(wq, mp, DL_UNBIND_REQ, DL_OUTSTATE, 0);
5543		return;
5544	}
5545
5546	slp->sl_state = DL_UNBOUND;
5547	slp->sl_sap = 0;
5548
5549	(void) putnextctl1(RD(wq), M_FLUSH, FLUSHRW);
5550	dlokack(wq, mp, DL_UNBIND_REQ);
5551
5552	fcip_setipq(slp->sl_fcip);
5553}
5554
5555/*
5556 * Return our physical address
5557 */
5558static void
5559fcip_pareq(queue_t *wq, mblk_t *mp)
5560{
5561	struct fcipstr 		*slp;
5562	union DL_primitives	*dlp;
5563	int			type;
5564	struct fcip		*fptr;
5565	fcip_port_info_t	*fport;
5566	struct ether_addr	addr;
5567
5568	slp = (struct fcipstr *)wq->q_ptr;
5569
5570	if (MBLKL(mp) < DL_PHYS_ADDR_REQ_SIZE) {
5571		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5572		return;
5573	}
5574
5575	dlp = (union DL_primitives *)mp->b_rptr;
5576	type = dlp->physaddr_req.dl_addr_type;
5577	fptr = slp->sl_fcip;
5578
5579	if (fptr == NULL) {
5580		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_OUTSTATE, 0);
5581		return;
5582	}
5583
5584	fport = fptr->fcip_port_info;
5585
5586	switch (type) {
5587	case DL_FACT_PHYS_ADDR:
5588		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5589		    (CE_NOTE, "returning factory phys addr"));
5590		wwn_to_ether(&fport->fcipp_pwwn, &addr);
5591		break;
5592
5593	case DL_CURR_PHYS_ADDR:
5594		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5595		    (CE_NOTE, "returning current phys addr"));
5596		ether_bcopy(&fptr->fcip_macaddr, &addr);
5597		break;
5598
5599	default:
5600		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5601		    (CE_NOTE, "Not known cmd type in phys addr"));
5602		dlerrorack(wq, mp, DL_PHYS_ADDR_REQ, DL_NOTSUPPORTED, 0);
5603		return;
5604	}
5605	dlphysaddrack(wq, mp, &addr, ETHERADDRL);
5606}
5607
5608/*
5609 * Set physical address DLPI request
5610 */
5611static void
5612fcip_spareq(queue_t *wq, mblk_t *mp)
5613{
5614	struct fcipstr		*slp;
5615	union DL_primitives	*dlp;
5616	t_uscalar_t		off, len;
5617	struct ether_addr	*addrp;
5618	la_wwn_t		wwn;
5619	struct fcip		*fptr;
5620	fc_ns_cmd_t		fcip_ns_cmd;
5621
5622	slp = (struct fcipstr *)wq->q_ptr;
5623
5624	if (MBLKL(mp) < DL_SET_PHYS_ADDR_REQ_SIZE) {
5625		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5626		return;
5627	}
5628
5629	dlp = (union DL_primitives *)mp->b_rptr;
5630	len = dlp->set_physaddr_req.dl_addr_length;
5631	off = dlp->set_physaddr_req.dl_addr_offset;
5632
5633	if (!MBLKIN(mp, off, len)) {
5634		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5635		return;
5636	}
5637
5638	addrp = (struct ether_addr *)(mp->b_rptr + off);
5639
5640	/*
5641	 * If the length of physical address is not correct or address
5642	 * specified is a broadcast address or multicast addr -
5643	 * return an error.
5644	 */
5645	if ((len != ETHERADDRL) ||
5646	    ((addrp->ether_addr_octet[0] & 01) == 1) ||
5647	    (ether_cmp(addrp, &fcip_arpbroadcast_addr) == 0)) {
5648		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0);
5649		return;
5650	}
5651
5652	/*
5653	 * check if a stream is attached to this device. Else return an error
5654	 */
5655	if ((fptr = slp->sl_fcip) == NULL) {
5656		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_OUTSTATE, 0);
5657		return;
5658	}
5659
5660	/*
5661	 * set the new interface local address. We request the transport
5662	 * layer to change the Port WWN for this device - return an error
5663	 * if we don't succeed.
5664	 */
5665
5666	ether_to_wwn(addrp, &wwn);
5667	if (fcip_set_wwn(&wwn) == FC_SUCCESS) {
5668		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5669		    (CE_WARN, "WWN changed in spareq"));
5670	} else {
5671		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADADDR, 0);
5672	}
5673
5674	/*
5675	 * register The new Port WWN and Node WWN with the transport
5676	 * and Nameserver. Hope the transport ensures all current I/O
5677	 * has stopped before actually attempting to register a new
5678	 * port and Node WWN else we are hosed. Maybe a Link reset
5679	 * will get everyone's attention.
5680	 */
5681	fcip_ns_cmd.ns_flags = 0;
5682	fcip_ns_cmd.ns_cmd = NS_RPN_ID;
5683	fcip_ns_cmd.ns_req_len = sizeof (la_wwn_t);
5684	fcip_ns_cmd.ns_req_payload = (caddr_t)&wwn.raw_wwn[0];
5685	fcip_ns_cmd.ns_resp_len = 0;
5686	fcip_ns_cmd.ns_resp_payload = (caddr_t)0;
5687	if (fc_ulp_port_ns(fptr->fcip_port_info->fcipp_handle,
5688	    (opaque_t)0, &fcip_ns_cmd) != FC_SUCCESS) {
5689		FCIP_DEBUG(FCIP_DEBUG_DLPI,
5690		    (CE_WARN, "setting Port WWN failed"));
5691		dlerrorack(wq, mp, DL_SET_PHYS_ADDR_REQ, DL_BADPRIM, 0);
5692		return;
5693	}
5694
5695	dlokack(wq, mp, DL_SET_PHYS_ADDR_REQ);
5696}
5697
5698/*
5699 * change our port's WWN if permitted by hardware
5700 */
5701/* ARGSUSED */
5702static int
5703fcip_set_wwn(la_wwn_t *pwwn)
5704{
5705	/*
5706	 * We're usually not allowed to change the WWN of adapters
5707	 * but some adapters do permit us to change the WWN - don't
5708	 * permit setting of WWNs (yet?) - This behavior could be
5709	 * modified if needed
5710	 */
5711	return (FC_FAILURE);
5712}
5713
5714
5715/*
5716 * This routine fills in the header for fastpath data requests. What this
5717 * does in simple terms is, instead of sending all data through the Unitdata
5718 * request dlpi code paths (which will then append the protocol specific
5719 * header - network and snap headers in our case), the upper layers issue
5720 * a M_IOCTL with a DL_IOC_HDR_INFO request and ask the streams endpoint
5721 * driver to give the header it needs appended and the upper layer
5722 * allocates and fills in the header and calls our put routine
5723 */
static void
fcip_dl_ioc_hdr_info(queue_t *wq, mblk_t *mp)
{
	mblk_t			*nmp;
	struct fcipstr		*slp;
	struct fcipdladdr	*dlap;
	dl_unitdata_req_t	*dlup;
	fcph_network_hdr_t	*headerp;
	la_wwn_t		wwn;
	llc_snap_hdr_t		*lsnap;
	struct fcip		*fptr;
	fcip_port_info_t	*fport;
	t_uscalar_t		off, len;
	size_t			hdrlen;
	int 			error;

	slp = (struct fcipstr *)wq->q_ptr;
	fptr = slp->sl_fcip;
	/* the stream must be attached to a port instance */
	if (fptr == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns EINVAL1"));
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	/* pull the ioctl payload up into one contiguous mblk */
	error = miocpullup(mp, sizeof (dl_unitdata_req_t) + FCIPADDRL);
	if (error != 0) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns %d", error));
		miocnak(wq, mp, 0, error);
		return;
	}

	fport = fptr->fcip_port_info;

	/*
	 * check if the DL_UNITDATA_REQ destination addr has valid offset
	 * and length values
	 */
	dlup = (dl_unitdata_req_t *)mp->b_cont->b_rptr;
	off = dlup->dl_dest_addr_offset;
	len = dlup->dl_dest_addr_length;
	if (dlup->dl_primitive != DL_UNITDATA_REQ ||
	    !MBLKIN(mp->b_cont, off, len) || (len != FCIPADDRL)) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns EINVAL2"));
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	dlap = (struct fcipdladdr *)(mp->b_cont->b_rptr + off);

	/*
	 * Allocate a new mblk to hold the ether header
	 */

	/*
	 * setup space for network header
	 */
	hdrlen = (sizeof (llc_snap_hdr_t) + sizeof (fcph_network_hdr_t));
	if ((nmp = allocb(hdrlen, BPRI_MED)) == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "dliochdr : returns ENOMEM"));
		miocnak(wq, mp, 0, ENOMEM);
		return;
	}
	nmp->b_wptr += hdrlen;

	/*
	 * Fill in the Network Hdr and LLC SNAP header;
	 */
	headerp = (fcph_network_hdr_t *)nmp->b_rptr;
	/*
	 * just fill in the Node WWN here - we can fill in the NAA_ID when
	 * we search the routing table
	 */
	if (ether_cmp(&dlap->dl_phys, &fcip_arpbroadcast_addr) == 0) {
		ether_to_wwn(&fcipnhbroadcastaddr, &wwn);
	} else {
		ether_to_wwn(&dlap->dl_phys, &wwn);
	}
	bcopy(&wwn, &headerp->net_dest_addr, sizeof (la_wwn_t));
	bcopy(&fport->fcipp_pwwn, &headerp->net_src_addr, sizeof (la_wwn_t));
	/* standard LLC/SNAP header; pid carries the destination SAP */
	lsnap = (llc_snap_hdr_t *)(nmp->b_rptr + sizeof (fcph_network_hdr_t));
	lsnap->dsap = 0xAA;
	lsnap->ssap = 0xAA;
	lsnap->ctrl = 0x03;
	lsnap->oui[0] = 0x00;
	lsnap->oui[1] = 0x00;
	lsnap->oui[2] = 0x00;
	lsnap->pid = BE_16(dlap->dl_sap);

	/*
	 * Link new mblk in after the "request" mblks.
	 */
	linkb(mp, nmp);

	/* mark this stream as fastpath-capable from here on */
	slp->sl_flags |= FCIP_SLFAST;

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "dliochdr : returns success "));
	miocack(wq, mp, msgsize(mp->b_cont), 0);
}
5827
5828
5829/*
5830 * Establish a kmem cache for fcip packets
5831 */
static int
fcip_cache_constructor(void *buf, void *arg, int flags)
{
	fcip_pkt_t		*fcip_pkt = buf;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport = (fcip_port_info_t *)arg;
	int			(*cb) (caddr_t);
	struct fcip		*fptr;

	/* map kmem sleep semantics onto the DDI DMA callback convention */
	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;

	ASSERT(fport != NULL);

	fptr = fport->fcipp_fcip;

	/*
	 * we allocated space for our private area at the end of the
	 * fc packet. Make sure we point to it correctly. Ideally we
	 * should just push fc_packet_private to the beginning or end
	 * of the fc_packet structure
	 */
	fcip_pkt->fcip_pkt_next = NULL;
	fcip_pkt->fcip_pkt_prev = NULL;
	fcip_pkt->fcip_pkt_dest = NULL;
	fcip_pkt->fcip_pkt_state = 0;
	fcip_pkt->fcip_pkt_reason = 0;
	fcip_pkt->fcip_pkt_flags = 0;
	fcip_pkt->fcip_pkt_fptr = fptr;
	fcip_pkt->fcip_pkt_dma_flags = 0;

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
	fc_pkt->pkt_ulp_rscn_infop = NULL;

	/*
	 * We use pkt_cmd_dma for OUTBOUND requests. We don't expect
	 * any responses for outbound IP data so no need to setup
	 * response or data dma handles.
	 */
	if (ddi_dma_alloc_handle(fport->fcipp_dip,
	    &fport->fcipp_cmd_dma_attr, cb, NULL,
	    &fc_pkt->pkt_cmd_dma) != DDI_SUCCESS) {
		return (FCIP_FAILURE);
	}

	fc_pkt->pkt_cmd_acc = fc_pkt->pkt_resp_acc = NULL;
	/* the FCA private area immediately follows our fcip_pkt_t */
	fc_pkt->pkt_fca_private = (opaque_t)((caddr_t)buf +
	    sizeof (fcip_pkt_t));
	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;

	fc_pkt->pkt_cmd_cookie_cnt = fc_pkt->pkt_resp_cookie_cnt =
	    fc_pkt->pkt_data_cookie_cnt = 0;
	fc_pkt->pkt_cmd_cookie = fc_pkt->pkt_resp_cookie =
	    fc_pkt->pkt_data_cookie = NULL;

	return (FCIP_SUCCESS);
}
5888
5889/*
5890 * destroy the fcip kmem cache
5891 */
5892static void
5893fcip_cache_destructor(void *buf, void *arg)
5894{
5895	fcip_pkt_t		*fcip_pkt = (fcip_pkt_t *)buf;
5896	fc_packet_t		*fc_pkt;
5897	fcip_port_info_t	*fport = (fcip_port_info_t *)arg;
5898	struct fcip		*fptr;
5899
5900	ASSERT(fport != NULL);
5901
5902	fptr = fport->fcipp_fcip;
5903
5904	ASSERT(fptr == fcip_pkt->fcip_pkt_fptr);
5905	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
5906
5907	if (fc_pkt->pkt_cmd_dma) {
5908		ddi_dma_free_handle(&fc_pkt->pkt_cmd_dma);
5909	}
5910}
5911
5912/*
5913 * the fcip destination structure is hashed on Node WWN assuming
5914 * a  NAA_ID of 0x1 (IEEE)
5915 */
static struct fcip_dest *
fcip_get_dest(struct fcip *fptr, la_wwn_t *pwwn)
{
	struct fcip_dest	*fdestp = NULL;
	fcip_port_info_t	*fport;
	int			hash_bucket;
	opaque_t		pd;
	int			rval;
	struct fcip_routing_table *frp;
	la_wwn_t		twwn;
	uint32_t		*twwnp = (uint32_t *)&twwn;

	/* hash on the low six bytes (IEEE MAC portion) of the WWN */
	hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "get dest hashbucket : 0x%x", hash_bucket));
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x",
	    pwwn->raw_wwn[2], pwwn->raw_wwn[3], pwwn->raw_wwn[4],
	    pwwn->raw_wwn[5], pwwn->raw_wwn[6], pwwn->raw_wwn[7]));

	ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);

	/* bail out (NULL) if this fcip instance is going away */
	if (fcip_check_port_exists(fptr)) {
		/* fptr is stale, return fdestp */
		return (fdestp);
	}
	fport = fptr->fcip_port_info;

	/*
	 * First check if we have active I/Os going on with the
	 * destination port (an entry would exist in fcip_dest hash table)
	 */
	mutex_enter(&fptr->fcip_dest_mutex);
	fdestp = fptr->fcip_dest[hash_bucket];
	while (fdestp != NULL) {
		mutex_enter(&fdestp->fcipd_mutex);
		/* only consider entries still backed by a routing entry */
		if (fdestp->fcipd_rtable) {
			if (fcip_wwn_compare(pwwn, &fdestp->fcipd_pwwn,
			    FCIP_COMPARE_NWWN) == 0) {
				FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
				    (CE_NOTE, "found fdestp"));
				mutex_exit(&fdestp->fcipd_mutex);
				mutex_exit(&fptr->fcip_dest_mutex);
				return (fdestp);
			}
		}
		mutex_exit(&fdestp->fcipd_mutex);
		fdestp = fdestp->fcipd_next;
	}
	mutex_exit(&fptr->fcip_dest_mutex);

	/*
	 * We did not find the destination port information in our
	 * active port list so search for an entry in our routing
	 * table.
	 */
	mutex_enter(&fptr->fcip_rt_mutex);
	frp = fcip_lookup_rtable(fptr, pwwn, FCIP_COMPARE_NWWN);
	mutex_exit(&fptr->fcip_rt_mutex);

	/*
	 * Rediscover the remote port when there is no routing entry at
	 * all, when the entry is in a transitional (not-unavailable yet
	 * not logged-in) state, or when it has no port device attached.
	 */
	if (frp == NULL || (frp && (!FCIP_RTE_UNAVAIL(frp->fcipr_state)) &&
	    frp->fcipr_state != PORT_DEVICE_LOGGED_IN) ||
	    (frp && frp->fcipr_pd == NULL)) {
		/*
		 * No entry for the destination port in our routing
		 * table too. First query the transport to see if it
		 * already has structures for the destination port in
		 * its hash tables. This must be done for all topologies
		 * since we could have retired entries in the hash tables
		 * which may have to be re-added without a statechange
		 * callback happening. Its better to try and get an entry
		 * for the destination port rather than simply failing a
		 * request though it may be an overkill in private loop
		 * topologies.
		 * If a entry for the remote port exists in the transport's
		 * hash tables, we are fine and can add the entry to our
		 * routing and dest hash lists, Else for fabric configs we
		 * query the nameserver if one exists or issue FARP ELS.
		 */

		/*
		 * We need to do a PortName based Nameserver
		 * query operation. So get the right PortWWN
		 * for the adapter.
		 */
		bcopy(pwwn, &twwn, sizeof (la_wwn_t));

		/*
		 * Try IEEE Name (Format 1) first, this is the default and
		 * Emulex uses this format.
		 */
		pd = fc_ulp_get_remote_port(fport->fcipp_handle,
					    &twwn, &rval, 1);

		if (rval != FC_SUCCESS) {
			/*
			 * If IEEE Name (Format 1) query failed, try IEEE
			 * Extended Name (Format 2) which Qlogic uses.
			 * And try port 1 on Qlogic FC-HBA first.
			 * Note: On x86, we need to byte swap the 32-bit
			 * word first, after the modification, swap it back.
			 */
			*twwnp = BE_32(*twwnp);
			twwn.w.nport_id = QLC_PORT_1_ID_BITS;
			twwn.w.naa_id = QLC_PORT_NAA;
			*twwnp = BE_32(*twwnp);
			pd = fc_ulp_get_remote_port(fport->fcipp_handle,
						    &twwn, &rval, 1);
		}

		if (rval != FC_SUCCESS) {
			/* If still failed, try port 2 on Qlogic FC-HBA. */
			*twwnp = BE_32(*twwnp);
			twwn.w.nport_id = QLC_PORT_2_ID_BITS;
			*twwnp = BE_32(*twwnp);
			pd = fc_ulp_get_remote_port(fport->fcipp_handle,
						    &twwn, &rval, 1);
		}

		if (rval == FC_SUCCESS) {
			fc_portmap_t	map;
			/*
			 * Add the newly found destination structure
			 * to our routing table. Create a map with
			 * the device we found. We could ask the
			 * transport to give us the list of all
			 * devices connected to our port but we
			 * probably don't need to know all the devices
			 * so let us just construct a list with only
			 * one device instead.
			 */

			fc_ulp_copy_portmap(&map, pd);
			fcip_rt_update(fptr, &map, 1);

			/* re-fetch the routing entry we just added */
			mutex_enter(&fptr->fcip_rt_mutex);
			frp = fcip_lookup_rtable(fptr, pwwn,
			    FCIP_COMPARE_NWWN);
			mutex_exit(&fptr->fcip_rt_mutex);

			fdestp = fcip_add_dest(fptr, frp);
		} else if (fcip_farp_supported &&
			(FC_TOP_EXTERNAL(fport->fcipp_topology) ||
			(fport->fcipp_topology == FC_TOP_PT_PT))) {
			/*
			 * The Name server request failed so
			 * issue an FARP
			 */
			fdestp = fcip_do_farp(fptr, pwwn, NULL,
				0, 0);
		} else {
		    fdestp = NULL;
		}
	} else if (frp && frp->fcipr_state == PORT_DEVICE_LOGGED_IN) {
		/*
		 * Prepare a dest structure to return to caller
		 */
		fdestp = fcip_add_dest(fptr, frp);
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
		    (CE_NOTE, "in fcip get dest non fabric"));
	}
	return (fdestp);
}
6079
6080
6081/*
6082 * Endian clean WWN compare.
6083 * Returns 0 if they compare OK, else return non zero value.
6084 * flag can be bitwise OR of FCIP_COMPARE_NWWN, FCIP_COMPARE_PWWN,
6085 * FCIP_COMPARE_BROADCAST.
6086 */
6087static int
6088fcip_wwn_compare(la_wwn_t *wwn1, la_wwn_t *wwn2, int flag)
6089{
6090	int rval = 0;
6091	if ((wwn1->raw_wwn[2] != wwn2->raw_wwn[2]) ||
6092	    (wwn1->raw_wwn[3] != wwn2->raw_wwn[3]) ||
6093	    (wwn1->raw_wwn[4] != wwn2->raw_wwn[4]) ||
6094	    (wwn1->raw_wwn[5] != wwn2->raw_wwn[5]) ||
6095	    (wwn1->raw_wwn[6] != wwn2->raw_wwn[6]) ||
6096	    (wwn1->raw_wwn[7] != wwn2->raw_wwn[7])) {
6097		rval = 1;
6098	} else if ((flag == FCIP_COMPARE_PWWN) &&
6099	    (((wwn1->raw_wwn[0] & 0xf0) != (wwn2->raw_wwn[0] & 0xf0)) ||
6100	    (wwn1->raw_wwn[1] != wwn2->raw_wwn[1]))) {
6101		rval = 1;
6102	}
6103	return (rval);
6104}
6105
6106
6107/*
6108 * Add an entry for a remote port in the dest hash table. Dest hash table
6109 * has entries for ports in the routing hash table with which we decide
6110 * to establish IP communication with. The no. of entries in the dest hash
6111 * table must always be less than or equal to the entries in the routing
 * hash table. Every entry in the dest hash table of course must have a
6113 * corresponding entry in the routing hash table
6114 */
static struct fcip_dest *
fcip_add_dest(struct fcip *fptr, struct fcip_routing_table *frp)
{
	struct fcip_dest *fdestp = NULL;
	la_wwn_t	*pwwn;
	int hash_bucket;
	struct fcip_dest *fdest_new;

	/* nothing to add without a backing routing-table entry */
	if (frp == NULL) {
		return (fdestp);
	}

	pwwn = &frp->fcipr_pwwn;
	mutex_enter(&fptr->fcip_dest_mutex);
	hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "add dest hash_bucket: 0x%x", hash_bucket));

	ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);

	/* return any existing entry instead of adding a duplicate */
	fdestp = fptr->fcip_dest[hash_bucket];
	while (fdestp != NULL) {
		mutex_enter(&fdestp->fcipd_mutex);
		if (fdestp->fcipd_rtable) {
			if (fcip_wwn_compare(pwwn, &fdestp->fcipd_pwwn,
			    FCIP_COMPARE_PWWN) == 0) {
				mutex_exit(&fdestp->fcipd_mutex);
				mutex_exit(&fptr->fcip_dest_mutex);
				return (fdestp);
			}
		}
		mutex_exit(&fdestp->fcipd_mutex);
		fdestp = fdestp->fcipd_next;
	}

	ASSERT(fdestp == NULL);

	/* allocate and link a new entry at the head of the hash chain */
	fdest_new = (struct fcip_dest *)
			kmem_zalloc(sizeof (struct fcip_dest), KM_SLEEP);

	mutex_init(&fdest_new->fcipd_mutex, NULL, MUTEX_DRIVER, NULL);
	fdest_new->fcipd_next = fptr->fcip_dest[hash_bucket];
	fdest_new->fcipd_refcnt = 0;
	fdest_new->fcipd_rtable = frp;
	fdest_new->fcipd_ncmds = 0;
	fptr->fcip_dest[hash_bucket] = fdest_new;
	fdest_new->fcipd_flags = FCIP_PORT_NOTLOGGED;

	mutex_exit(&fptr->fcip_dest_mutex);
	return (fdest_new);
}
6166
6167/*
6168 * Cleanup the dest hash table and remove all entries
6169 */
6170static void
6171fcip_cleanup_dest(struct fcip *fptr)
6172{
6173	struct fcip_dest *fdestp = NULL;
6174	struct fcip_dest *fdest_delp = NULL;
6175	int i;
6176
6177	mutex_enter(&fptr->fcip_dest_mutex);
6178
6179	for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) {
6180		fdestp = fptr->fcip_dest[i];
6181		while (fdestp != NULL) {
6182			mutex_destroy(&fdestp->fcipd_mutex);
6183			fdest_delp = fdestp;
6184			fdestp = fdestp->fcipd_next;
6185			kmem_free(fdest_delp, sizeof (struct fcip_dest));
6186			fptr->fcip_dest[i] = NULL;
6187		}
6188	}
6189	mutex_exit(&fptr->fcip_dest_mutex);
6190}
6191
6192
6193/*
6194 * Send FARP requests for Fabric ports when we don't have the port
6195 * we wish to talk to in our routing hash table. FARP is specially required
6196 * to talk to FC switches for inband switch management. Most FC switches
6197 * today have a switch FC IP address for IP over FC inband switch management
6198 * but the WWN and Port_ID for this traffic is not available through the
6199 * Nameservers since the switch themeselves are transparent.
6200 */
6201/* ARGSUSED */
6202static struct fcip_dest *
6203fcip_do_farp(struct fcip *fptr, la_wwn_t *pwwn, char *ip_addr,
6204    size_t ip_addr_len, int flags)
6205{
6206	fcip_pkt_t		*fcip_pkt;
6207	fc_packet_t		*fc_pkt;
6208	fcip_port_info_t	*fport = fptr->fcip_port_info;
6209	la_els_farp_t		farp_cmd;
6210	la_els_farp_t		*fcmd;
6211	struct fcip_dest	*fdestp = NULL;
6212	int			rval;
6213	clock_t			farp_lbolt;
6214	la_wwn_t		broadcast_wwn;
6215	struct fcip_dest	*bdestp;
6216	struct fcip_routing_table 	*frp;
6217
6218	bdestp = fcip_get_dest(fptr, &broadcast_wwn);
6219
6220	if (bdestp == NULL) {
6221		return (fdestp);
6222	}
6223
6224	fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_farp_t),
6225	    sizeof (la_els_farp_t), bdestp->fcipd_pd, KM_SLEEP);
6226
6227	if (fcip_pkt == NULL) {
6228		return (fdestp);
6229	}
6230
6231	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6232	ether_to_wwn(&fcip_arpbroadcast_addr, &broadcast_wwn);
6233
6234	mutex_enter(&bdestp->fcipd_mutex);
6235	if (bdestp->fcipd_rtable == NULL) {
6236		mutex_exit(&bdestp->fcipd_mutex);
6237		fcip_ipkt_free(fcip_pkt);
6238		return (fdestp);
6239	}
6240
6241	fcip_pkt->fcip_pkt_dest = bdestp;
6242	fc_pkt->pkt_fca_device = bdestp->fcipd_fca_dev;
6243
6244	bdestp->fcipd_ncmds++;
6245	mutex_exit(&bdestp->fcipd_mutex);
6246
6247	fcip_init_broadcast_pkt(fcip_pkt, NULL, 1);
6248	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST;
6249
6250	/*
6251	 * Now initialize the FARP payload itself
6252	 */
6253	fcmd = &farp_cmd;
6254	fcmd->ls_code.ls_code = LA_ELS_FARP_REQ;
6255	fcmd->ls_code.mbz = 0;
6256	/*
6257	 * for now just match the Port WWN since the other match addr
6258	 * code points are optional. We can explore matching the IP address
6259	 * if needed
6260	 */
6261	if (ip_addr) {
6262		fcmd->match_addr = FARP_MATCH_WW_PN_IPv4;
6263	} else {
6264		fcmd->match_addr = FARP_MATCH_WW_PN;
6265	}
6266
6267	/*
6268	 * Request the responder port to log into us - that way
6269	 * the Transport is aware of the remote port when we create
6270	 * an entry for it in our tables
6271	 */
6272	fcmd->resp_flags = FARP_INIT_REPLY | FARP_INIT_P_LOGI;
6273	fcmd->req_id = fport->fcipp_sid;
6274	fcmd->dest_id.port_id = fc_pkt->pkt_cmd_fhdr.d_id;
6275	bcopy(&fport->fcipp_pwwn, &fcmd->req_pwwn, sizeof (la_wwn_t));
6276	bcopy(&fport->fcipp_nwwn, &fcmd->req_nwwn, sizeof (la_wwn_t));
6277	bcopy(pwwn, &fcmd->resp_pwwn, sizeof (la_wwn_t));
6278	/*
6279	 * copy in source IP address if we get to know it
6280	 */
6281	if (ip_addr) {
6282		bcopy(ip_addr, fcmd->resp_ip, ip_addr_len);
6283	}
6284
6285	fc_pkt->pkt_cmdlen = sizeof (la_els_farp_t);
6286	fc_pkt->pkt_rsplen = sizeof (la_els_farp_t);
6287	fc_pkt->pkt_tran_type = FC_PKT_EXCHANGE;
6288	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;
6289
6290	/*
6291	 * Endian safe copy
6292	 */
6293	FCIP_CP_OUT(fcmd, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc,
6294	    sizeof (la_els_farp_t));
6295
6296	/*
6297	 * send the packet in polled mode.
6298	 */
6299	rval = fc_ulp_issue_els(fport->fcipp_handle, fc_pkt);
6300	if (rval != FC_SUCCESS) {
6301		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN,
6302		    "fcip_transport of farp pkt failed 0x%x", rval));
6303		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_LIST;
6304		fcip_ipkt_free(fcip_pkt);
6305
6306		mutex_enter(&bdestp->fcipd_mutex);
6307		bdestp->fcipd_ncmds--;
6308		mutex_exit(&bdestp->fcipd_mutex);
6309
6310		return (fdestp);
6311	}
6312
6313	farp_lbolt = ddi_get_lbolt();
6314	farp_lbolt += drv_usectohz(FCIP_FARP_TIMEOUT);
6315
6316	mutex_enter(&fptr->fcip_mutex);
6317	fptr->fcip_farp_rsp_flag = 0;
6318	while (!fptr->fcip_farp_rsp_flag) {
6319		if (cv_timedwait(&fptr->fcip_farp_cv, &fptr->fcip_mutex,
6320		    farp_lbolt) == -1) {
6321			/*
6322			 * No FARP response from any destination port
6323			 * so bail out.
6324			 */
6325			fptr->fcip_farp_rsp_flag = 1;
6326		} else {
6327			/*
6328			 * We received a FARP response - check to see if the
6329			 * response was in reply to our FARP request.
6330			 */
6331
6332			mutex_enter(&fptr->fcip_rt_mutex);
6333			frp = fcip_lookup_rtable(fptr, pwwn, FCIP_COMPARE_NWWN);
6334			mutex_exit(&fptr->fcip_rt_mutex);
6335
6336			if ((frp != NULL) &&
6337			    !FCIP_RTE_UNAVAIL(frp->fcipr_state)) {
6338				fdestp = fcip_get_dest(fptr, pwwn);
6339			} else {
6340				/*
6341				 * Not our FARP response so go back and wait
6342				 * again till FARP_TIMEOUT expires
6343				 */
6344				fptr->fcip_farp_rsp_flag = 0;
6345			}
6346		}
6347	}
6348	mutex_exit(&fptr->fcip_mutex);
6349
6350	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_LIST;
6351	fcip_ipkt_free(fcip_pkt);
6352	mutex_enter(&bdestp->fcipd_mutex);
6353	bdestp->fcipd_ncmds--;
6354	mutex_exit(&bdestp->fcipd_mutex);
6355	return (fdestp);
6356}
6357
6358
6359
6360/*
6361 * Helper routine to PLOGI to a remote port we wish to talk to.
6362 * This may not be required since the port driver does logins anyway,
6363 * but this can be required in fabric cases since FARP requests/responses
6364 * don't require you to be logged in?
6365 */
6366
6367/* ARGSUSED */
static int
fcip_do_plogi(struct fcip *fptr, struct fcip_routing_table *frp)
{
	fcip_pkt_t		*fcip_pkt;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	la_els_logi_t		logi;
	int			rval;
	fc_frame_hdr_t		*fr_hdr;

	/*
	 * Don't bother to login for broadcast RTE entries
	 * (D_ID 0x0 and 0xffffff are broadcast/reserved addresses).
	 */
	if ((frp->fcipr_d_id.port_id == 0x0) ||
	    (frp->fcipr_d_id.port_id == 0xffffff)) {
		return (FC_FAILURE);
	}

	/*
	 * We shouldn't pound in too many logins here
	 * - skip if a login is already in progress or completed.
	 */
	if (frp->fcipr_state == FCIP_RT_LOGIN_PROGRESS ||
	    frp->fcipr_state == PORT_DEVICE_LOGGED_IN) {
		return (FC_SUCCESS);
	}

	/* internal packet: PLOGI payload out, accept (ACC) payload back */
	fcip_pkt = fcip_ipkt_alloc(fptr, sizeof (la_els_logi_t),
	    sizeof (la_els_logi_t), frp->fcipr_pd, KM_SLEEP);

	if (fcip_pkt == NULL) {
		return (FC_FAILURE);
	}

	/*
	 * Update back pointer for login state update
	 * (fcip_ipkt_callback() uses fcip_pkt_frp to record the result).
	 */
	fcip_pkt->fcip_pkt_frp = frp;
	frp->fcipr_state = FCIP_RT_LOGIN_PROGRESS;

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	/*
	 * Initialize frame header for ELS
	 */
	fr_hdr = &fc_pkt->pkt_cmd_fhdr;
	fr_hdr->r_ctl = R_CTL_ELS_REQ;
	fr_hdr->type = FC_TYPE_EXTENDED_LS;
	fr_hdr->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
	fr_hdr->df_ctl = 0;
	fr_hdr->s_id = fport->fcipp_sid.port_id;
	fr_hdr->d_id = frp->fcipr_d_id.port_id;
	fr_hdr->seq_cnt = 0;
	/* 0xffff = unassigned exchange/sequence IDs; FCA fills them in */
	fr_hdr->ox_id = 0xffff;
	fr_hdr->rx_id = 0xffff;
	fr_hdr->ro = 0;

	fc_pkt->pkt_rsplen = sizeof (la_els_logi_t);
	/* completion is asynchronous - callback updates routing state */
	fc_pkt->pkt_comp = fcip_ipkt_callback;
	fc_pkt->pkt_tran_type = FC_PKT_EXCHANGE;
	fc_pkt->pkt_timeout = 10;	/* 10 seconds */
	fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout;
	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;

	/*
	 * Everybody does class 3, so let's just set it.  If the transport
	 * knows better, it will deal with the class appropriately.
	 */

	fc_pkt->pkt_tran_flags = FC_TRAN_INTR | FC_TRAN_CLASS3;

	/*
	 * we need only fill in the ls_code and the cmd frame header
	 */
	bzero((void *)&logi, sizeof (la_els_logi_t));
	logi.ls_code.ls_code = LA_ELS_PLOGI;
	logi.ls_code.mbz = 0;

	/* endian-safe copy of the PLOGI payload into the DMA buffer */
	FCIP_CP_OUT((uint8_t *)&logi, fc_pkt->pkt_cmd, fc_pkt->pkt_cmd_acc,
	    sizeof (la_els_logi_t));

	/*
	 * NOTE(review): on failure the route stays in
	 * FCIP_RT_LOGIN_PROGRESS; presumably the timeout/retire logic
	 * recovers it eventually - confirm against fcip_timeout().
	 */
	rval = fc_ulp_login(fport->fcipp_handle, &fc_pkt, 1);
	if (rval != FC_SUCCESS) {
		cmn_err(CE_WARN,
		    "!fc_ulp_login failed for d_id: 0x%x, rval: 0x%x",
		    frp->fcipr_d_id.port_id, rval);
		fcip_ipkt_free(fcip_pkt);
	}
	return (rval);
}
6458
6459/*
6460 * The packet callback routine - called from the transport/FCA after
6461 * it is done DMA'ing/sending out the packet contents on the wire so
6462 * that the alloc'ed packet can be freed
6463 */
static void
fcip_ipkt_callback(fc_packet_t *fc_pkt)
{
	ls_code_t			logi_req;
	ls_code_t			logi_resp;
	fcip_pkt_t			*fcip_pkt;
	fc_frame_hdr_t			*fr_hdr;
	struct fcip 			*fptr;
	fcip_port_info_t		*fport;
	struct fcip_routing_table	*frp;

	fr_hdr = &fc_pkt->pkt_cmd_fhdr;

	/* endian-safe copies of the request/response ls_code words */
	FCIP_CP_IN(fc_pkt->pkt_resp, (uint8_t *)&logi_resp,
	    fc_pkt->pkt_resp_acc, sizeof (logi_resp));

	FCIP_CP_IN(fc_pkt->pkt_cmd, (uint8_t *)&logi_req, fc_pkt->pkt_cmd_acc,
	    sizeof (logi_req));

	/* recover the fcip packet and the routing entry it was sent for */
	fcip_pkt = (fcip_pkt_t *)fc_pkt->pkt_ulp_private;
	frp = fcip_pkt->fcip_pkt_frp;
	fptr = fcip_pkt->fcip_pkt_fptr;
	fport = fptr->fcip_port_info;

	/* this callback is only wired up for PLOGI requests */
	ASSERT(logi_req.ls_code == LA_ELS_PLOGI);

	if (fc_pkt->pkt_state != FC_PKT_SUCCESS ||
	    logi_resp.ls_code != LA_ELS_ACC) {
		/* EMPTY */

		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN,
		    "opcode : 0x%x to d_id: 0x%x failed",
		    logi_req.ls_code, fr_hdr->d_id));

		/*
		 * Login failed or was rejected: mark the route invalid
		 * and schedule it for deferred retirement so the link
		 * gets a chance to recover before we give up on it.
		 */
		mutex_enter(&fptr->fcip_rt_mutex);
		frp->fcipr_state = PORT_DEVICE_INVALID;
		frp->fcipr_invalid_timeout = fptr->fcip_timeout_ticks +
		    (FCIP_RTE_TIMEOUT / 2);
		mutex_exit(&fptr->fcip_rt_mutex);
	} else {
		fc_portid_t	d_id;

		d_id.port_id = fr_hdr->d_id;
		d_id.priv_lilp_posit = 0;

		/*
		 * Update PLOGI results; FCA Handle, and Port device handles
		 */
		mutex_enter(&fptr->fcip_rt_mutex);
		frp->fcipr_pd = fc_pkt->pkt_pd;
		frp->fcipr_fca_dev =
		    fc_ulp_get_fca_device(fport->fcipp_handle, d_id);
		frp->fcipr_state = PORT_DEVICE_LOGGED_IN;
		mutex_exit(&fptr->fcip_rt_mutex);
	}

	/* the internal packet is single-use; release it now */
	fcip_ipkt_free(fcip_pkt);
}
6522
6523
6524/*
6525 * pkt_alloc routine for outbound IP datagrams. The cache constructor
6526 * Only initializes the pkt_cmd_dma (which is where the outbound datagram
6527 * is stuffed) since we don't expect response
6528 */
static fcip_pkt_t *
fcip_pkt_alloc(struct fcip *fptr, mblk_t *bp, int flags, int datalen)
{
	fcip_pkt_t 	*fcip_pkt;
	fc_packet_t	*fc_pkt;
	ddi_dma_cookie_t	pkt_cookie;
	ddi_dma_cookie_t	*cp;
	uint32_t		cnt;
	fcip_port_info_t	*fport = fptr->fcip_port_info;

	fcip_pkt = kmem_cache_alloc(fptr->fcip_xmit_cache, flags);
	if (fcip_pkt == NULL) {
		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM, (CE_WARN,
		    "fcip_pkt_alloc: kmem_cache_alloc failed"));
		return (NULL);
	}

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
	fcip_pkt->fcip_pkt_fcpktp = fc_pkt;
	fc_pkt->pkt_tran_flags = 0;
	/* dma_flags tracks what cleanup fcip_free_pkt_dma() must do */
	fcip_pkt->fcip_pkt_dma_flags = 0;

	/*
	 * the cache constructor has allocated the dma handle
	 * - bind the mblk data directly for outbound DMA (no copy).
	 */
	fc_pkt->pkt_cmd = (caddr_t)bp->b_rptr;
	if (ddi_dma_addr_bind_handle(fc_pkt->pkt_cmd_dma, NULL,
	    (caddr_t)bp->b_rptr, datalen, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &pkt_cookie,
	    &fc_pkt->pkt_cmd_cookie_cnt) != DDI_DMA_MAPPED) {
			goto fail;
	}

	fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_BOUND;

	/* FCA can't handle more S/G entries than its DMA attr allows */
	if (fc_pkt->pkt_cmd_cookie_cnt >
	    fport->fcipp_cmd_dma_attr.dma_attr_sgllen) {
		goto fail;
	}

	ASSERT(fc_pkt->pkt_cmd_cookie_cnt != 0);

	/* collect all DMA cookies into an array for the FCA */
	cp = fc_pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
	    fc_pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
	    KM_NOSLEEP);

	if (cp == NULL) {
		goto fail;
	}

	/* first cookie came from the bind; fetch the rest */
	*cp = pkt_cookie;
	cp++;
	for (cnt = 1; cnt < fc_pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
		ddi_dma_nextcookie(fc_pkt->pkt_cmd_dma, &pkt_cookie);
		*cp = pkt_cookie;
	}

	fc_pkt->pkt_cmdlen = datalen;

	fcip_pkt->fcip_pkt_mp = NULL;
	fcip_pkt->fcip_pkt_wq = NULL;
	fcip_pkt->fcip_pkt_dest = NULL;
	fcip_pkt->fcip_pkt_next = NULL;
	fcip_pkt->fcip_pkt_prev = NULL;
	fcip_pkt->fcip_pkt_state = 0;
	fcip_pkt->fcip_pkt_reason = 0;
	fcip_pkt->fcip_pkt_flags = 0;
	fcip_pkt->fcip_pkt_frp = NULL;

	return (fcip_pkt);
fail:
	/* fcip_pkt_free() unwinds whatever dma_flags says was done */
	if (fcip_pkt) {
		fcip_pkt_free(fcip_pkt, 0);
	}
	return ((fcip_pkt_t *)0);
}
6605
6606/*
6607 * Free a packet and all its associated resources
6608 */
static void
fcip_pkt_free(struct fcip_pkt *fcip_pkt, int free_mblk)
{
	fc_packet_t	*fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
	struct fcip *fptr = fcip_pkt->fcip_pkt_fptr;

	/* release the S/G cookie array before unbinding the handle */
	if (fc_pkt->pkt_cmd_cookie != NULL) {
		kmem_free(fc_pkt->pkt_cmd_cookie, fc_pkt->pkt_cmd_cookie_cnt *
		    sizeof (ddi_dma_cookie_t));
		fc_pkt->pkt_cmd_cookie = NULL;
	}

	/* unbind/free DMA resources per fcip_pkt_dma_flags */
	fcip_free_pkt_dma(fcip_pkt);
	/* optionally free the mblk whose data was DMA-bound */
	if (free_mblk && fcip_pkt->fcip_pkt_mp) {
		freemsg(fcip_pkt->fcip_pkt_mp);
		fcip_pkt->fcip_pkt_mp = NULL;
	}

	/* let the FCA tear down its private portion of the packet */
	(void) fc_ulp_uninit_packet(fptr->fcip_port_info->fcipp_handle, fc_pkt);

	kmem_cache_free(fptr->fcip_xmit_cache, (void *)fcip_pkt);
}
6631
6632/*
6633 * Allocate a Packet for internal driver use. This is for requests
6634 * that originate from within the driver
6635 */
6636static fcip_pkt_t *
6637fcip_ipkt_alloc(struct fcip *fptr, int cmdlen, int resplen,
6638    opaque_t pd, int flags)
6639{
6640	fcip_pkt_t 		*fcip_pkt;
6641	fc_packet_t		*fc_pkt;
6642	int			(*cb)(caddr_t);
6643	fcip_port_info_t	*fport = fptr->fcip_port_info;
6644	size_t			real_len;
6645	uint_t			held_here = 0;
6646	ddi_dma_cookie_t	pkt_cookie;
6647	ddi_dma_cookie_t	*cp;
6648	uint32_t		cnt;
6649
6650	cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
6651
6652	fcip_pkt = kmem_zalloc((sizeof (fcip_pkt_t) +
6653	    fport->fcipp_fca_pkt_size), flags);
6654
6655	if (fcip_pkt == NULL) {
6656		FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
6657		    (CE_WARN, "pkt alloc of ineternal pkt failed"));
6658		goto fail;
6659	}
6660
6661	fcip_pkt->fcip_pkt_flags = FCIP_PKT_INTERNAL;
6662	fcip_pkt->fcip_pkt_fptr = fptr;
6663	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6664	fcip_pkt->fcip_pkt_fcpktp = fc_pkt;
6665	fc_pkt->pkt_tran_flags = 0;
6666	fc_pkt->pkt_cmdlen = 0;
6667	fc_pkt->pkt_rsplen = 0;
6668	fc_pkt->pkt_datalen = 0;
6669	fc_pkt->pkt_fca_private = (opaque_t)((caddr_t)fcip_pkt +
6670	    sizeof (fcip_pkt_t));
6671	fc_pkt->pkt_ulp_private = (opaque_t)fcip_pkt;
6672
6673	if (cmdlen) {
6674		if (ddi_dma_alloc_handle(fptr->fcip_dip,
6675		    &fport->fcipp_cmd_dma_attr, cb, NULL,
6676		    &fc_pkt->pkt_cmd_dma) != DDI_SUCCESS) {
6677			goto fail;
6678		}
6679
6680		if (ddi_dma_mem_alloc(fc_pkt->pkt_cmd_dma, cmdlen,
6681		    &fport->fcipp_fca_acc_attr, DDI_DMA_CONSISTENT,
6682		    cb, NULL, (caddr_t *)&fc_pkt->pkt_cmd,
6683		    &real_len, &fc_pkt->pkt_cmd_acc) != DDI_SUCCESS) {
6684			goto fail;
6685		}
6686
6687		fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_MEM;
6688		fc_pkt->pkt_cmdlen = cmdlen;
6689
6690		if (real_len < cmdlen) {
6691			goto fail;
6692		}
6693
6694		if (ddi_dma_addr_bind_handle(fc_pkt->pkt_cmd_dma, NULL,
6695		    (caddr_t)fc_pkt->pkt_cmd, real_len,
6696		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb, NULL,
6697		    &pkt_cookie, &fc_pkt->pkt_cmd_cookie_cnt) !=
6698		    DDI_DMA_MAPPED) {
6699			goto fail;
6700		}
6701
6702		fcip_pkt->fcip_pkt_dma_flags |= FCIP_CMD_DMA_BOUND;
6703
6704		if (fc_pkt->pkt_cmd_cookie_cnt >
6705		    fport->fcipp_cmd_dma_attr.dma_attr_sgllen) {
6706			goto fail;
6707		}
6708
6709		ASSERT(fc_pkt->pkt_cmd_cookie_cnt != 0);
6710
6711		cp = fc_pkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
6712		    fc_pkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
6713		    KM_NOSLEEP);
6714
6715		if (cp == NULL) {
6716			goto fail;
6717		}
6718
6719		*cp = pkt_cookie;
6720		cp++;
6721		for (cnt = 1; cnt < fc_pkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
6722			ddi_dma_nextcookie(fc_pkt->pkt_cmd_dma, &pkt_cookie);
6723			*cp = pkt_cookie;
6724		}
6725	}
6726
6727	if (resplen) {
6728		if (ddi_dma_alloc_handle(fptr->fcip_dip,
6729		    &fport->fcipp_resp_dma_attr, cb, NULL,
6730		    &fc_pkt->pkt_resp_dma) != DDI_SUCCESS) {
6731			goto fail;
6732		}
6733
6734		if (ddi_dma_mem_alloc(fc_pkt->pkt_resp_dma, resplen,
6735		    &fport->fcipp_fca_acc_attr, DDI_DMA_CONSISTENT,
6736		    cb, NULL, (caddr_t *)&fc_pkt->pkt_resp,
6737		    &real_len, &fc_pkt->pkt_resp_acc) != DDI_SUCCESS) {
6738			goto fail;
6739		}
6740
6741		fcip_pkt->fcip_pkt_dma_flags |= FCIP_RESP_DMA_MEM;
6742
6743		if (real_len < resplen) {
6744			goto fail;
6745		}
6746
6747		if (ddi_dma_addr_bind_handle(fc_pkt->pkt_resp_dma, NULL,
6748		    (caddr_t)fc_pkt->pkt_resp, real_len,
6749		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT, cb, NULL,
6750		    &pkt_cookie, &fc_pkt->pkt_resp_cookie_cnt) !=
6751		    DDI_DMA_MAPPED) {
6752			goto fail;
6753		}
6754
6755		fcip_pkt->fcip_pkt_dma_flags |= FCIP_RESP_DMA_BOUND;
6756		fc_pkt->pkt_rsplen = resplen;
6757
6758		if (fc_pkt->pkt_resp_cookie_cnt >
6759		    fport->fcipp_resp_dma_attr.dma_attr_sgllen) {
6760			goto fail;
6761		}
6762
6763		ASSERT(fc_pkt->pkt_resp_cookie_cnt != 0);
6764
6765		cp = fc_pkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
6766		    fc_pkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
6767		    KM_NOSLEEP);
6768
6769		if (cp == NULL) {
6770			goto fail;
6771		}
6772
6773		*cp = pkt_cookie;
6774		cp++;
6775		for (cnt = 1; cnt < fc_pkt->pkt_resp_cookie_cnt; cnt++, cp++) {
6776			ddi_dma_nextcookie(fc_pkt->pkt_resp_dma, &pkt_cookie);
6777			*cp = pkt_cookie;
6778		}
6779	}
6780
6781	/*
6782	 * Initialize pkt_pd prior to calling fc_ulp_init_packet
6783	 */
6784
6785	fc_pkt->pkt_pd = pd;
6786
6787	/*
6788	 * Ask the FCA to bless the internal packet
6789	 */
6790	if (fc_ulp_init_packet((opaque_t)fport->fcipp_handle,
6791	    fc_pkt, flags) != FC_SUCCESS) {
6792		goto fail;
6793	}
6794
6795	/*
6796	 * Keep track of # of ipkts alloc-ed
6797	 * This function can get called with mutex either held or not. So, we'll
6798	 * grab mutex if it is not already held by this thread.
6799	 * This has to be cleaned up someday.
6800	 */
6801	if (!MUTEX_HELD(&fptr->fcip_mutex)) {
6802		held_here = 1;
6803		mutex_enter(&fptr->fcip_mutex);
6804	}
6805
6806	fptr->fcip_num_ipkts_pending++;
6807
6808	if (held_here)
6809		mutex_exit(&fptr->fcip_mutex);
6810
6811	return (fcip_pkt);
6812fail:
6813	if (fcip_pkt) {
6814		fcip_ipkt_free(fcip_pkt);
6815	}
6816
6817	return (NULL);
6818}
6819
6820/*
6821 * free up an internal IP packet (like a FARP pkt etc)
6822 */
static void
fcip_ipkt_free(fcip_pkt_t *fcip_pkt)
{
	fc_packet_t		*fc_pkt;
	struct fcip		*fptr = fcip_pkt->fcip_pkt_fptr;
	fcip_port_info_t	*fport = fptr->fcip_port_info;

	ASSERT(fptr != NULL);
	/* caller must not hold fcip_mutex; we take it below */
	ASSERT(!mutex_owned(&fptr->fcip_mutex));

	/* One less ipkt to wait for */
	mutex_enter(&fptr->fcip_mutex);
	if (fptr->fcip_num_ipkts_pending)	/* Safety check */
		fptr->fcip_num_ipkts_pending--;
	mutex_exit(&fptr->fcip_mutex);

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	/* free cookie arrays before unbinding the DMA handles */
	if (fc_pkt->pkt_cmd_cookie != NULL) {
		kmem_free(fc_pkt->pkt_cmd_cookie, fc_pkt->pkt_cmd_cookie_cnt *
		    sizeof (ddi_dma_cookie_t));
		fc_pkt->pkt_cmd_cookie = NULL;
	}

	if (fc_pkt->pkt_resp_cookie != NULL) {
		kmem_free(fc_pkt->pkt_resp_cookie, fc_pkt->pkt_resp_cookie_cnt *
		    sizeof (ddi_dma_cookie_t));
		fc_pkt->pkt_resp_cookie = NULL;
	}

	/* best-effort: log but continue if the FCA uninit fails */
	if (fc_ulp_uninit_packet(fport->fcipp_handle, fc_pkt) != FC_SUCCESS) {
		FCIP_DEBUG(FCIP_DEBUG_ELS, (CE_WARN,
		    "fc_ulp_uninit_pkt failed for internal fc pkt 0x%p",
		    (void *)fc_pkt));
	}
	/* unbind/free DMA per dma_flags, then drop the whole allocation */
	fcip_free_pkt_dma(fcip_pkt);
	kmem_free(fcip_pkt, (sizeof (fcip_pkt_t) + fport->fcipp_fca_pkt_size));
}
6861
6862/*
6863 * initialize a unicast request. This is a misnomer because even the
6864 * broadcast requests are initialized with this routine
6865 */
6866static void
6867fcip_init_unicast_pkt(fcip_pkt_t *fcip_pkt, fc_portid_t sid, fc_portid_t did,
6868    void (*comp) ())
6869{
6870	fc_packet_t		*fc_pkt;
6871	fc_frame_hdr_t		*fr_hdr;
6872	struct fcip		*fptr = fcip_pkt->fcip_pkt_fptr;
6873
6874	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6875	fr_hdr = &fc_pkt->pkt_cmd_fhdr;
6876
6877	fr_hdr->r_ctl = R_CTL_DEVICE_DATA | R_CTL_UNSOL_DATA;
6878	fr_hdr->s_id = sid.port_id;
6879	fr_hdr->d_id = did.port_id;
6880	fr_hdr->type = FC_TYPE_IS8802_SNAP;
6881	fr_hdr->f_ctl = F_CTL_FIRST_SEQ | F_CTL_LAST_SEQ;
6882	fr_hdr->df_ctl = DF_CTL_NET_HDR;
6883	fr_hdr->seq_cnt = 0;
6884	fr_hdr->ox_id = 0xffff;
6885	fr_hdr->rx_id = 0xffff;
6886	fr_hdr->ro = 0;
6887	/*
6888	 * reset all the length fields
6889	 */
6890	fc_pkt->pkt_rsplen = 0;
6891	fc_pkt->pkt_datalen = 0;
6892	fc_pkt->pkt_comp = comp;
6893	if (comp) {
6894		fc_pkt->pkt_tran_flags |= FC_TRAN_INTR;
6895	} else {
6896		fc_pkt->pkt_tran_flags |= FC_TRAN_NO_INTR;
6897	}
6898	fc_pkt->pkt_tran_type = FC_PKT_OUTBOUND | FC_PKT_IP_WRITE;
6899	fc_pkt->pkt_timeout = fcip_pkt_ttl_ticks;
6900	fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout;
6901}
6902
6903
6904/*
6905 * Initialize a fcip_packet for broadcast data transfers
6906 */
6907static void
6908fcip_init_broadcast_pkt(fcip_pkt_t *fcip_pkt, void (*comp) (), int is_els)
6909{
6910	fc_packet_t		*fc_pkt;
6911	fc_frame_hdr_t		*fr_hdr;
6912	struct fcip		*fptr = fcip_pkt->fcip_pkt_fptr;
6913	fcip_port_info_t	*fport = fptr->fcip_port_info;
6914	uint32_t		sid;
6915	uint32_t		did;
6916
6917	FCIP_TNF_PROBE_1((fcip_init_broadcast_pkt, "fcip io", /* CSTYLED */,
6918		tnf_string, msg, "enter"));
6919	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);
6920	fr_hdr = &fc_pkt->pkt_cmd_fhdr;
6921	sid = fport->fcipp_sid.port_id;
6922
6923	if (is_els) {
6924		fr_hdr->r_ctl = R_CTL_ELS_REQ;
6925	} else {
6926		fr_hdr->r_ctl = R_CTL_DEVICE_DATA | R_CTL_UNSOL_DATA;
6927	}
6928	fr_hdr->s_id = sid;
6929	/*
6930	 * The destination broadcast address depends on the topology
6931	 * of the underlying port
6932	 */
6933	did = fptr->fcip_broadcast_did;
6934	/*
6935	 * mark pkt a broadcast pkt
6936	 */
6937	fc_pkt->pkt_tran_type = FC_PKT_BROADCAST;
6938
6939	fr_hdr->d_id = did;
6940	fr_hdr->type = FC_TYPE_IS8802_SNAP;
6941	fr_hdr->f_ctl = F_CTL_FIRST_SEQ | F_CTL_LAST_SEQ | F_CTL_END_SEQ;
6942	fr_hdr->f_ctl &= ~(F_CTL_SEQ_INITIATIVE);
6943	fr_hdr->df_ctl = DF_CTL_NET_HDR;
6944	fr_hdr->seq_cnt = 0;
6945	fr_hdr->ox_id = 0xffff;
6946	fr_hdr->rx_id = 0xffff;
6947	fr_hdr->ro = 0;
6948	fc_pkt->pkt_comp = comp;
6949
6950	if (comp) {
6951		fc_pkt->pkt_tran_flags |= FC_TRAN_INTR;
6952	} else {
6953		fc_pkt->pkt_tran_flags |= FC_TRAN_NO_INTR;
6954	}
6955
6956	fc_pkt->pkt_tran_type = FC_PKT_BROADCAST;
6957	fc_pkt->pkt_timeout = fcip_pkt_ttl_ticks;
6958	fcip_pkt->fcip_pkt_ttl = fptr->fcip_timeout_ticks + fc_pkt->pkt_timeout;
6959}
6960
6961
6962
6963/*
6964 * Free up all DMA resources associated with an allocated packet
6965 */
static void
fcip_free_pkt_dma(fcip_pkt_t *fcip_pkt)
{
	fc_packet_t	*fc_pkt;

	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "in freepktdma : flags 0x%x",
	    fcip_pkt->fcip_pkt_dma_flags));

	/* unbind handles before freeing the memory they map */
	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_CMD_DMA_BOUND) {
		(void) ddi_dma_unbind_handle(fc_pkt->pkt_cmd_dma);
	}
	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_CMD_DMA_MEM) {
		ddi_dma_mem_free(&fc_pkt->pkt_cmd_acc);
	}

	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_RESP_DMA_BOUND) {
		(void) ddi_dma_unbind_handle(fc_pkt->pkt_resp_dma);
	}
	if (fcip_pkt->fcip_pkt_dma_flags & FCIP_RESP_DMA_MEM) {
		ddi_dma_mem_free(&fc_pkt->pkt_resp_acc);
	}
	/*
	 * for internal commands, we need to free up the dma handles too.
	 * This is done in the cache destructor for non internal cmds
	 */
	if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_INTERNAL) {
		if (fc_pkt->pkt_cmd_dma) {
			ddi_dma_free_handle(&fc_pkt->pkt_cmd_dma);
		}
		if (fc_pkt->pkt_resp_dma) {
			ddi_dma_free_handle(&fc_pkt->pkt_resp_dma);
		}
	}
}
7003
7004
7005/*
7006 * helper routine to generate a string, given an ether addr
7007 */
7008static void
7009fcip_ether_to_str(struct ether_addr *e, caddr_t s)
7010{
7011	int i;
7012
7013	for (i = 0; i < sizeof (struct ether_addr); i++, s += 2) {
7014		FCIP_DEBUG(FCIP_DEBUG_MISC,
7015		    (CE_CONT, "0x%02X:", e->ether_addr_octet[i]));
7016		(void) sprintf(s, "%02X", e->ether_addr_octet[i]);
7017	}
7018
7019	*s = '\0';
7020}
7021
7022/*
7023 * When a broadcast request comes from the upper streams modules, it
7024 * is ugly to look into every datagram to figure out if it is a broadcast
7025 * datagram or a unicast packet. Instead just add the broadcast entries
7026 * into our routing and dest tables and the standard hash table look ups
7027 * will find the entries. It is a lot cleaner this way. Also Solaris ifconfig
7028 * seems to be very ethernet specific and it requires broadcasts to the
7029 * ether broadcast addr of 0xffffffffff to succeed even though we specified
7030 * in the dl_info request that our broadcast MAC addr is 0x0000000000
7031 * (can't figure out why RFC2625 did this though). So add broadcast entries
7032 * for both MAC address
7033 */
7034static int
7035fcip_dest_add_broadcast_entry(struct fcip *fptr, int new_flag)
7036{
7037	fc_portmap_t 		map;
7038	struct fcip_routing_table *frp;
7039	uint32_t		did;
7040	la_wwn_t		broadcast_wwn;
7041
7042	/*
7043	 * get port_id of destination for broadcast - this is topology
7044	 * dependent
7045	 */
7046	did = fptr->fcip_broadcast_did;
7047
7048	ether_to_wwn(&fcip_arpbroadcast_addr, &broadcast_wwn);
7049	bcopy((void *)&broadcast_wwn, (void *)&map.map_pwwn, sizeof (la_wwn_t));
7050	bcopy((void *)&broadcast_wwn, (void *)&map.map_nwwn, sizeof (la_wwn_t));
7051
7052	map.map_did.port_id = did;
7053	map.map_hard_addr.hard_addr = did;
7054	map.map_state = PORT_DEVICE_VALID;
7055	if (new_flag) {
7056		map.map_type = PORT_DEVICE_NEW;
7057	} else {
7058		map.map_type = PORT_DEVICE_CHANGED;
7059	}
7060	map.map_flags = 0;
7061	map.map_pd = NULL;
7062	bzero(&map.map_fc4_types, sizeof (map.map_fc4_types));
7063	fcip_rt_update(fptr, &map, 1);
7064	mutex_enter(&fptr->fcip_rt_mutex);
7065	frp = fcip_lookup_rtable(fptr, &broadcast_wwn, FCIP_COMPARE_NWWN);
7066	mutex_exit(&fptr->fcip_rt_mutex);
7067	if (frp == NULL) {
7068		return (FC_FAILURE);
7069	}
7070	(void) fcip_add_dest(fptr, frp);
7071	/*
7072	 * The Upper IP layers expect the traditional broadcast MAC addr
7073	 * of 0xff ff ff ff ff ff to work too if we want to plumb the fcip
7074	 * stream through the /etc/hostname.fcipXX file. Instead of checking
7075	 * each phys addr for a match with fcip's ARP header broadcast
7076	 * addr (0x00 00 00 00 00 00), its simply easier to add another
7077	 * broadcast entry for 0xff ff ff ff ff ff.
7078	 */
7079	ether_to_wwn(&fcipnhbroadcastaddr, &broadcast_wwn);
7080	bcopy((void *)&broadcast_wwn, (void *)&map.map_pwwn, sizeof (la_wwn_t));
7081	bcopy((void *)&broadcast_wwn, (void *)&map.map_nwwn, sizeof (la_wwn_t));
7082	fcip_rt_update(fptr, &map, 1);
7083	mutex_enter(&fptr->fcip_rt_mutex);
7084	frp = fcip_lookup_rtable(fptr, &broadcast_wwn, FCIP_COMPARE_NWWN);
7085	mutex_exit(&fptr->fcip_rt_mutex);
7086	if (frp == NULL) {
7087		return (FC_FAILURE);
7088	}
7089	(void) fcip_add_dest(fptr, frp);
7090	return (FC_SUCCESS);
7091}
7092
7093/*
7094 * We need to obtain the D_ID of the broadcast port for transmitting all
7095 * our broadcast (and multicast) requests. The broadcast D_ID as we know
7096 * is dependent on the link topology
7097 */
static uint32_t
fcip_get_broadcast_did(struct fcip *fptr)
{
	fcip_port_info_t	*fport = fptr->fcip_port_info;
	uint32_t		did = 0;
	uint32_t		sid;

	FCIP_TNF_PROBE_2((fcip_get_broadcast_did, "fcip io", /* CSTYLED */,
		tnf_string, msg, "enter",
		tnf_opaque, fptr, fptr));

	sid = fport->fcipp_sid.port_id;

	switch (fport->fcipp_topology) {

	case FC_TOP_PT_PT: {
		fc_portmap_t	*port_map = NULL;
		uint32_t	listlen = 0;

		/*
		 * Point-to-point: the "broadcast" target is simply the
		 * single peer port; pull its D_ID from the port map.
		 */
		if (fc_ulp_getportmap(fport->fcipp_handle, &port_map,
		    &listlen, FC_ULP_PLOGI_DONTCARE) == FC_SUCCESS) {
			FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_NOTE,
			    "fcip_gpmap: listlen :  0x%x", listlen));
			if (listlen == 1) {
				did = port_map->map_did.port_id;
			}
		}
		if (port_map) {
			kmem_free(port_map, listlen * sizeof (fc_portmap_t));
		}
		if (listlen != 1) {
			/* Dummy return value */
			return (0x00FFFFFF);
		}
		break;
	}

	case FC_TOP_NO_NS:
	/* FALLTHROUGH */
	case FC_TOP_FABRIC:
		/*
		 * The broadcast address is the same whether or not
		 * the switch/fabric contains a Name service.
		 */
		did = 0x00FFFFFF;
		break;

	case FC_TOP_PUBLIC_LOOP:
		/*
		 * The open replicate primitive must not be used. The
		 * broadcast sequence is simply sent to ALPA 0x00. The
		 * fabric controller then propagates the broadcast to all
		 * other ports. The fabric propagates the broadcast by
		 * using the OPNfr primitive.
		 */
		did = 0x00;
		break;

	case FC_TOP_PRIVATE_LOOP:
		/*
		 * The source port for broadcast in private loop mode
		 * must send an OPN(fr) signal forcing all ports in the
		 * loop to replicate the frames that they receive.
		 */
		did = 0x00FFFFFF;
		break;

	case FC_TOP_UNKNOWN:
	/* FALLTHROUGH */
	default:
		/* fall back to our own S_ID and complain */
		did = sid;
		FCIP_DEBUG(FCIP_DEBUG_INIT, (CE_WARN,
		    "fcip(0x%x):unknown topology in init_broadcast_pkt",
		    fptr->fcip_instance));
		break;
	}
	FCIP_TNF_PROBE_2((fcip_get_broadcast_did, "fcip io", /* CSTYLED */,
		tnf_string, msg, "return",
		tnf_opaque, did, did));

	return (did);
}
7180
7181
7182/*
7183 * fcip timeout performs 2 operations:
7184 * 1. timeout any packets sent to the FCA for which a callback hasn't
7185 *    happened. If you are wondering why we need a callback since all
7186 *    traffic in FCIP is unidirectional, hence all exchanges are unidirectional
7187 *    but wait, we can only free up the resources after we know the FCA has
7188 *    DMA'ed out the data. pretty obvious eh :)
7189 *
7190 * 2. Retire and routing table entries we marked up for retiring. This is
7191 *    to give the link a chance to recover instead of marking a port down
7192 *    when we have lost all communication with it after a link transition
7193 */
7194static void
7195fcip_timeout(void *arg)
7196{
7197	struct fcip 			*fptr = (struct fcip *)arg;
7198	int				i;
7199	fcip_pkt_t			*fcip_pkt;
7200	struct fcip_dest		*fdestp;
7201	int 				index;
7202	struct fcip_routing_table 	*frtp;
7203	int				dispatch_rte_removal = 0;
7204
7205	mutex_enter(&fptr->fcip_mutex);
7206
7207	fptr->fcip_flags |= FCIP_IN_TIMEOUT;
7208	fptr->fcip_timeout_ticks += fcip_tick_incr;
7209
7210	if (fptr->fcip_flags & (FCIP_DETACHED | FCIP_DETACHING | \
7211	    FCIP_SUSPENDED | FCIP_POWER_DOWN)) {
7212		fptr->fcip_flags &= ~(FCIP_IN_TIMEOUT);
7213		mutex_exit(&fptr->fcip_mutex);
7214		return;
7215	}
7216
7217	if (fptr->fcip_port_state == FCIP_PORT_OFFLINE) {
7218		if (fptr->fcip_timeout_ticks > fptr->fcip_mark_offline) {
7219			fptr->fcip_flags |= FCIP_LINK_DOWN;
7220		}
7221	}
7222	if (!fptr->fcip_flags & FCIP_RTE_REMOVING) {
7223		dispatch_rte_removal = 1;
7224	}
7225	mutex_exit(&fptr->fcip_mutex);
7226
7227	/*
7228	 * Check if we have any Invalid routing table entries in our
7229	 * hashtable we have marked off for deferred removal. If any,
7230	 * we can spawn a taskq thread to do the cleanup for us. We
7231	 * need to avoid cleanup in the timeout thread since we may
7232	 * have to wait for outstanding commands to complete before
7233	 * we retire a routing table entry. Also dispatch the taskq
7234	 * thread only if we are already do not have a taskq thread
7235	 * dispatched.
7236	 */
7237	if (dispatch_rte_removal) {
7238		mutex_enter(&fptr->fcip_rt_mutex);
7239		for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
7240			frtp = fptr->fcip_rtable[index];
7241			while (frtp) {
7242				if ((frtp->fcipr_state == FCIP_RT_INVALID) &&
7243				    (fptr->fcip_timeout_ticks >
7244				    frtp->fcipr_invalid_timeout)) {
7245					/*
7246					 * If we cannot schedule a task thread
7247					 * let us attempt again on the next
7248					 * tick rather than call
7249					 * fcip_rte_remove_deferred() from here
7250					 * directly since the routine can sleep.
7251					 */
7252					frtp->fcipr_state = FCIP_RT_RETIRED;
7253
7254					mutex_enter(&fptr->fcip_mutex);
7255					fptr->fcip_flags |= FCIP_RTE_REMOVING;
7256					mutex_exit(&fptr->fcip_mutex);
7257
7258					if (taskq_dispatch(fptr->fcip_tq,
7259					    fcip_rte_remove_deferred, fptr,
7260					    KM_NOSLEEP) == 0) {
7261						/*
7262						 * failed - so mark the entry
7263						 * as invalid again.
7264						 */
7265						frtp->fcipr_state =
7266						    FCIP_RT_INVALID;
7267
7268						mutex_enter(&fptr->fcip_mutex);
7269						fptr->fcip_flags &=
7270						    ~FCIP_RTE_REMOVING;
7271						mutex_exit(&fptr->fcip_mutex);
7272					}
7273				}
7274				frtp = frtp->fcipr_next;
7275			}
7276		}
7277		mutex_exit(&fptr->fcip_rt_mutex);
7278	}
7279
7280	mutex_enter(&fptr->fcip_dest_mutex);
7281
7282	/*
7283	 * Now timeout any packets stuck with the transport/FCA for too long
7284	 */
7285	for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) {
7286		fdestp = fptr->fcip_dest[i];
7287		while (fdestp != NULL) {
7288			mutex_enter(&fdestp->fcipd_mutex);
7289			for (fcip_pkt = fdestp->fcipd_head; fcip_pkt != NULL;
7290			    fcip_pkt = fcip_pkt->fcip_pkt_next) {
7291				if (fcip_pkt->fcip_pkt_flags &
7292				    (FCIP_PKT_RETURNED | FCIP_PKT_IN_TIMEOUT |
7293				    FCIP_PKT_IN_ABORT)) {
7294					continue;
7295				}
7296				if (fptr->fcip_timeout_ticks >
7297				    fcip_pkt->fcip_pkt_ttl) {
7298					fcip_pkt->fcip_pkt_flags |=
7299					    FCIP_PKT_IN_TIMEOUT;
7300
7301					mutex_exit(&fdestp->fcipd_mutex);
7302					if (taskq_dispatch(fptr->fcip_tq,
7303					    fcip_pkt_timeout, fcip_pkt,
7304					    KM_NOSLEEP) == 0) {
7305						/*
7306						 * timeout immediately
7307						 */
7308						fcip_pkt_timeout(fcip_pkt);
7309					}
7310					mutex_enter(&fdestp->fcipd_mutex);
7311					/*
7312					 * The linked list is altered because
7313					 * of one of the following reasons:
7314					 *	a. Timeout code dequeued a pkt
7315					 *	b. Pkt completion happened
7316					 *
7317					 * So restart the spin starting at
7318					 * the head again; This is a bit
7319					 * excessive, but okay since
7320					 * fcip_timeout_ticks isn't incremented
7321					 * for this spin, we will skip the
7322					 * not-to-be-timedout packets quickly
7323					 */
7324					fcip_pkt = fdestp->fcipd_head;
7325					if (fcip_pkt == NULL) {
7326						break;
7327					}
7328				}
7329			}
7330			mutex_exit(&fdestp->fcipd_mutex);
7331			fdestp = fdestp->fcipd_next;
7332		}
7333	}
7334	mutex_exit(&fptr->fcip_dest_mutex);
7335
7336	/*
7337	 * reschedule the timeout thread
7338	 */
7339	mutex_enter(&fptr->fcip_mutex);
7340
7341	fptr->fcip_timeout_id = timeout(fcip_timeout, fptr,
7342	    drv_usectohz(1000000));
7343	fptr->fcip_flags &= ~(FCIP_IN_TIMEOUT);
7344	mutex_exit(&fptr->fcip_mutex);
7345}
7346
7347
7348/*
 * This routine, called either from taskq context or directly from
 * fcip_timeout, does the actual job of aborting the packet
7351 */
static void
fcip_pkt_timeout(void *arg)
{
	fcip_pkt_t		*fcip_pkt = (fcip_pkt_t *)arg;
	struct fcip_dest	*fdestp;
	struct fcip		*fptr;
	fc_packet_t		*fc_pkt;
	fcip_port_info_t	*fport;
	int			rval;

	/* caller has already marked the pkt FCIP_PKT_IN_TIMEOUT */
	fdestp = fcip_pkt->fcip_pkt_dest;
	fptr = fcip_pkt->fcip_pkt_fptr;
	fport = fptr->fcip_port_info;
	fc_pkt = FCIP_PKT_TO_FC_PKT(fcip_pkt);

	/*
	 * try to abort the pkt
	 */
	fcip_pkt->fcip_pkt_flags |= FCIP_PKT_IN_ABORT;
	rval = fc_ulp_abort(fport->fcipp_handle, fc_pkt, KM_NOSLEEP);

	FCIP_DEBUG(FCIP_DEBUG_DOWNSTREAM,
	    (CE_NOTE, "fc_ulp_abort returns: 0x%x", rval));

	if (rval == FC_SUCCESS) {
		/*
		 * Abort succeeded: the transport will not complete this
		 * pkt, so we own it again and must reclaim it ourselves.
		 */
		ASSERT(fdestp != NULL);

		/*
		 * dequeue the pkt from the dest structure pkt list
		 */
		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT;
		mutex_enter(&fdestp->fcipd_mutex);
		rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);
		ASSERT(rval == 1);
		mutex_exit(&fdestp->fcipd_mutex);

		/*
		 * Now cleanup the pkt and free the mblk
		 */
		fcip_pkt_free(fcip_pkt, 1);
	} else {
		/*
		 * abort failed - just mark the pkt as done and
		 * wait for it to complete in fcip_pkt_callback since
		 * the pkt has already been xmitted by the FCA
		 */
		fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_TIMEOUT;
		/*
		 * If the completion callback raced with us and already
		 * flagged the pkt RETURNED, the callback will not free
		 * it (we held IN_TIMEOUT/IN_ABORT), so reclaim it here.
		 *
		 * NOTE(review): unlike the success path, fdestp is
		 * dereferenced below without an ASSERT/NULL check -
		 * presumed non-NULL whenever FCIP_PKT_RETURNED is set;
		 * confirm against the completion path.
		 */
		if (fcip_pkt->fcip_pkt_flags & FCIP_PKT_RETURNED) {
			fcip_pkt->fcip_pkt_flags &= ~FCIP_PKT_IN_ABORT;
			mutex_enter(&fdestp->fcipd_mutex);
			rval = fcip_fdestp_dequeue_pkt(fdestp, fcip_pkt);
			ASSERT(rval == 1);
			mutex_exit(&fdestp->fcipd_mutex);

			fcip_pkt_free(fcip_pkt, 1);
		}
		return;
	}
}
7411
7412
7413/*
 * Remove a routing table entry marked for deferred removal. This routine,
7415 * unlike fcip_pkt_timeout, is always called from a taskq context
7416 */
static void
fcip_rte_remove_deferred(void *arg)
{
	struct fcip 			*fptr = (struct fcip *)arg;
	int				hash_bucket;
	struct fcip_dest 		*fdestp;
	la_wwn_t			*pwwn;
	int 				index;
	struct fcip_routing_table 	*frtp, *frtp_next, *frtp_prev;


	/*
	 * Walk every routing-table hash bucket looking for entries that
	 * fcip_timeout() marked FCIP_RT_RETIRED, and free those that are
	 * quiescent. Entries with commands still outstanding are left in
	 * place for a later pass.
	 */
	mutex_enter(&fptr->fcip_rt_mutex);
	for (index = 0; index < FCIP_RT_HASH_ELEMS; index++) {
		frtp = fptr->fcip_rtable[index];
		frtp_prev = NULL;
		while (frtp) {
			/* capture successor before we potentially free frtp */
			frtp_next = frtp->fcipr_next;

			if (frtp->fcipr_state == FCIP_RT_RETIRED) {

				pwwn = &frtp->fcipr_pwwn;
				/*
				 * Get hold of destination pointer
				 */
				mutex_enter(&fptr->fcip_dest_mutex);

				hash_bucket = FCIP_DEST_HASH(pwwn->raw_wwn);
				ASSERT(hash_bucket < FCIP_DEST_HASH_ELEMS);

				/*
				 * Find the fcip_dest whose port WWN matches
				 * this routing entry.
				 */
				fdestp = fptr->fcip_dest[hash_bucket];
				while (fdestp != NULL) {
					mutex_enter(&fdestp->fcipd_mutex);
					if (fdestp->fcipd_rtable) {
						if (fcip_wwn_compare(pwwn,
						    &fdestp->fcipd_pwwn,
						    FCIP_COMPARE_PWWN) == 0) {
							mutex_exit(
							&fdestp->fcipd_mutex);
							break;
						}
					}
					mutex_exit(&fdestp->fcipd_mutex);
					fdestp = fdestp->fcipd_next;
				}

				mutex_exit(&fptr->fcip_dest_mutex);
				/*
				 * No matching destination: leave this RETIRED
				 * entry in place and move on.
				 */
				if (fdestp == NULL) {
					frtp_prev = frtp;
					frtp = frtp_next;
					continue;
				}

				mutex_enter(&fdestp->fcipd_mutex);
				if (fdestp->fcipd_ncmds) {
					/*
					 * Instead of waiting to drain commands
					 * let us revisit this RT entry in
					 * the next pass.
					 */
					mutex_exit(&fdestp->fcipd_mutex);
					frtp_prev = frtp;
					frtp = frtp_next;
					continue;
				}

				/*
				 * We are clean, so remove the RTE
				 */
				fdestp->fcipd_rtable = NULL;
				mutex_exit(&fdestp->fcipd_mutex);

				FCIP_TNF_PROBE_2((fcip_rte_remove_deferred,
					"fcip io", /* CSTYLED */,
					tnf_string, msg,
					"remove retired routing entry",
					tnf_int, index, index));

				/* unlink frtp from the bucket chain */
				if (frtp_prev == NULL) {
					/* first element */
					fptr->fcip_rtable[index] =
					    frtp->fcipr_next;
				} else {
					frtp_prev->fcipr_next =
					    frtp->fcipr_next;
				}
				kmem_free(frtp,
				    sizeof (struct fcip_routing_table));

				/* frtp_prev stays put since frtp is gone */
				frtp = frtp_next;
			} else {
				frtp_prev = frtp;
				frtp = frtp_next;
			}
		}
	}
	mutex_exit(&fptr->fcip_rt_mutex);
	/*
	 * Clear the RTE_REMOVING flag
	 */
	mutex_enter(&fptr->fcip_mutex);
	fptr->fcip_flags &= ~FCIP_RTE_REMOVING;
	mutex_exit(&fptr->fcip_mutex);
}
7520
7521/*
7522 * Walk through all the dest hash table entries and count up the total
7523 * no. of packets outstanding against a given port
7524 */
7525static int
7526fcip_port_get_num_pkts(struct fcip *fptr)
7527{
7528	int 			num_cmds = 0;
7529	int 			i;
7530	struct fcip_dest	*fdestp;
7531
7532	ASSERT(mutex_owned(&fptr->fcip_dest_mutex));
7533
7534	for (i = 0; i < FCIP_DEST_HASH_ELEMS; i++) {
7535		fdestp = fptr->fcip_dest[i];
7536		while (fdestp != NULL) {
7537			mutex_enter(&fdestp->fcipd_mutex);
7538
7539			ASSERT(fdestp->fcipd_ncmds >= 0);
7540
7541			if (fdestp->fcipd_ncmds > 0) {
7542				num_cmds += fdestp->fcipd_ncmds;
7543			}
7544			mutex_exit(&fdestp->fcipd_mutex);
7545			fdestp = fdestp->fcipd_next;
7546		}
7547	}
7548
7549	return (num_cmds);
7550}
7551
7552
7553/*
7554 * Walk through the routing table for this state instance and see if there is a
7555 * PLOGI in progress for any of the entries. Return success even if we find one.
7556 */
7557static int
7558fcip_plogi_in_progress(struct fcip *fptr)
7559{
7560	int				i;
7561	struct fcip_routing_table	*frp;
7562
7563	ASSERT(mutex_owned(&fptr->fcip_rt_mutex));
7564
7565	for (i = 0; i < FCIP_RT_HASH_ELEMS; i++) {
7566		frp = fptr->fcip_rtable[i];
7567		while (frp) {
7568			if (frp->fcipr_state == FCIP_RT_LOGIN_PROGRESS) {
7569				/* Found an entry where PLOGI is in progress */
7570				return (1);
7571			}
7572			frp = frp->fcipr_next;
7573		}
7574	}
7575
7576	return (0);
7577}
7578
7579/*
7580 * Walk through the fcip port global list and check if the given port exists in
7581 * the list. Returns "0" if port exists and "1" if otherwise.
7582 */
7583static int
7584fcip_check_port_exists(struct fcip *fptr)
7585{
7586	fcip_port_info_t	*cur_fport;
7587	fcip_port_info_t	*fport;
7588
7589	mutex_enter(&fcip_global_mutex);
7590	fport = fptr->fcip_port_info;
7591	cur_fport = fcip_port_head;
7592	while (cur_fport != NULL) {
7593		if (cur_fport == fport) {
7594			/* Found */
7595			mutex_exit(&fcip_global_mutex);
7596			return (0);
7597		} else {
7598			cur_fport = cur_fport->fcipp_next;
7599		}
7600	}
7601	mutex_exit(&fcip_global_mutex);
7602
7603	return (1);
7604}
7605
7606/*
7607 * Constructor to initialize the sendup elements for callback into
7608 * modules upstream
7609 */
7610
7611/* ARGSUSED */
7612static int
7613fcip_sendup_constructor(void *buf, void *arg, int flags)
7614{
7615	struct fcip_sendup_elem	*msg_elem = (struct fcip_sendup_elem *)buf;
7616	fcip_port_info_t	*fport = (fcip_port_info_t *)arg;
7617
7618	ASSERT(fport != NULL);
7619
7620	msg_elem->fcipsu_mp = NULL;
7621	msg_elem->fcipsu_func = NULL;
7622	msg_elem->fcipsu_next = NULL;
7623
7624	return (FCIP_SUCCESS);
7625}
7626