1/*-
2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 *   this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *   the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 *    contributors may be used to endorse or promote products derived
16 *    from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $	 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_usrreq.c 178251 2008-04-16 17:24:18Z rrs $");
35#include <netinet/sctp_os.h>
36#include <sys/proc.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctp_header.h>
39#include <netinet/sctp_var.h>
40#if defined(INET6)
41#include <netinet6/sctp6_var.h>
42#endif
43#include <netinet/sctp_sysctl.h>
44#include <netinet/sctp_output.h>
45#include <netinet/sctp_uio.h>
46#include <netinet/sctp_asconf.h>
47#include <netinet/sctputil.h>
48#include <netinet/sctp_indata.h>
49#include <netinet/sctp_timer.h>
50#include <netinet/sctp_auth.h>
51#include <netinet/sctp_bsd_addr.h>
52#include <netinet/sctp_cc_functions.h>
53
54
55
56
57void
58sctp_init(void)
59{
60	/* Init the SCTP pcb in sctp_pcb.c */
61	u_long sb_max_adj;
62
63	sctp_pcb_init();
64
65
66	if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
67		sctp_max_chunks_on_queue = (nmbclusters / 8);
68	/*
69	 * Allow a user to take no more than 1/2 the number of clusters or
70	 * SB_MAX, whichever is smaller, for the send window.
71	 */
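	/*
	 * sb_max_adj is SB_MAX scaled by MCLBYTES / (MSIZE + MCLBYTES), i.e.
	 * the portion of the socket-buffer limit that can hold actual cluster
	 * data once each cluster is charged an mbuf header of MSIZE bytes.
	 */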
72	sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
73	sctp_sendspace = min(sb_max_adj,
74	    (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
75	/*
76	 * Now for the recv window: should we take the same amount, or
77	 * should I use 1/2 of SB_MAX instead in the SB_MAX min above? For
78	 * now I will just copy.
79	 */
80	sctp_recvspace = sctp_sendspace;
81
82}
83
84
85
86/*
87 * cleanup of the sctppcbinfo structure.
88 * Assumes that the sctppcbinfo lock is held.
89 */
90void
91sctp_pcbinfo_cleanup(void)
92{
93	/* free the hash tables */
94	if (sctppcbinfo.sctp_asochash != NULL)
95		SCTP_HASH_FREE(sctppcbinfo.sctp_asochash, sctppcbinfo.hashasocmark);
96	if (sctppcbinfo.sctp_ephash != NULL)
97		SCTP_HASH_FREE(sctppcbinfo.sctp_ephash, sctppcbinfo.hashmark);
98	if (sctppcbinfo.sctp_tcpephash != NULL)
99		SCTP_HASH_FREE(sctppcbinfo.sctp_tcpephash, sctppcbinfo.hashtcpmark);
100	if (sctppcbinfo.sctp_restarthash != NULL)
101		SCTP_HASH_FREE(sctppcbinfo.sctp_restarthash, sctppcbinfo.hashrestartmark);
102}
103
104
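/*
 * Lower the association's smallest MTU and walk the send and sent queues,
 * flagging any chunk larger than the new size as OK to fragment; chunks
 * already sent are also marked for retransmission and removed from the
 * flight-size accounting.
 */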
105static void
106sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
107    struct sctp_tcb *stcb,
108    struct sctp_nets *net,
109    uint16_t nxtsz)
110{
111	struct sctp_tmit_chunk *chk;
112
113	/* Adjust that too */
114	stcb->asoc.smallest_mtu = nxtsz;
115	/* now off to subtract IP_DF flag if needed */
116#ifdef SCTP_PRINT_FOR_B_AND_M
117	SCTP_PRINTF("sctp_pathmtu_adjust called inp:%p stcb:%p net:%p nxtsz:%d\n",
118	    inp, stcb, net, nxtsz);
119#endif
120	TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
121		if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
122			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
123		}
124	}
125	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
126		if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
127			/*
128			 * Also mark this chunk for immediate resend, since we
129			 * sent too big a chunk.
130			 */
131			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
132			if (chk->sent != SCTP_DATAGRAM_RESEND) {
133				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
134			}
135			chk->sent = SCTP_DATAGRAM_RESEND;
136			chk->rec.data.doing_fast_retransmit = 0;
137			if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
138				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
139				    chk->whoTo->flight_size,
140				    chk->book_size,
141				    (uintptr_t) chk->whoTo,
142				    chk->rec.data.TSN_seq);
143			}
144			/* Clear any time so NO RTT is being done */
145			chk->do_rtt = 0;
146			sctp_flight_size_decrease(chk);
147			sctp_total_flight_decrease(stcb, chk);
148		}
149	}
150}
151
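/*
 * Handle an ICMP "fragmentation needed" error for an existing association:
 * verify the vtag, take the next-hop MTU reported by the router (or guess
 * one if it is missing), lower the destination and association MTU, and
 * restart the path-MTU raise timer if it was running.
 */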
152static void
153sctp_notify_mbuf(struct sctp_inpcb *inp,
154    struct sctp_tcb *stcb,
155    struct sctp_nets *net,
156    struct ip *ip,
157    struct sctphdr *sh)
158{
159	struct icmp *icmph;
160	int totsz, tmr_stopped = 0;
161	uint16_t nxtsz;
162
163	/* protection */
164	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
165	    (ip == NULL) || (sh == NULL)) {
166		if (stcb != NULL) {
167			SCTP_TCB_UNLOCK(stcb);
168		}
169		return;
170	}
171	/* First job is to verify the vtag matches what I would send */
172	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
173		SCTP_TCB_UNLOCK(stcb);
174		return;
175	}
176	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
177	    sizeof(struct ip)));
178	if (icmph->icmp_type != ICMP_UNREACH) {
179		/* We only care about unreachable */
180		SCTP_TCB_UNLOCK(stcb);
181		return;
182	}
183	if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
184		/* not an unreachable message due to fragmentation */
185		SCTP_TCB_UNLOCK(stcb);
186		return;
187	}
188	totsz = ip->ip_len;
189
190	nxtsz = ntohs(icmph->icmp_nextmtu);
191	if (nxtsz == 0) {
192		/*
193		 * old style router that does not tell us what the next MTU
194		 * size is. Rats, we will have to guess (in an educated
195		 * fashion, of course).
196		 */
197		nxtsz = find_next_best_mtu(totsz);
198	}
199	/* Stop any PMTU timer */
200	if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
201		tmr_stopped = 1;
202		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
203		    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
204	}
205	/* Adjust destination size limit */
206	if (net->mtu > nxtsz) {
207		net->mtu = nxtsz;
208	}
209	/* now what about the ep? */
210	if (stcb->asoc.smallest_mtu > nxtsz) {
211#ifdef SCTP_PRINT_FOR_B_AND_M
212		SCTP_PRINTF("notify_mbuf (ICMP) calls sctp_pathmtu_adjust mtu:%d\n",
213		    nxtsz);
214#endif
215		sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
216	}
217	if (tmr_stopped)
218		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
219
220	SCTP_TCB_UNLOCK(stcb);
221}
222
223
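/*
 * Handle the remaining ICMP unreachable codes: host/net style unreachables
 * mark the destination down and notify the ULP, while protocol/port
 * unreachable is treated as an OOTB abort and tears down the association.
 */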
224void
225sctp_notify(struct sctp_inpcb *inp,
226    struct ip *ip,
227    struct sctphdr *sh,
228    struct sockaddr *to,
229    struct sctp_tcb *stcb,
230    struct sctp_nets *net)
231{
232#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
233	struct socket *so;
234
235#endif
236	/* protection */
237	int reason;
238	struct icmp *icmph;
239
240
241	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
242	    (sh == NULL) || (to == NULL)) {
243		if (stcb)
244			SCTP_TCB_UNLOCK(stcb);
245		return;
246	}
247	/* First job is to verify the vtag matches what I would send */
248	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
249		SCTP_TCB_UNLOCK(stcb);
250		return;
251	}
252	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
253	    sizeof(struct ip)));
254	if (icmph->icmp_type != ICMP_UNREACH) {
255		/* We only care about unreachable */
256		SCTP_TCB_UNLOCK(stcb);
257		return;
258	}
259	if ((icmph->icmp_code == ICMP_UNREACH_NET) ||
260	    (icmph->icmp_code == ICMP_UNREACH_HOST) ||
261	    (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
262	    (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
263	    (icmph->icmp_code == ICMP_UNREACH_ISOLATED) ||
264	    (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) ||
265	    (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
266	    (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {
267
268		/*
269		 * Hmm, reachability problems we must examine closely. If it is
270		 * not reachable, we may have lost a network. Or if there is
271		 * NO protocol at the other end named SCTP, we consider it
272		 * an OOTB abort.
273		 */
274		if (net->dest_state & SCTP_ADDR_REACHABLE) {
275			/* Ok that destination is NOT reachable */
276			SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
277			    net->error_count,
278			    net->failure_threshold,
279			    net);
280
281			net->dest_state &= ~SCTP_ADDR_REACHABLE;
282			net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
283			/*
284			 * JRS 5/14/07 - If a destination is unreachable,
285			 * the PF bit is turned off.  This allows an
286			 * unambiguous use of the PF bit for destinations
287			 * that are reachable but potentially failed. If the
288			 * destination is set to the unreachable state, also
289			 * set the destination to the PF state.
290			 */
291			/*
292			 * Add debug message here if destination is not in
293			 * PF state.
294			 */
295			/* Stop any running T3 timers here? */
296			if (sctp_cmt_on_off && sctp_cmt_pf) {
297				net->dest_state &= ~SCTP_ADDR_PF;
298				SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
299				    net);
300			}
301			net->error_count = net->failure_threshold + 1;
302			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
303			    stcb, SCTP_FAILED_THRESHOLD,
304			    (void *)net, SCTP_SO_NOT_LOCKED);
305		}
306		SCTP_TCB_UNLOCK(stcb);
307	} else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) ||
308	    (icmph->icmp_code == ICMP_UNREACH_PORT)) {
309		/*
310		 * Here the peer is either playing tricks on us, including
311		 * an address that belongs to someone who does not support
312		 * SCTP, OR it was a userland implementation that shut down
313		 * and is now dead. In either case treat it like an OOTB
314		 * abort with no TCB.
315		 */
316		reason = SCTP_PEER_FAULTY;
317		sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED);
318#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
319		so = SCTP_INP_SO(inp);
320		atomic_add_int(&stcb->asoc.refcnt, 1);
321		SCTP_TCB_UNLOCK(stcb);
322		SCTP_SOCKET_LOCK(so, 1);
323		SCTP_TCB_LOCK(stcb);
324		atomic_subtract_int(&stcb->asoc.refcnt, 1);
325#endif
326		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
327#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
328		SCTP_SOCKET_UNLOCK(so, 1);
329		/* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */
330#endif
331		/* no need to unlock here, since the TCB is gone */
332	} else {
333		SCTP_TCB_UNLOCK(stcb);
334	}
335}
336
337void
338sctp_ctlinput(cmd, sa, vip)
339	int cmd;
340	struct sockaddr *sa;
341	void *vip;
342{
343	struct ip *ip = vip;
344	struct sctphdr *sh;
345	uint32_t vrf_id;
346
347	/* FIX, for non-bsd is this right? */
348	vrf_id = SCTP_DEFAULT_VRFID;
349	if (sa->sa_family != AF_INET ||
350	    ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
351		return;
352	}
353	if (PRC_IS_REDIRECT(cmd)) {
354		ip = 0;
355	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
356		return;
357	}
358	if (ip) {
359		struct sctp_inpcb *inp = NULL;
360		struct sctp_tcb *stcb = NULL;
361		struct sctp_nets *net = NULL;
362		struct sockaddr_in to, from;
363
364		sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
365		bzero(&to, sizeof(to));
366		bzero(&from, sizeof(from));
367		from.sin_family = to.sin_family = AF_INET;
368		from.sin_len = to.sin_len = sizeof(to);
369		from.sin_port = sh->src_port;
370		from.sin_addr = ip->ip_src;
371		to.sin_port = sh->dest_port;
372		to.sin_addr = ip->ip_dst;
373
374		/*
375		 * 'to' holds the dest of the packet that failed to be sent.
376		 * 'from' holds our local endpoint address. Thus we reverse
377		 * the to and the from in the lookup.
378		 */
379		stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
380		    (struct sockaddr *)&to,
381		    &inp, &net, 1, vrf_id);
382		if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
383			if (cmd != PRC_MSGSIZE) {
384				sctp_notify(inp, ip, sh,
385				    (struct sockaddr *)&to, stcb,
386				    net);
387			} else {
388				/* handle possible ICMP size messages */
389				sctp_notify_mbuf(inp, stcb, net, ip, sh);
390			}
391		} else {
392			if ((stcb == NULL) && (inp != NULL)) {
393				/* reduce ref-count */
394				SCTP_INP_WLOCK(inp);
395				SCTP_INP_DECR_REF(inp);
396				SCTP_INP_WUNLOCK(inp);
397			}
398		}
399	}
400	return;
401}
402
403static int
404sctp_getcred(SYSCTL_HANDLER_ARGS)
405{
406	struct xucred xuc;
407	struct sockaddr_in addrs[2];
408	struct sctp_inpcb *inp;
409	struct sctp_nets *net;
410	struct sctp_tcb *stcb;
411	int error;
412	uint32_t vrf_id;
413
414	/* FIX, for non-bsd is this right? */
415	vrf_id = SCTP_DEFAULT_VRFID;
416
417	error = priv_check(req->td, PRIV_NETINET_GETCRED);
418
419	if (error)
420		return (error);
421
422	error = SYSCTL_IN(req, addrs, sizeof(addrs));
423	if (error)
424		return (error);
425
426	stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
427	    sintosa(&addrs[1]),
428	    &inp, &net, 1, vrf_id);
429	if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
430		if ((inp != NULL) && (stcb == NULL)) {
431			/* reduce ref-count */
432			SCTP_INP_WLOCK(inp);
433			SCTP_INP_DECR_REF(inp);
434			goto cred_can_cont;
435		}
436		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
437		error = ENOENT;
438		goto out;
439	}
440	SCTP_TCB_UNLOCK(stcb);
441	/*
442	 * We use the write lock here only because we need it in the error
443	 * leg. If we used RLOCK, then we would have to
444	 * wlock/decr/unlock/rlock, which in theory could create a hole.
445	 * Better to use the stronger wlock.
446	 */
447	SCTP_INP_WLOCK(inp);
448cred_can_cont:
449	error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
450	if (error) {
451		SCTP_INP_WUNLOCK(inp);
452		goto out;
453	}
454	cru2x(inp->sctp_socket->so_cred, &xuc);
455	SCTP_INP_WUNLOCK(inp);
456	error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
457out:
458	return (error);
459}
460
461SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
462    0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
463
464
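/*
 * Abort-style close of the socket: mark the PCB as gone, free it (aborting
 * any associations), clear the socket buffers, and detach the PCB reference.
 */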
465static void
466sctp_abort(struct socket *so)
467{
468	struct sctp_inpcb *inp;
469	uint32_t flags;
470
471	inp = (struct sctp_inpcb *)so->so_pcb;
472	if (inp == 0) {
473		return;
474	}
475sctp_must_try_again:
476	flags = inp->sctp_flags;
477#ifdef SCTP_LOG_CLOSING
478	sctp_log_closing(inp, NULL, 17);
479#endif
480	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
481	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
482#ifdef SCTP_LOG_CLOSING
483		sctp_log_closing(inp, NULL, 16);
484#endif
485		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
486		    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
487		SOCK_LOCK(so);
488		SCTP_SB_CLEAR(so->so_snd);
489		/*
490		 * same for the rcv ones; they are only here for the
491		 * accounting/select.
492		 */
493		SCTP_SB_CLEAR(so->so_rcv);
494
495		/* Now null out the reference, we are completely detached. */
496		so->so_pcb = NULL;
497		SOCK_UNLOCK(so);
498	} else {
499		flags = inp->sctp_flags;
500		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
501			goto sctp_must_try_again;
502		}
503	}
504	return;
505}
506
507static int
508sctp_attach(struct socket *so, int proto, struct thread *p)
509{
510	struct sctp_inpcb *inp;
511	struct inpcb *ip_inp;
512	int error;
513	uint32_t vrf_id = SCTP_DEFAULT_VRFID;
514
515#ifdef IPSEC
516	uint32_t flags;
517
518#endif
519	inp = (struct sctp_inpcb *)so->so_pcb;
520	if (inp != 0) {
521		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
522		return EINVAL;
523	}
524	error = SCTP_SORESERVE(so, sctp_sendspace, sctp_recvspace);
525	if (error) {
526		return error;
527	}
528	error = sctp_inpcb_alloc(so, vrf_id);
529	if (error) {
530		return error;
531	}
532	inp = (struct sctp_inpcb *)so->so_pcb;
533	SCTP_INP_WLOCK(inp);
534	inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6;	/* I'm not v6! */
535	ip_inp = &inp->ip_inp.inp;
536	ip_inp->inp_vflag |= INP_IPV4;
537	ip_inp->inp_ip_ttl = ip_defttl;
538#ifdef IPSEC
539	error = ipsec_init_policy(so, &ip_inp->inp_sp);
540#ifdef SCTP_LOG_CLOSING
541	sctp_log_closing(inp, NULL, 17);
542#endif
543	if (error != 0) {
544		flags = inp->sctp_flags;
545		if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
546		    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
547#ifdef SCTP_LOG_CLOSING
548			sctp_log_closing(inp, NULL, 15);
549#endif
550			SCTP_INP_WUNLOCK(inp);
551			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
552			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
553		} else {
554			SCTP_INP_WUNLOCK(inp);
555		}
556		return error;
557	}
558#endif				/* IPSEC */
559	SCTP_INP_WUNLOCK(inp);
560	return 0;
561}
562
563static int
564sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
565{
566	struct sctp_inpcb *inp = NULL;
567	int error;
568
569#ifdef INET6
570	if (addr && addr->sa_family != AF_INET) {
571		/* must be a v4 address! */
572		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
573		return EINVAL;
574	}
575#endif				/* INET6 */
576	if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) {
577		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
578		return EINVAL;
579	}
580	inp = (struct sctp_inpcb *)so->so_pcb;
581	if (inp == 0) {
582		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
583		return EINVAL;
584	}
585	error = sctp_inpcb_bind(so, addr, NULL, p);
586	return error;
587}
588
589void
590sctp_close(struct socket *so)
591{
592	struct sctp_inpcb *inp;
593	uint32_t flags;
594
595	inp = (struct sctp_inpcb *)so->so_pcb;
596	if (inp == 0)
597		return;
598
599	/*
600	 * Inform all the lower-layer associations that we are done.
601	 */
602sctp_must_try_again:
603	flags = inp->sctp_flags;
604#ifdef SCTP_LOG_CLOSING
605	sctp_log_closing(inp, NULL, 17);
606#endif
607	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
608	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
609		if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
610		    (so->so_rcv.sb_cc > 0)) {
611#ifdef SCTP_LOG_CLOSING
612			sctp_log_closing(inp, NULL, 13);
613#endif
614			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
615			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
616		} else {
617#ifdef SCTP_LOG_CLOSING
618			sctp_log_closing(inp, NULL, 14);
619#endif
620			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
621			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
622		}
623		/*
624		 * The socket is now detached, no matter what the state of
625		 * the SCTP association.
626		 */
627		SOCK_LOCK(so);
628		SCTP_SB_CLEAR(so->so_snd);
629		/*
630		 * same for the rcv ones; they are only here for the
631		 * accounting/select.
632		 */
633		SCTP_SB_CLEAR(so->so_rcv);
634
635		/* Now null out the reference, we are completely detached. */
636		so->so_pcb = NULL;
637		SOCK_UNLOCK(so);
638	} else {
639		flags = inp->sctp_flags;
640		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
641			goto sctp_must_try_again;
642		}
643	}
644	return;
645}
646
647
648int
649sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
650    struct mbuf *control, struct thread *p);
651
652
653int
654sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
655    struct mbuf *control, struct thread *p)
656{
657	struct sctp_inpcb *inp;
658	int error;
659
660	inp = (struct sctp_inpcb *)so->so_pcb;
661	if (inp == 0) {
662		if (control) {
663			sctp_m_freem(control);
664			control = NULL;
665		}
666		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
667		sctp_m_freem(m);
668		return EINVAL;
669	}
670	/* Got to have a to-address if we are NOT a connected socket */
671	if ((addr == NULL) &&
672	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
673	    (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
674	    ) {
675		goto connected_type;
676	} else if (addr == NULL) {
677		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
678		error = EDESTADDRREQ;
679		sctp_m_freem(m);
680		if (control) {
681			sctp_m_freem(control);
682			control = NULL;
683		}
684		return (error);
685	}
686#ifdef INET6
687	if (addr->sa_family != AF_INET) {
688		/* must be a v4 address! */
689		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
690		sctp_m_freem(m);
691		if (control) {
692			sctp_m_freem(control);
693			control = NULL;
694		}
695		error = EDESTADDRREQ;
696		return EDESTADDRREQ;
697	}
698#endif				/* INET6 */
699connected_type:
700	/* now what about control */
701	if (control) {
702		if (inp->control) {
703			SCTP_PRINTF("huh? control set?\n");
704			sctp_m_freem(inp->control);
705			inp->control = NULL;
706		}
707		inp->control = control;
708	}
709	/* Place the data */
710	if (inp->pkt) {
711		SCTP_BUF_NEXT(inp->pkt_last) = m;
712		inp->pkt_last = m;
713	} else {
714		inp->pkt_last = inp->pkt = m;
715	}
716	if (
717	/* FreeBSD uses a flag passed */
718	    ((flags & PRUS_MORETOCOME) == 0)
719	    ) {
720		/*
721		 * Note that with the current version this code will only be
722		 * used by OpenBSD -- NetBSD, FreeBSD, and MacOS have methods
723		 * for re-defining sosend to use sctp_sosend. One can
724		 * optionally switch back to this code (by changing back the
725		 * definitions), but this is not advisable. This code is used
726		 * by FreeBSD when sending a file with sendfile(), though.
727		 */
728		int ret;
729
730		ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
731		inp->pkt = NULL;
732		inp->control = NULL;
733		return (ret);
734	} else {
735		return (0);
736	}
737}
738
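/*
 * Graceful disconnect for one-to-one style sockets (the one-to-many model
 * returns EOPNOTSUPP): if nothing is queued a SHUTDOWN is sent, otherwise
 * SHUTDOWN_PENDING is set; unread receive data or a zero-linger close turns
 * this into a user-initiated ABORT instead.
 */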
739int
740sctp_disconnect(struct socket *so)
741{
742	struct sctp_inpcb *inp;
743
744	inp = (struct sctp_inpcb *)so->so_pcb;
745	if (inp == NULL) {
746		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
747		return (ENOTCONN);
748	}
749	SCTP_INP_RLOCK(inp);
750	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
751	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
752		if (SCTP_LIST_EMPTY(&inp->sctp_asoc_list)) {
753			/* No connection */
754			SCTP_INP_RUNLOCK(inp);
755			return (0);
756		} else {
757			struct sctp_association *asoc;
758			struct sctp_tcb *stcb;
759
760			stcb = LIST_FIRST(&inp->sctp_asoc_list);
761			if (stcb == NULL) {
762				SCTP_INP_RUNLOCK(inp);
763				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
764				return (EINVAL);
765			}
766			SCTP_TCB_LOCK(stcb);
767			asoc = &stcb->asoc;
768			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
769				/* We are about to be freed, out of here */
770				SCTP_TCB_UNLOCK(stcb);
771				SCTP_INP_RUNLOCK(inp);
772				return (0);
773			}
774			if (((so->so_options & SO_LINGER) &&
775			    (so->so_linger == 0)) ||
776			    (so->so_rcv.sb_cc > 0)) {
777				if (SCTP_GET_STATE(asoc) !=
778				    SCTP_STATE_COOKIE_WAIT) {
779					/* Left with Data unread */
780					struct mbuf *err;
781
782					err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
783					if (err) {
784						/*
785						 * Fill in the user
786						 * initiated abort
787						 */
788						struct sctp_paramhdr *ph;
789
790						ph = mtod(err, struct sctp_paramhdr *);
791						SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr);
792						ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
793						ph->param_length = htons(SCTP_BUF_LEN(err));
794					}
795#if defined(SCTP_PANIC_ON_ABORT)
796					panic("disconnect does an abort");
797#endif
798					sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED);
799					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
800				}
801				SCTP_INP_RUNLOCK(inp);
802				if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
803				    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
804					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
805				}
806				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
807				/* No unlock tcb assoc is gone */
808				return (0);
809			}
810			if (TAILQ_EMPTY(&asoc->send_queue) &&
811			    TAILQ_EMPTY(&asoc->sent_queue) &&
812			    (asoc->stream_queue_cnt == 0)) {
813				/* there is nothing queued to send, so done */
814				if (asoc->locked_on_sending) {
815					goto abort_anyway;
816				}
817				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
818				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
819					/* only send SHUTDOWN 1st time thru */
820					sctp_stop_timers_for_shutdown(stcb);
821					sctp_send_shutdown(stcb,
822					    stcb->asoc.primary_destination);
823					sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
824					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
825					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
826						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
827					}
828					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
829					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
830					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
831					    stcb->sctp_ep, stcb,
832					    asoc->primary_destination);
833					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
834					    stcb->sctp_ep, stcb,
835					    asoc->primary_destination);
836				}
837			} else {
838				/*
839				 * we still have (or just got) data to send,
840				 * so set SHUTDOWN_PENDING
841				 */
842				/*
843				 * XXX sockets draft says that SCTP_EOF
844				 * should be sent with no data. currently,
845				 * we will allow user data to be sent first
846				 * and move to SHUTDOWN-PENDING
847				 */
848				asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
849				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
850				    asoc->primary_destination);
851				if (asoc->locked_on_sending) {
852					/* Locked to send out the data */
853					struct sctp_stream_queue_pending *sp;
854
855					sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
856					if (sp == NULL) {
857						SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
858						    asoc->locked_on_sending->stream_no);
859					} else {
860						if ((sp->length == 0) && (sp->msg_is_complete == 0))
861							asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
862					}
863				}
864				if (TAILQ_EMPTY(&asoc->send_queue) &&
865				    TAILQ_EMPTY(&asoc->sent_queue) &&
866				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
867					struct mbuf *op_err;
868
869			abort_anyway:
870					op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
871					    0, M_DONTWAIT, 1, MT_DATA);
872					if (op_err) {
873						/*
874						 * Fill in the user
875						 * initiated abort
876						 */
877						struct sctp_paramhdr *ph;
878						uint32_t *ippp;
879
880						SCTP_BUF_LEN(op_err) =
881						    (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
882						ph = mtod(op_err,
883						    struct sctp_paramhdr *);
884						ph->param_type = htons(
885						    SCTP_CAUSE_USER_INITIATED_ABT);
886						ph->param_length = htons(SCTP_BUF_LEN(op_err));
887						ippp = (uint32_t *) (ph + 1);
888						*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4);
889					}
890#if defined(SCTP_PANIC_ON_ABORT)
891					panic("disconnect does an abort");
892#endif
893
894					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
895					sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED);
896					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
897					if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
898					    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
899						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
900					}
901					SCTP_INP_RUNLOCK(inp);
902					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
903					return (0);
904				} else {
905					sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
906				}
907			}
908			SCTP_TCB_UNLOCK(stcb);
909			SCTP_INP_RUNLOCK(inp);
910			return (0);
911		}
912		/* not reached */
913	} else {
914		/* UDP model does not support this */
915		SCTP_INP_RUNLOCK(inp);
916		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
917		return EOPNOTSUPP;
918	}
919}
920
921int
922sctp_flush(struct socket *so, int how)
923{
924	/*
925	 * We will just clear out the values and let subsequent close clear
926	 * out the data, if any. Note that if the user did a shutdown(SHUT_RD),
927	 * they will not be able to read the data; the socket will block
928	 * that from happening.
929	 */
930	if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) {
931		/*
932		 * First make sure the sb will be happy; we don't use these
933		 * except maybe the count
934		 */
935		so->so_rcv.sb_cc = 0;
936		so->so_rcv.sb_mbcnt = 0;
937		so->so_rcv.sb_mb = NULL;
938	}
939	if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) {
940		/*
941		 * First make sure the sb will be happy; we don't use these
942		 * except maybe the count
943		 */
944		so->so_snd.sb_cc = 0;
945		so->so_snd.sb_mbcnt = 0;
946		so->so_snd.sb_mb = NULL;
947
948	}
949	return (0);
950}
951
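/*
 * shutdown(2) handler: invalid for one-to-many sockets; for one-to-one
 * sockets send a SHUTDOWN once all queued data has gone out, or mark
 * SHUTDOWN_PENDING (aborting if only a partial message would remain).
 */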
952int
953sctp_shutdown(struct socket *so)
954{
955	struct sctp_inpcb *inp;
956
957	inp = (struct sctp_inpcb *)so->so_pcb;
958	if (inp == 0) {
959		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
960		return EINVAL;
961	}
962	SCTP_INP_RLOCK(inp);
963	/* For the UDP model this is an invalid call */
964	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
965		/* Restore the flags that the soshutdown took away. */
966		so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
967		/* This proc will wakeup for read and do nothing (I hope) */
968		SCTP_INP_RUNLOCK(inp);
969		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
970		return (EOPNOTSUPP);
971	}
972	/*
973	 * Ok, if we reach here it is the TCP model and it is either a SHUT_WR
974	 * or SHUT_RDWR. This means we put the shutdown flag against it.
975	 */
976	{
977		struct sctp_tcb *stcb;
978		struct sctp_association *asoc;
979
980		socantsendmore(so);
981
982		stcb = LIST_FIRST(&inp->sctp_asoc_list);
983		if (stcb == NULL) {
984			/*
985			 * Ok we hit the case that the shutdown call was
986			 * made after an abort or something. Nothing to do
987			 * now.
988			 */
989			SCTP_INP_RUNLOCK(inp);
990			return (0);
991		}
992		SCTP_TCB_LOCK(stcb);
993		asoc = &stcb->asoc;
994		if (TAILQ_EMPTY(&asoc->send_queue) &&
995		    TAILQ_EMPTY(&asoc->sent_queue) &&
996		    (asoc->stream_queue_cnt == 0)) {
997			if (asoc->locked_on_sending) {
998				goto abort_anyway;
999			}
1000			/* there is nothing queued to send, so I'm done... */
1001			if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1002				/* only send SHUTDOWN the first time through */
1003				sctp_stop_timers_for_shutdown(stcb);
1004				sctp_send_shutdown(stcb,
1005				    stcb->asoc.primary_destination);
1006				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
1007				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
1008				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
1009					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
1010				}
1011				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
1012				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
1013				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1014				    stcb->sctp_ep, stcb,
1015				    asoc->primary_destination);
1016				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1017				    stcb->sctp_ep, stcb,
1018				    asoc->primary_destination);
1019			}
1020		} else {
1021			/*
1022			 * we still have (or just got) data to send, so set
1023			 * SHUTDOWN_PENDING
1024			 */
1025			asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1026			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
1027			    asoc->primary_destination);
1028
1029			if (asoc->locked_on_sending) {
1030				/* Locked to send out the data */
1031				struct sctp_stream_queue_pending *sp;
1032
1033				sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
1034				if (sp == NULL) {
1035					SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
1036					    asoc->locked_on_sending->stream_no);
1037				} else {
1038					if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
1039						asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
1040					}
1041				}
1042			}
1043			if (TAILQ_EMPTY(&asoc->send_queue) &&
1044			    TAILQ_EMPTY(&asoc->sent_queue) &&
1045			    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
1046				struct mbuf *op_err;
1047
1048		abort_anyway:
1049				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1050				    0, M_DONTWAIT, 1, MT_DATA);
1051				if (op_err) {
1052					/* Fill in the user initiated abort */
1053					struct sctp_paramhdr *ph;
1054					uint32_t *ippp;
1055
1056					SCTP_BUF_LEN(op_err) =
1057					    sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
1058					ph = mtod(op_err,
1059					    struct sctp_paramhdr *);
1060					ph->param_type = htons(
1061					    SCTP_CAUSE_USER_INITIATED_ABT);
1062					ph->param_length = htons(SCTP_BUF_LEN(op_err));
1063					ippp = (uint32_t *) (ph + 1);
1064					*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
1065				}
1066#if defined(SCTP_PANIC_ON_ABORT)
1067				panic("shutdown does an abort");
1068#endif
1069				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
1070				sctp_abort_an_association(stcb->sctp_ep, stcb,
1071				    SCTP_RESPONSE_TO_USER_REQ,
1072				    op_err, SCTP_SO_LOCKED);
1073				goto skip_unlock;
1074			} else {
1075				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
1076			}
1077		}
1078		SCTP_TCB_UNLOCK(stcb);
1079	}
1080skip_unlock:
1081	SCTP_INP_RUNLOCK(inp);
1082	return 0;
1083}
1084
1085/*
1086 * copies a "user"-presentable address and removes the embedded scope, etc.
1087 * returns 0 on success, 1 on error
1088 */
1089static uint32_t
1090sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
1091{
1092#ifdef INET6
1093	struct sockaddr_in6 lsa6;
1094
1095	sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
1096	    &lsa6);
1097#endif
1098	memcpy(ss, sa, sa->sa_len);
1099	return (0);
1100}
1101
1102
1103
1104/*
1105 * NOTE: assumes addr lock is held
1106 */
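/*
 * Copy the endpoint's usable local addresses (subject to the association's
 * scope settings and any per-TCB restrictions) into the caller's buffer,
 * stopping once 'limit' bytes have been filled; returns the bytes written.
 */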
1107static size_t
1108sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
1109    struct sctp_tcb *stcb,
1110    size_t limit,
1111    struct sockaddr_storage *sas,
1112    uint32_t vrf_id)
1113{
1114	struct sctp_ifn *sctp_ifn;
1115	struct sctp_ifa *sctp_ifa;
1116	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
1117	size_t actual;
1118	int ipv4_addr_legal, ipv6_addr_legal;
1119	struct sctp_vrf *vrf;
1120
1121	actual = 0;
1122	if (limit <= 0)
1123		return (actual);
1124
1125	if (stcb) {
1126		/* Turn on all the appropriate scope */
1127		loopback_scope = stcb->asoc.loopback_scope;
1128		ipv4_local_scope = stcb->asoc.ipv4_local_scope;
1129		local_scope = stcb->asoc.local_scope;
1130		site_scope = stcb->asoc.site_scope;
1131	} else {
1132		/* Turn on ALL scope, since we look at the EP */
1133		loopback_scope = ipv4_local_scope = local_scope =
1134		    site_scope = 1;
1135	}
1136	ipv4_addr_legal = ipv6_addr_legal = 0;
1137	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1138		ipv6_addr_legal = 1;
1139		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1140			ipv4_addr_legal = 1;
1141		}
1142	} else {
1143		ipv4_addr_legal = 1;
1144	}
1145	vrf = sctp_find_vrf(vrf_id);
1146	if (vrf == NULL) {
1147		return (0);
1148	}
1149	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1150		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1151			if ((loopback_scope == 0) &&
1152			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
1153				/* Skip loopback if loopback_scope not set */
1154				continue;
1155			}
1156			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1157				if (stcb) {
1158					/*
1159					 * For the BOUND-ALL case, the list
1160					 * associated with a TCB is always
1161					 * considered a reverse list, i.e.
1162					 * it lists addresses that are NOT
1163					 * part of the association. If this
1164					 * is one of those, we must skip it.
1165					 */
1166					if (sctp_is_addr_restricted(stcb,
1167					    sctp_ifa)) {
1168						continue;
1169					}
1170				}
1171				switch (sctp_ifa->address.sa.sa_family) {
1172				case AF_INET:
1173					if (ipv4_addr_legal) {
1174						struct sockaddr_in *sin;
1175
1176						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
1177						if (sin->sin_addr.s_addr == 0) {
1178							/*
1179							 * we skip
1180							 * unspecifed
1181							 * unspecified
1182							 */
1183							continue;
1184						}
1185						if ((ipv4_local_scope == 0) &&
1186						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1187							continue;
1188						}
1189#ifdef INET6
1190						if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
1191							in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
1192							((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1193							sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
1194							actual += sizeof(struct sockaddr_in6);
1195						} else {
1196#endif
1197							memcpy(sas, sin, sizeof(*sin));
1198							((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
1199							sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
1200							actual += sizeof(*sin);
1201#ifdef INET6
1202						}
1203#endif
1204						if (actual >= limit) {
1205							return (actual);
1206						}
1207					} else {
1208						continue;
1209					}
1210					break;
1211#ifdef INET6
1212				case AF_INET6:
1213					if (ipv6_addr_legal) {
1214						struct sockaddr_in6 *sin6;
1215
1216						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
1217						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1218							/*
1219							 * we skip
1220							 * unspecifed
1221							 * unspecified
1222							 */
1223							continue;
1224						}
1225						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1226							if (local_scope == 0)
1227								continue;
1228							if (sin6->sin6_scope_id == 0) {
1229								if (sa6_recoverscope(sin6) != 0)
1230									/* bad link local address */
1244									continue;
1245							}
1246						}
1247						if ((site_scope == 0) &&
1248						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1249							continue;
1250						}
1251						memcpy(sas, sin6, sizeof(*sin6));
1252						((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1253						sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
1254						actual += sizeof(*sin6);
1255						if (actual >= limit) {
1256							return (actual);
1257						}
1258					} else {
1259						continue;
1260					}
1261					break;
1262#endif
1263				default:
1264					/* TSNH */
1265					break;
1266				}
1267			}
1268		}
1269	} else {
1270		struct sctp_laddr *laddr;
1271
1272		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1273			if (stcb) {
1274				if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
1275					continue;
1276				}
1277			}
1278			if (sctp_fill_user_address(sas, &laddr->ifa->address.sa))
1279				continue;
1280
1281			((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1282			sas = (struct sockaddr_storage *)((caddr_t)sas +
1283			    laddr->ifa->address.sa.sa_len);
1284			actual += laddr->ifa->address.sa.sa_len;
1285			if (actual >= limit) {
1286				return (actual);
1287			}
1288		}
1289	}
1290	return (actual);
1291}
1292
1293static size_t
1294sctp_fill_up_addresses(struct sctp_inpcb *inp,
1295    struct sctp_tcb *stcb,
1296    size_t limit,
1297    struct sockaddr_storage *sas)
1298{
1299	size_t size = 0;
1300
1301	SCTP_IPI_ADDR_RLOCK();
1302	/* fill up addresses for the endpoint's default vrf */
1303	size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
1304	    inp->def_vrf_id);
1305	SCTP_IPI_ADDR_RUNLOCK();
1306	return (size);
1307}
1308
1309/*
1310 * NOTE: assumes addr lock is held
1311 */
1312static int
1313sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
1314{
1315	int cnt = 0;
1316	struct sctp_vrf *vrf = NULL;
1317
1318	/*
1319	 * In both the sub-set bound and bound-all cases we return the MAXIMUM
1320	 * number of addresses that you COULD get. In reality the sub-set
1321	 * bound may have an exclusion list for a given TCB, OR in the
1322	 * bound-all case a TCB may NOT include the loopback or other
1323	 * addresses as well.
1324	 */
1325	vrf = sctp_find_vrf(vrf_id);
1326	if (vrf == NULL) {
1327		return (0);
1328	}
1329	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1330		struct sctp_ifn *sctp_ifn;
1331		struct sctp_ifa *sctp_ifa;
1332
1333		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1334			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1335				/* Count them if they are the right type */
1336				if (sctp_ifa->address.sa.sa_family == AF_INET) {
1337					if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
1338						cnt += sizeof(struct sockaddr_in6);
1339					else
1340						cnt += sizeof(struct sockaddr_in);
1341
1342				} else if (sctp_ifa->address.sa.sa_family == AF_INET6)
1343					cnt += sizeof(struct sockaddr_in6);
1344			}
1345		}
1346	} else {
1347		struct sctp_laddr *laddr;
1348
1349		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1350			if (laddr->ifa->address.sa.sa_family == AF_INET) {
1351				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
1352					cnt += sizeof(struct sockaddr_in6);
1353				else
1354					cnt += sizeof(struct sockaddr_in);
1355
1356			} else if (laddr->ifa->address.sa.sa_family == AF_INET6)
1357				cnt += sizeof(struct sockaddr_in6);
1358		}
1359	}
1360	return (cnt);
1361}
1362
1363static int
1364sctp_count_max_addresses(struct sctp_inpcb *inp)
1365{
1366	int cnt = 0;
1367
1368	SCTP_IPI_ADDR_RLOCK();
1369	/* count addresses for the endpoint's default VRF */
1370	cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
1371	SCTP_IPI_ADDR_RUNLOCK();
1372	return (cnt);
1373}
1374
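/*
 * Common backend for the sctp_connectx() socket options: parse the packed
 * address list, create the association, add the remaining addresses, hand
 * the association id back to the caller, and either start the INIT timer
 * (delayed connect) or send the INIT right away.
 */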
1375static int
1376sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
1377    size_t optsize, void *p, int delay)
1378{
1379	int error = 0;
1380	int creat_lock_on = 0;
1381	struct sctp_tcb *stcb = NULL;
1382	struct sockaddr *sa;
1383	int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr;
1384	int added = 0;
1385	uint32_t vrf_id;
1386	int bad_addresses = 0;
1387	sctp_assoc_t *a_id;
1388
1389	SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n");
1390
1391	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1392	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1393		/* We are already connected AND the TCP model */
1394		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
1395		return (EADDRINUSE);
1396	}
1397	if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
1398		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1399		return (EINVAL);
1400	}
1401	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
1402		SCTP_INP_RLOCK(inp);
1403		stcb = LIST_FIRST(&inp->sctp_asoc_list);
1404		SCTP_INP_RUNLOCK(inp);
1405	}
1406	if (stcb) {
1407		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
1408		return (EALREADY);
1409	}
1410	SCTP_INP_INCR_REF(inp);
1411	SCTP_ASOC_CREATE_LOCK(inp);
1412	creat_lock_on = 1;
1413	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1414	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1415		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
1416		error = EFAULT;
1417		goto out_now;
1418	}
1419	totaddrp = (int *)optval;
1420	totaddr = *totaddrp;
1421	sa = (struct sockaddr *)(totaddrp + 1);
1422	stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, &num_v6, &error, (optsize - sizeof(int)), &bad_addresses);
1423	if ((stcb != NULL) || bad_addresses) {
1424		/* Already have or are bringing up an association */
1425		SCTP_ASOC_CREATE_UNLOCK(inp);
1426		creat_lock_on = 0;
1427		if (stcb)
1428			SCTP_TCB_UNLOCK(stcb);
1429		if (bad_addresses == 0) {
1430			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
1431			error = EALREADY;
1432		}
1433		goto out_now;
1434	}
1435#ifdef INET6
1436	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
1437	    (num_v6 > 0)) {
1438		error = EINVAL;
1439		goto out_now;
1440	}
1441	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1442	    (num_v4 > 0)) {
1443		struct in6pcb *inp6;
1444
1445		inp6 = (struct in6pcb *)inp;
1446		if (SCTP_IPV6_V6ONLY(inp6)) {
1447			/*
1448			 * if the IPV6_V6ONLY flag is set, ignore connections
1449			 * destined to a v4 addr or v4-mapped addr
1450			 */
1451			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1452			error = EINVAL;
1453			goto out_now;
1454		}
1455	}
1456#endif				/* INET6 */
1457	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
1458	    SCTP_PCB_FLAGS_UNBOUND) {
1459		/* Bind an ephemeral port */
1460		error = sctp_inpcb_bind(so, NULL, NULL, p);
1461		if (error) {
1462			goto out_now;
1463		}
1464	}
1465	/* FIX ME: do we want to pass in a vrf on the connect call? */
1466	vrf_id = inp->def_vrf_id;
1467
1468	/* We are GOOD to go */
1469	stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0, vrf_id,
1470	    (struct thread *)p
1471	    );
1472	if (stcb == NULL) {
1473		/* Gak! no memory */
1474		goto out_now;
1475	}
1476	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
1477	/* move to second address */
1478	if (sa->sa_family == AF_INET)
1479		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
1480	else
1481		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
1482
1483	error = 0;
1484	added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error);
1485	/* Fill in the return id */
1486	if (error) {
1487		(void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_12);
1488		goto out_now;
1489	}
1490	a_id = (sctp_assoc_t *) optval;
1491	*a_id = sctp_get_associd(stcb);
1492
1493	/* initialize authentication parameters for the assoc */
1494	sctp_initialize_auth_params(inp, stcb);
1495
1496	if (delay) {
1497		/* doing delayed connection */
1498		stcb->asoc.delayed_connection = 1;
1499		sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
1500	} else {
1501		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1502		sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
1503	}
1504	SCTP_TCB_UNLOCK(stcb);
1505	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
1506		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1507		/* Set the connected flag so we can queue data */
1508		soisconnecting(so);
1509	}
1510out_now:
1511	if (creat_lock_on) {
1512		SCTP_ASOC_CREATE_UNLOCK(inp);
1513	}
1514	SCTP_INP_DECR_REF(inp);
1515	return error;
1516}
1517
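/*
 * Resolve an stcb from a socket-option association id: for one-to-one style
 * sockets take the first (only) association on the endpoint, otherwise look
 * the id up; the stcb is returned locked, or NULL if none applies.
 */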
1518#define SCTP_FIND_STCB(inp, stcb, assoc_id) { \
1519	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\
1520	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \
1521		SCTP_INP_RLOCK(inp); \
1522		stcb = LIST_FIRST(&inp->sctp_asoc_list); \
1523		if (stcb) { \
1524			SCTP_TCB_LOCK(stcb); \
1525                } \
1526		SCTP_INP_RUNLOCK(inp); \
1527	} else if (assoc_id != 0) { \
1528		stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
1529		if (stcb == NULL) { \
1530		        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \
1531			error = ENOENT; \
1532			break; \
1533		} \
1534	} else { \
1535		stcb = NULL; \
1536        } \
1537  }
1538
1539
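/*
 * Validate that the user-supplied option buffer is large enough for the
 * requested structure and cast it; otherwise fail the option with EINVAL.
 */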
1540#define SCTP_CHECK_AND_CAST(destp, srcp, type, size)  {\
1541	if (size < sizeof(type)) { \
1542		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \
1543		error = EINVAL; \
1544		break; \
1545	} else { \
1546		destp = (type *)srcp; \
1547	} \
1548      }
1549
1550static int
1551sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
1552    void *p)
1553{
1554	struct sctp_inpcb *inp = NULL;
1555	int error, val = 0;
1556	struct sctp_tcb *stcb = NULL;
1557
1558	if (optval == NULL) {
1559		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1560		return (EINVAL);
1561	}
1562	inp = (struct sctp_inpcb *)so->so_pcb;
1563	if (inp == 0) {
1564		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1565		return EINVAL;
1566	}
1567	error = 0;
1568
1569	switch (optname) {
1570	case SCTP_NODELAY:
1571	case SCTP_AUTOCLOSE:
1572	case SCTP_EXPLICIT_EOR:
1573	case SCTP_AUTO_ASCONF:
1574	case SCTP_DISABLE_FRAGMENTS:
1575	case SCTP_I_WANT_MAPPED_V4_ADDR:
1576	case SCTP_USE_EXT_RCVINFO:
1577		SCTP_INP_RLOCK(inp);
1578		switch (optname) {
1579		case SCTP_DISABLE_FRAGMENTS:
1580			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
1581			break;
1582		case SCTP_I_WANT_MAPPED_V4_ADDR:
1583			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
1584			break;
1585		case SCTP_AUTO_ASCONF:
1586			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1587				/* only valid for bound all sockets */
1588				val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
1589			} else {
1590				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1591				error = EINVAL;
1592				goto flags_out;
1593			}
1594			break;
1595		case SCTP_EXPLICIT_EOR:
1596			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
1597			break;
1598		case SCTP_NODELAY:
1599			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
1600			break;
1601		case SCTP_USE_EXT_RCVINFO:
1602			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
1603			break;
1604		case SCTP_AUTOCLOSE:
1605			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
1606				val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time);
1607			else
1608				val = 0;
1609			break;
1610
1611		default:
1612			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
1613			error = ENOPROTOOPT;
1614		}		/* end switch (sopt->sopt_name) */
1615		if (optname != SCTP_AUTOCLOSE) {
1616			/* make it an "on/off" value */
1617			val = (val != 0);
1618		}
1619		if (*optsize < sizeof(val)) {
1620			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1621			error = EINVAL;
1622		}
1623flags_out:
1624		SCTP_INP_RUNLOCK(inp);
1625		if (error == 0) {
1626			/* return the option value */
1627			*(int *)optval = val;
1628			*optsize = sizeof(val);
1629		}
1630		break;
1631	case SCTP_GET_PACKET_LOG:
1632		{
1633#ifdef  SCTP_PACKET_LOGGING
1634			uint8_t *target;
1635			int ret;
1636
1637			SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize);
1638			ret = sctp_copy_out_packet_log(target, (int)*optsize);
1639			*optsize = ret;
1640#else
1641			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
1642			error = EOPNOTSUPP;
1643#endif
1644			break;
1645		}
1646	case SCTP_PARTIAL_DELIVERY_POINT:
1647		{
1648			uint32_t *value;
1649
1650			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1651			*value = inp->partial_delivery_point;
1652			*optsize = sizeof(uint32_t);
1653		}
1654		break;
1655	case SCTP_FRAGMENT_INTERLEAVE:
1656		{
1657			uint32_t *value;
1658
1659			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1660			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) {
1661				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) {
1662					*value = SCTP_FRAG_LEVEL_2;
1663				} else {
1664					*value = SCTP_FRAG_LEVEL_1;
1665				}
1666			} else {
1667				*value = SCTP_FRAG_LEVEL_0;
1668			}
1669			*optsize = sizeof(uint32_t);
1670		}
1671		break;
1672	case SCTP_CMT_ON_OFF:
1673		{
1674			struct sctp_assoc_value *av;
1675
1676			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1677			if (sctp_cmt_on_off) {
1678				SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1679				if (stcb) {
1680					av->assoc_value = stcb->asoc.sctp_cmt_on_off;
1681					SCTP_TCB_UNLOCK(stcb);
1682
1683				} else {
1684					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1685					error = ENOTCONN;
1686				}
1687			} else {
1688				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
1689				error = ENOPROTOOPT;
1690			}
1691			*optsize = sizeof(*av);
1692		}
1693		break;
1694		/* JRS - Get socket option for pluggable congestion control */
1695	case SCTP_PLUGGABLE_CC:
1696		{
1697			struct sctp_assoc_value *av;
1698
1699			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1700			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1701			if (stcb) {
1702				av->assoc_value = stcb->asoc.congestion_control_module;
1703				SCTP_TCB_UNLOCK(stcb);
1704			} else {
1705				av->assoc_value = inp->sctp_ep.sctp_default_cc_module;
1706			}
1707			*optsize = sizeof(*av);
1708		}
1709		break;
1710	case SCTP_GET_ADDR_LEN:
1711		{
1712			struct sctp_assoc_value *av;
1713
1714			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1715			error = EINVAL;
1716#ifdef INET
1717			if (av->assoc_value == AF_INET) {
1718				av->assoc_value = sizeof(struct sockaddr_in);
1719				error = 0;
1720			}
1721#endif
1722#ifdef INET6
1723			if (av->assoc_value == AF_INET6) {
1724				av->assoc_value = sizeof(struct sockaddr_in6);
1725				error = 0;
1726			}
1727#endif
1728			if (error) {
1729				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1730			}
1731			*optsize = sizeof(*av);
1732		}
1733		break;
1734	case SCTP_GET_ASSOC_NUMBER:
1735		{
1736			uint32_t *value, cnt;
1737
1738			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1739			cnt = 0;
1740			SCTP_INP_RLOCK(inp);
1741			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1742				cnt++;
1743			}
1744			SCTP_INP_RUNLOCK(inp);
1745			*value = cnt;
1746			*optsize = sizeof(uint32_t);
1747		}
1748		break;
1749
1750	case SCTP_GET_ASSOC_ID_LIST:
1751		{
1752			struct sctp_assoc_ids *ids;
1753			unsigned int at, limit;
1754
1755			SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
1756			at = 0;
1757			limit = *optsize / sizeof(sctp_assoc_t);
1758			SCTP_INP_RLOCK(inp);
1759			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1760				if (at < limit) {
1761					ids->gaids_assoc_id[at++] = sctp_get_associd(stcb);
1762				} else {
1763					error = EINVAL;
1764					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1765					break;
1766				}
1767			}
1768			SCTP_INP_RUNLOCK(inp);
1769			*optsize = at * sizeof(sctp_assoc_t);
1770		}
1771		break;
1772	case SCTP_CONTEXT:
1773		{
1774			struct sctp_assoc_value *av;
1775
1776			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1777			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1778
1779			if (stcb) {
1780				av->assoc_value = stcb->asoc.context;
1781				SCTP_TCB_UNLOCK(stcb);
1782			} else {
1783				SCTP_INP_RLOCK(inp);
1784				av->assoc_value = inp->sctp_context;
1785				SCTP_INP_RUNLOCK(inp);
1786			}
1787			*optsize = sizeof(*av);
1788		}
1789		break;
1790	case SCTP_VRF_ID:
1791		{
1792			uint32_t *default_vrfid;
1793
1794			SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize);
1795			*default_vrfid = inp->def_vrf_id;
1796			break;
1797		}
1798	case SCTP_GET_ASOC_VRF:
1799		{
1800			struct sctp_assoc_value *id;
1801
1802			SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize);
1803			SCTP_FIND_STCB(inp, stcb, id->assoc_id);
1804			if (stcb == NULL) {
1805				error = EINVAL;
1806				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1807				break;
1808			}
1809			id->assoc_value = stcb->asoc.vrf_id;
1810			break;
1811		}
1812	case SCTP_GET_VRF_IDS:
1813		{
1814			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
1815			error = EOPNOTSUPP;
1816			break;
1817		}
1818	case SCTP_GET_NONCE_VALUES:
1819		{
1820			struct sctp_get_nonce_values *gnv;
1821
1822			SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize);
1823			SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id);
1824
1825			if (stcb) {
1826				gnv->gn_peers_tag = stcb->asoc.peer_vtag;
1827				gnv->gn_local_tag = stcb->asoc.my_vtag;
1828				SCTP_TCB_UNLOCK(stcb);
1829			} else {
1830				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1831				error = ENOTCONN;
1832			}
1833			*optsize = sizeof(*gnv);
1834		}
1835		break;
1836	case SCTP_DELAYED_SACK:
1837		{
1838			struct sctp_sack_info *sack;
1839
1840			SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize);
1841			SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
1842			if (stcb) {
1843				sack->sack_delay = stcb->asoc.delayed_ack;
1844				sack->sack_freq = stcb->asoc.sack_freq;
1845				SCTP_TCB_UNLOCK(stcb);
1846			} else {
1847				SCTP_INP_RLOCK(inp);
1848				sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1849				sack->sack_freq = inp->sctp_ep.sctp_sack_freq;
1850				SCTP_INP_RUNLOCK(inp);
1851			}
1852			*optsize = sizeof(*sack);
1853		}
1854		break;
1855
1856	case SCTP_GET_SNDBUF_USE:
1857		{
1858			struct sctp_sockstat *ss;
1859
1860			SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize);
1861			SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id);
1862
1863			if (stcb) {
1864				ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size;
1865				ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue +
1866				    stcb->asoc.size_on_all_streams);
1867				SCTP_TCB_UNLOCK(stcb);
1868			} else {
1869				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1870				error = ENOTCONN;
1871			}
1872			*optsize = sizeof(struct sctp_sockstat);
1873		}
1874		break;
1875	case SCTP_MAX_BURST:
1876		{
1877			uint8_t *value;
1878
1879			SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize);
1880
1881			SCTP_INP_RLOCK(inp);
1882			*value = inp->sctp_ep.max_burst;
1883			SCTP_INP_RUNLOCK(inp);
1884			*optsize = sizeof(uint8_t);
1885		}
1886		break;
1887	case SCTP_MAXSEG:
1888		{
1889			struct sctp_assoc_value *av;
1890			int ovh;
1891
1892			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1893			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1894
1895			if (stcb) {
1896				av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc);
1897				SCTP_TCB_UNLOCK(stcb);
1898			} else {
1899				SCTP_INP_RLOCK(inp);
1900				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1901					ovh = SCTP_MED_OVERHEAD;
1902				} else {
1903					ovh = SCTP_MED_V4_OVERHEAD;
1904				}
1905				if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT)
1906					av->assoc_value = 0;
1907				else
1908					av->assoc_value = inp->sctp_frag_point - ovh;
1909				SCTP_INP_RUNLOCK(inp);
1910			}
1911			*optsize = sizeof(struct sctp_assoc_value);
1912		}
1913		break;
1914	case SCTP_GET_STAT_LOG:
1915		error = sctp_fill_stat_log(optval, optsize);
1916		break;
1917	case SCTP_EVENTS:
1918		{
1919			struct sctp_event_subscribe *events;
1920
1921			SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize);
1922			memset(events, 0, sizeof(*events));
1923			SCTP_INP_RLOCK(inp);
1924			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT))
1925				events->sctp_data_io_event = 1;
1926
1927			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT))
1928				events->sctp_association_event = 1;
1929
1930			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT))
1931				events->sctp_address_event = 1;
1932
1933			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
1934				events->sctp_send_failure_event = 1;
1935
1936			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR))
1937				events->sctp_peer_error_event = 1;
1938
1939			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
1940				events->sctp_shutdown_event = 1;
1941
1942			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT))
1943				events->sctp_partial_delivery_event = 1;
1944
1945			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
1946				events->sctp_adaptation_layer_event = 1;
1947
1948			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
1949				events->sctp_authentication_event = 1;
1950
1951			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
1952				events->sctp_stream_reset_events = 1;
1953			SCTP_INP_RUNLOCK(inp);
1954			*optsize = sizeof(struct sctp_event_subscribe);
1955		}
1956		break;
1957
1958	case SCTP_ADAPTATION_LAYER:
1959		{
1960			uint32_t *value;
1961
1962			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1963
1964			SCTP_INP_RLOCK(inp);
1965			*value = inp->sctp_ep.adaptation_layer_indicator;
1966			SCTP_INP_RUNLOCK(inp);
1967			*optsize = sizeof(uint32_t);
1968		}
1969		break;
1970	case SCTP_SET_INITIAL_DBG_SEQ:
1971		{
1972			uint32_t *value;
1973
1974			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1975			SCTP_INP_RLOCK(inp);
1976			*value = inp->sctp_ep.initial_sequence_debug;
1977			SCTP_INP_RUNLOCK(inp);
1978			*optsize = sizeof(uint32_t);
1979		}
1980		break;
1981	case SCTP_GET_LOCAL_ADDR_SIZE:
1982		{
1983			uint32_t *value;
1984
1985			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1986			SCTP_INP_RLOCK(inp);
1987			*value = sctp_count_max_addresses(inp);
1988			SCTP_INP_RUNLOCK(inp);
1989			*optsize = sizeof(uint32_t);
1990		}
1991		break;
1992	case SCTP_GET_REMOTE_ADDR_SIZE:
1993		{
1994			uint32_t *value;
1995			size_t size;
1996			struct sctp_nets *net;
1997
1998			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1999			/* FIXME MT: change to sctp_assoc_value? */
2000			SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value);
2001
2002			if (stcb) {
2003				size = 0;
2004				/* Count the sizes */
2005				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2006					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
2007					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2008						size += sizeof(struct sockaddr_in6);
2009					} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2010						size += sizeof(struct sockaddr_in);
2011					} else {
2012					/* unknown address family */
2013						break;
2014					}
2015				}
2016				SCTP_TCB_UNLOCK(stcb);
2017				*value = (uint32_t) size;
2018			} else {
2019				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
2020				error = ENOTCONN;
2021			}
2022			*optsize = sizeof(uint32_t);
2023		}
2024		break;
2025	case SCTP_GET_PEER_ADDRESSES:
2026		/*
2027		 * Get the address information, an array is passed in to
2028		 * Get the peer address information; an array is passed in
2029		 * for us to fill up and we pack the addresses into it.
2030		{
2031			size_t cpsz, left;
2032			struct sockaddr_storage *sas;
2033			struct sctp_nets *net;
2034			struct sctp_getaddresses *saddr;
2035
2036			SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
2037			SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
2038
2039			if (stcb) {
2040				left = (*optsize) - sizeof(struct sctp_getaddresses);
2041				*optsize = sizeof(struct sctp_getaddresses);
2042				sas = (struct sockaddr_storage *)&saddr->addr[0];
2043
2044				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2045					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
2046					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2047						cpsz = sizeof(struct sockaddr_in6);
2048					} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2049						cpsz = sizeof(struct sockaddr_in);
2050					} else {
2051					/* unknown address family */
2052						break;
2053					}
2054					if (left < cpsz) {
2055						/* not enough room. */
2056						break;
2057					}
2058#ifdef INET6
2059					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
2060					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
2061						/* Must map the address */
2062						in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
2063						    (struct sockaddr_in6 *)sas);
2064					} else {
2065#endif
2066						memcpy(sas, &net->ro._l_addr, cpsz);
2067#ifdef INET6
2068					}
2069#endif
2070					((struct sockaddr_in *)sas)->sin_port = stcb->rport;
2071
2072					sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
2073					left -= cpsz;
2074					*optsize += cpsz;
2075				}
2076				SCTP_TCB_UNLOCK(stcb);
2077			} else {
2078				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2079				error = ENOENT;
2080			}
2081		}
2082		break;
2083	case SCTP_GET_LOCAL_ADDRESSES:
2084		{
2085			size_t limit, actual;
2086			struct sockaddr_storage *sas;
2087			struct sctp_getaddresses *saddr;
2088
2089			SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
2090			SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
2091
2092			sas = (struct sockaddr_storage *)&saddr->addr[0];
2093			limit = *optsize - sizeof(sctp_assoc_t);
2094			actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
2095			if (stcb) {
2096				SCTP_TCB_UNLOCK(stcb);
2097			}
2098			*optsize = sizeof(struct sockaddr_storage) + actual;
2099		}
2100		break;
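	/*
	 * SCTP_PEER_ADDR_PARAMS (get): returns heartbeat, path MTU
	 * discovery and path-maxrxt settings, either for a specific peer
	 * address, for the association defaults when only an assoc id is
	 * given, or for the endpoint defaults otherwise.
	 */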
2101	case SCTP_PEER_ADDR_PARAMS:
2102		{
2103			struct sctp_paddrparams *paddrp;
2104			struct sctp_nets *net;
2105
2106			SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
2107			SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
2108
2109			net = NULL;
2110			if (stcb) {
2111				net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2112			} else {
2113				/*
2114				 * We increment the refcount here since
2115				 * sctp_findassociation_ep_addr() will do a
2116				 * decrement if it finds the stcb, as long as
2117				 * the locked tcb (last argument) is NOT a
2118				 * TCB, i.e. NULL.
2119				 */
2120				SCTP_INP_INCR_REF(inp);
2121				stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL);
2122				if (stcb == NULL) {
2123					SCTP_INP_DECR_REF(inp);
2124				}
2125			}
2126			if (stcb && (net == NULL)) {
2127				struct sockaddr *sa;
2128
2129				sa = (struct sockaddr *)&paddrp->spp_address;
2130				if (sa->sa_family == AF_INET) {
2131					struct sockaddr_in *sin;
2132
2133					sin = (struct sockaddr_in *)sa;
2134					if (sin->sin_addr.s_addr) {
2135						error = EINVAL;
2136						SCTP_TCB_UNLOCK(stcb);
2137						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2138						break;
2139					}
2140				} else if (sa->sa_family == AF_INET6) {
2141					struct sockaddr_in6 *sin6;
2142
2143					sin6 = (struct sockaddr_in6 *)sa;
2144					if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
2145						error = EINVAL;
2146						SCTP_TCB_UNLOCK(stcb);
2147						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2148						break;
2149					}
2150				} else {
2151					error = EAFNOSUPPORT;
2152					SCTP_TCB_UNLOCK(stcb);
2153					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2154					break;
2155				}
2156			}
2157			if (stcb) {
2158				/* Applies to the specific association */
2159				paddrp->spp_flags = 0;
2160				if (net) {
2161					int ovh;
2162
2163					if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2164						ovh = SCTP_MED_OVERHEAD;
2165					} else {
2166						ovh = SCTP_MED_V4_OVERHEAD;
2167					}
2168
2169
2170					paddrp->spp_pathmaxrxt = net->failure_threshold;
2171					paddrp->spp_pathmtu = net->mtu - ovh;
2172					/* get flags for HB */
2173					if (net->dest_state & SCTP_ADDR_NOHB)
2174						paddrp->spp_flags |= SPP_HB_DISABLE;
2175					else
2176						paddrp->spp_flags |= SPP_HB_ENABLE;
2177					/* get flags for PMTU */
2178					if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
2179						paddrp->spp_flags |= SPP_PMTUD_ENABLE;
2180					} else {
2181						paddrp->spp_flags |= SPP_PMTUD_DISABLE;
2182					}
2183#ifdef INET
2184					if (net->ro._l_addr.sin.sin_family == AF_INET) {
2185						paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc;
2186						paddrp->spp_flags |= SPP_IPV4_TOS;
2187					}
2188#endif
2189#ifdef INET6
2190					if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
2191						paddrp->spp_ipv6_flowlabel = net->tos_flowlabel;
2192						paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2193					}
2194#endif
2195				} else {
2196					/*
2197					 * No destination so return default
2198					 * value
2199					 */
2200					int cnt = 0;
2201
2202					paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
2203					paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc);
2204#ifdef INET
2205					paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc;
2206					paddrp->spp_flags |= SPP_IPV4_TOS;
2207#endif
2208#ifdef INET6
2209					paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel;
2210					paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2211#endif
2212					/* default settings should be these */
2213					if (stcb->asoc.hb_is_disabled == 0) {
2214						paddrp->spp_flags |= SPP_HB_ENABLE;
2215					} else {
2216						paddrp->spp_flags |= SPP_HB_DISABLE;
2217					}
2218					TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2219						if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
2220							cnt++;
2221						}
2222					}
2223					if (cnt) {
2224						paddrp->spp_flags |= SPP_PMTUD_ENABLE;
2225					}
2226				}
2227				paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
2228				paddrp->spp_assoc_id = sctp_get_associd(stcb);
2229				SCTP_TCB_UNLOCK(stcb);
2230			} else {
2231				/* Use endpoint defaults */
2232				SCTP_INP_RLOCK(inp);
2233				paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
2234				paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
2235				paddrp->spp_assoc_id = (sctp_assoc_t) 0;
2236				/* get inp's default */
2237#ifdef INET
2238				paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos;
2239				paddrp->spp_flags |= SPP_IPV4_TOS;
2240#endif
2241#ifdef INET6
2242				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2243					paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
2244					paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2245				}
2246#endif
2247				/* can't return this */
2248				paddrp->spp_pathmtu = 0;
2249
2250				/* default behavior, no stcb */
2251				paddrp->spp_flags = SPP_PMTUD_ENABLE;
2252
2253				if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
2254					paddrp->spp_flags |= SPP_HB_ENABLE;
2255				} else {
2256					paddrp->spp_flags |= SPP_HB_DISABLE;
2257				}
2258				SCTP_INP_RUNLOCK(inp);
2259			}
2260			*optsize = sizeof(struct sctp_paddrparams);
2261		}
2262		break;
2263	case SCTP_GET_PEER_ADDR_INFO:
2264		{
2265			struct sctp_paddrinfo *paddri;
2266			struct sctp_nets *net;
2267
2268			SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize);
2269			SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id);
2270
2271			net = NULL;
2272			if (stcb) {
2273				net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address);
2274			} else {
2275				/*
2276				 * We increment the refcount here since
2277				 * sctp_findassociation_ep_addr() will do a
2278				 * decrement if it finds the stcb, as long as
2279				 * the locked tcb (last argument) is NOT a
2280				 * TCB, i.e. NULL.
2281				 */
2282				SCTP_INP_INCR_REF(inp);
2283				stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL);
2284				if (stcb == NULL) {
2285					SCTP_INP_DECR_REF(inp);
2286				}
2287			}
2288
2289			if ((stcb) && (net)) {
2290				paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB);
2291				paddri->spinfo_cwnd = net->cwnd;
2292				paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
2293				paddri->spinfo_rto = net->RTO;
2294				paddri->spinfo_assoc_id = sctp_get_associd(stcb);
2295				SCTP_TCB_UNLOCK(stcb);
2296			} else {
2297				if (stcb) {
2298					SCTP_TCB_UNLOCK(stcb);
2299				}
2300				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2301				error = ENOENT;
2302			}
2303			*optsize = sizeof(struct sctp_paddrinfo);
2304		}
2305		break;
2306	case SCTP_PCB_STATUS:
2307		{
2308			struct sctp_pcbinfo *spcb;
2309
2310			SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize);
2311			sctp_fill_pcbinfo(spcb);
2312			*optsize = sizeof(struct sctp_pcbinfo);
2313		}
2314		break;
2315
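	/*
	 * Illustrative userland usage for SCTP_STATUS (not part of this
	 * file; fd and assoc_id are placeholders):
	 *
	 *	struct sctp_status st;
	 *	socklen_t len = sizeof(st);
	 *	st.sstat_assoc_id = assoc_id;
	 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_STATUS, &st, &len) == 0)
	 *		printf("rwnd %u\n", st.sstat_rwnd);
	 */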
2316	case SCTP_STATUS:
2317		{
2318			struct sctp_nets *net;
2319			struct sctp_status *sstat;
2320
2321			SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize);
2322			SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id);
2323
2324			if (stcb == NULL) {
2325				error = EINVAL;
2326				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2327				break;
2328			}
2329			/*
2330			 * Passing the raw state is fine since
2331			 * sctp_constants.h is available to userland.
2333			 */
2334			sstat->sstat_state = stcb->asoc.state;
2335			sstat->sstat_assoc_id = sctp_get_associd(stcb);
2336			sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
2337			sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
2338			/*
2339			 * We can't include chunks that have been passed to
2340			 * the socket layer. Only things in queue.
2341			 */
2342			sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue +
2343			    stcb->asoc.cnt_on_all_streams);
2344
2345
2346			sstat->sstat_instrms = stcb->asoc.streamincnt;
2347			sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
2348			sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
2349			memcpy(&sstat->sstat_primary.spinfo_address,
2350			    &stcb->asoc.primary_destination->ro._l_addr,
2351			    ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
2352			net = stcb->asoc.primary_destination;
2353			((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
2354			/*
2355			 * Again, userland can consult sctp_constants.h for
2356			 * the meaning of the network state.
2357			 */
2358			sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
2359			sstat->sstat_primary.spinfo_cwnd = net->cwnd;
2360			sstat->sstat_primary.spinfo_srtt = net->lastsa;
2361			sstat->sstat_primary.spinfo_rto = net->RTO;
2362			sstat->sstat_primary.spinfo_mtu = net->mtu;
2363			sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
2364			SCTP_TCB_UNLOCK(stcb);
2365			*optsize = sizeof(*sstat);
2366		}
2367		break;
2368	case SCTP_RTOINFO:
2369		{
2370			struct sctp_rtoinfo *srto;
2371
2372			SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize);
2373			SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
2374
2375			if (stcb) {
2376				srto->srto_initial = stcb->asoc.initial_rto;
2377				srto->srto_max = stcb->asoc.maxrto;
2378				srto->srto_min = stcb->asoc.minrto;
2379				SCTP_TCB_UNLOCK(stcb);
2380			} else {
2381				SCTP_INP_RLOCK(inp);
2382				srto->srto_initial = inp->sctp_ep.initial_rto;
2383				srto->srto_max = inp->sctp_ep.sctp_maxrto;
2384				srto->srto_min = inp->sctp_ep.sctp_minrto;
2385				SCTP_INP_RUNLOCK(inp);
2386			}
2387			*optsize = sizeof(*srto);
2388		}
2389		break;
2390	case SCTP_ASSOCINFO:
2391		{
2392			struct sctp_assocparams *sasoc;
2393			uint32_t oldval;
2394
2395			SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize);
2396			SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
2397
2398			if (stcb) {
2399				oldval = sasoc->sasoc_cookie_life;
2400				sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life);
2401				sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
2402				sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
2403				sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
2404				sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
2405				SCTP_TCB_UNLOCK(stcb);
2406			} else {
2407				SCTP_INP_RLOCK(inp);
2408				sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life);
2409				sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
2410				sasoc->sasoc_number_peer_destinations = 0;
2411				sasoc->sasoc_peer_rwnd = 0;
2412				sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv);
2413				SCTP_INP_RUNLOCK(inp);
2414			}
2415			*optsize = sizeof(*sasoc);
2416		}
2417		break;
2418	case SCTP_DEFAULT_SEND_PARAM:
2419		{
2420			struct sctp_sndrcvinfo *s_info;
2421
2422			SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize);
2423			SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
2424
2425			if (stcb) {
2426				memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send));
2427				SCTP_TCB_UNLOCK(stcb);
2428			} else {
2429				SCTP_INP_RLOCK(inp);
2430				memcpy(s_info, &inp->def_send, sizeof(inp->def_send));
2431				SCTP_INP_RUNLOCK(inp);
2432			}
2433			*optsize = sizeof(*s_info);
2434		}
2435		break;
2436	case SCTP_INITMSG:
2437		{
2438			struct sctp_initmsg *sinit;
2439
2440			SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize);
2441			SCTP_INP_RLOCK(inp);
2442			sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
2443			sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
2444			sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
2445			sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
2446			SCTP_INP_RUNLOCK(inp);
2447			*optsize = sizeof(*sinit);
2448		}
2449		break;
2450	case SCTP_PRIMARY_ADDR:
2451		/* we allow a "get" operation on this */
2452		{
2453			struct sctp_setprim *ssp;
2454
2455			SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize);
2456			SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id);
2457
2458			if (stcb) {
2459				/* simply copy out the sockaddr_storage... */
2460				int len;
2461
2462				len = *optsize;
2463				if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len)
2464					len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len;
2465
2466				memcpy(&ssp->ssp_addr,
2467				    &stcb->asoc.primary_destination->ro._l_addr,
2468				    len);
2469				SCTP_TCB_UNLOCK(stcb);
2470			} else {
2471				error = EINVAL;
2472				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2473			}
2474			*optsize = sizeof(*ssp);
2475		}
2476		break;
2477
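	/*
	 * SCTP_HMAC_IDENT (get): returns the endpoint's locally configured
	 * HMAC identifier list; the supplied buffer must hold the
	 * sctp_hmacalgo header plus one ident per configured algorithm.
	 */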
2478	case SCTP_HMAC_IDENT:
2479		{
2480			struct sctp_hmacalgo *shmac;
2481			sctp_hmaclist_t *hmaclist;
2482			uint32_t size;
2483			int i;
2484
2485			SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize);
2486
2487			SCTP_INP_RLOCK(inp);
2488			hmaclist = inp->sctp_ep.local_hmacs;
2489			if (hmaclist == NULL) {
2490				/* no HMACs to return */
2491				*optsize = sizeof(*shmac);
2492				SCTP_INP_RUNLOCK(inp);
2493				break;
2494			}
2495			/* is there room for all of the hmac ids? */
2496			size = sizeof(*shmac) + (hmaclist->num_algo *
2497			    sizeof(shmac->shmac_idents[0]));
2498			if ((size_t)(*optsize) < size) {
2499				error = EINVAL;
2500				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2501				SCTP_INP_RUNLOCK(inp);
2502				break;
2503			}
2504			/* copy in the list */
2505			for (i = 0; i < hmaclist->num_algo; i++)
2506				shmac->shmac_idents[i] = hmaclist->hmac[i];
2507			SCTP_INP_RUNLOCK(inp);
2508			*optsize = size;
2509			break;
2510		}
2511	case SCTP_AUTH_ACTIVE_KEY:
2512		{
2513			struct sctp_authkeyid *scact;
2514
2515			SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize);
2516			SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
2517
2518			if (stcb) {
2519				/* get the active key on the assoc */
2520				scact->scact_keynumber = stcb->asoc.authinfo.assoc_keyid;
2521				SCTP_TCB_UNLOCK(stcb);
2522			} else {
2523				/* get the endpoint active key */
2524				SCTP_INP_RLOCK(inp);
2525				scact->scact_keynumber = inp->sctp_ep.default_keyid;
2526				SCTP_INP_RUNLOCK(inp);
2527			}
2528			*optsize = sizeof(*scact);
2529			break;
2530		}
2531	case SCTP_LOCAL_AUTH_CHUNKS:
2532		{
2533			struct sctp_authchunks *sac;
2534			sctp_auth_chklist_t *chklist = NULL;
2535			size_t size = 0;
2536
2537			SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2538			SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2539
2540			if (stcb) {
2541				/* get off the assoc */
2542				chklist = stcb->asoc.local_auth_chunks;
2543				/* is there enough space? */
2544				size = sctp_auth_get_chklist_size(chklist);
2545				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2546					error = EINVAL;
2547					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2548				} else {
2549					/* copy in the chunks */
2550					(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2551				}
2552				SCTP_TCB_UNLOCK(stcb);
2553			} else {
2554				/* get off the endpoint */
2555				SCTP_INP_RLOCK(inp);
2556				chklist = inp->sctp_ep.local_auth_chunks;
2557				/* is there enough space? */
2558				size = sctp_auth_get_chklist_size(chklist);
2559				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2560					error = EINVAL;
2561					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2562				} else {
2563					/* copy in the chunks */
2564					(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2565				}
2566				SCTP_INP_RUNLOCK(inp);
2567			}
2568			*optsize = sizeof(struct sctp_authchunks) + size;
2569			break;
2570		}
2571	case SCTP_PEER_AUTH_CHUNKS:
2572		{
2573			struct sctp_authchunks *sac;
2574			sctp_auth_chklist_t *chklist = NULL;
2575			size_t size = 0;
2576
2577			SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2578			SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2579
2580			if (stcb) {
2581				/* get off the assoc */
2582				chklist = stcb->asoc.peer_auth_chunks;
2583				/* is there enough space? */
2584				size = sctp_auth_get_chklist_size(chklist);
2585				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2586					error = EINVAL;
2587					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2588				} else {
2589					/* copy in the chunks */
2590					(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2591				}
2592				SCTP_TCB_UNLOCK(stcb);
2593			} else {
2594				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2595				error = ENOENT;
2596			}
2597			*optsize = sizeof(struct sctp_authchunks) + size;
2598			break;
2599		}
2600
2601
2602	default:
2603		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
2604		error = ENOPROTOOPT;
2605		*optsize = 0;
2606		break;
2607	}			/* end switch (sopt->sopt_name) */
2608	return (error);
2609}
2610
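/*
 * sctp_setopt() handles the IPPROTO_SCTP-level "set" socket options.
 * Options carrying an assoc id are applied to that association's TCB when
 * it can be found; otherwise they update the endpoint (inp) defaults used
 * for future associations.
 */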
2611static int
2612sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
2613    void *p)
2614{
2615	int error, set_opt;
2616	uint32_t *mopt;
2617	struct sctp_tcb *stcb = NULL;
2618	struct sctp_inpcb *inp = NULL;
2619	uint32_t vrf_id;
2620
2621	if (optval == NULL) {
2622		SCTP_PRINTF("optval is NULL\n");
2623		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2624		return (EINVAL);
2625	}
2626	inp = (struct sctp_inpcb *)so->so_pcb;
2627	if (inp == NULL) {
2628		SCTP_PRINTF("inp is NULL?\n");
2629		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2630		return (EINVAL);
2631	}
2632	vrf_id = inp->def_vrf_id;
2633
2634	error = 0;
2635	switch (optname) {
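	/*
	 * The first group of options are simple on/off endpoint features:
	 * a non-zero uint32_t argument turns the corresponding PCB flag
	 * on, zero turns it off.
	 */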
2636	case SCTP_NODELAY:
2637	case SCTP_AUTOCLOSE:
2638	case SCTP_AUTO_ASCONF:
2639	case SCTP_EXPLICIT_EOR:
2640	case SCTP_DISABLE_FRAGMENTS:
2641	case SCTP_USE_EXT_RCVINFO:
2642	case SCTP_I_WANT_MAPPED_V4_ADDR:
2643		/* copy in the option value */
2644		SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
2645		set_opt = 0;
2646		if (error)
2647			break;
2648		switch (optname) {
2649		case SCTP_DISABLE_FRAGMENTS:
2650			set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
2651			break;
2652		case SCTP_AUTO_ASCONF:
2653			/*
2654			 * NOTE: we don't really support this flag
2655			 */
2656			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2657				/* only valid for bound all sockets */
2658				set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
2659			} else {
2660				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2661				return (EINVAL);
2662			}
2663			break;
2664		case SCTP_EXPLICIT_EOR:
2665			set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
2666			break;
2667		case SCTP_USE_EXT_RCVINFO:
2668			set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
2669			break;
2670		case SCTP_I_WANT_MAPPED_V4_ADDR:
2671			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2672				set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
2673			} else {
2674				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2675				return (EINVAL);
2676			}
2677			break;
2678		case SCTP_NODELAY:
2679			set_opt = SCTP_PCB_FLAGS_NODELAY;
2680			break;
2681		case SCTP_AUTOCLOSE:
2682			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2683			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2684				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2685				return (EINVAL);
2686			}
2687			set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
2688			/*
2689			 * The stored value is in ticks. Note this does not
2690			 * affect old associations, only new ones.
2691			 */
2692			inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt);
2693			break;
2694		}
2695		SCTP_INP_WLOCK(inp);
2696		if (*mopt != 0) {
2697			sctp_feature_on(inp, set_opt);
2698		} else {
2699			sctp_feature_off(inp, set_opt);
2700		}
2701		SCTP_INP_WUNLOCK(inp);
2702		break;
2703	case SCTP_PARTIAL_DELIVERY_POINT:
2704		{
2705			uint32_t *value;
2706
2707			SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
2708			if (*value > SCTP_SB_LIMIT_RCV(so)) {
2709				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2710				error = EINVAL;
2711				break;
2712			}
2713			inp->partial_delivery_point = *value;
2714		}
2715		break;
2716	case SCTP_FRAGMENT_INTERLEAVE:
2717		/* not yet until we re-write sctp_recvmsg() */
2718		{
2719			uint32_t *level;
2720
2721			SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize);
2722			if (*level == SCTP_FRAG_LEVEL_2) {
2723				sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2724				sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2725			} else if (*level == SCTP_FRAG_LEVEL_1) {
2726				sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2727				sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2728			} else if (*level == SCTP_FRAG_LEVEL_0) {
2729				sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2730				sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2731
2732			} else {
2733				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2734				error = EINVAL;
2735			}
2736		}
2737		break;
2738	case SCTP_CMT_ON_OFF:
2739		{
2740			struct sctp_assoc_value *av;
2741
2742			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2743			if (sctp_cmt_on_off) {
2744				SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2745				if (stcb) {
2746					stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value;
2747					SCTP_TCB_UNLOCK(stcb);
2748				} else {
2749					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
2750					error = ENOTCONN;
2751				}
2752			} else {
2753				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
2754				error = ENOPROTOOPT;
2755			}
2756		}
2757		break;
2758		/* JRS - Set socket option for pluggable congestion control */
2759	case SCTP_PLUGGABLE_CC:
2760		{
2761			struct sctp_assoc_value *av;
2762
2763			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2764			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2765			if (stcb) {
2766				switch (av->assoc_value) {
2767					/*
2768					 * JRS - Standard TCP congestion
2769					 * control
2770					 */
2771				case SCTP_CC_RFC2581:
2772					{
2773						stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
2774						stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
2775						stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
2776						stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
2777						stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
2778						stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
2779						stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2780						stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2781						stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
2782						SCTP_TCB_UNLOCK(stcb);
2783						break;
2784					}
2785					/*
2786					 * JRS - High Speed TCP congestion
2787					 * control (Floyd)
2788					 */
2789				case SCTP_CC_HSTCP:
2790					{
2791						stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
2792						stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
2793						stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
2794						stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
2795						stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
2796						stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
2797						stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2798						stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2799						stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
2800						SCTP_TCB_UNLOCK(stcb);
2801						break;
2802					}
2803					/* JRS - HTCP congestion control */
2804				case SCTP_CC_HTCP:
2805					{
2806						stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
2807						stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
2808						stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
2809						stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
2810						stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
2811						stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
2812						stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2813						stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2814						stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
2815						SCTP_TCB_UNLOCK(stcb);
2816						break;
2817					}
2818					/*
2819					 * JRS - All other values are
2820					 * invalid
2821					 */
2822				default:
2823					{
2824						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2825						error = EINVAL;
2826						SCTP_TCB_UNLOCK(stcb);
2827						break;
2828					}
2829				}
2830			} else {
2831				switch (av->assoc_value) {
2832				case SCTP_CC_RFC2581:
2833				case SCTP_CC_HSTCP:
2834				case SCTP_CC_HTCP:
2835					inp->sctp_ep.sctp_default_cc_module = av->assoc_value;
2836					break;
2837				default:
2838					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2839					error = EINVAL;
2840					break;
2841				}
2842			}
2843		}
2844		break;
2845	case SCTP_CLR_STAT_LOG:
2846		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2847		error = EOPNOTSUPP;
2848		break;
2849	case SCTP_CONTEXT:
2850		{
2851			struct sctp_assoc_value *av;
2852
2853			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2854			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2855
2856			if (stcb) {
2857				stcb->asoc.context = av->assoc_value;
2858				SCTP_TCB_UNLOCK(stcb);
2859			} else {
2860				SCTP_INP_WLOCK(inp);
2861				inp->sctp_context = av->assoc_value;
2862				SCTP_INP_WUNLOCK(inp);
2863			}
2864		}
2865		break;
2866	case SCTP_VRF_ID:
2867		{
2868			uint32_t *default_vrfid;
2869
2870			SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize);
2871			if (*default_vrfid > SCTP_MAX_VRF_ID) {
2872				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2873				error = EINVAL;
2874				break;
2875			}
2876			inp->def_vrf_id = *default_vrfid;
2877			break;
2878		}
2879	case SCTP_DEL_VRF_ID:
2880		{
2881			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2882			error = EOPNOTSUPP;
2883			break;
2884		}
2885	case SCTP_ADD_VRF_ID:
2886		{
2887			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2888			error = EOPNOTSUPP;
2889			break;
2890		}
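	/*
	 * SCTP_DELAYED_SACK (set): sack_delay is clamped to
	 * SCTP_MAX_SACK_DELAY and to at least one tick; a zero delay or
	 * frequency leaves the corresponding current setting unchanged.
	 */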
2891	case SCTP_DELAYED_SACK:
2892		{
2893			struct sctp_sack_info *sack;
2894
2895			SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize);
2896			SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
2897			if (sack->sack_delay) {
2898				if (sack->sack_delay > SCTP_MAX_SACK_DELAY)
2899					sack->sack_delay = SCTP_MAX_SACK_DELAY;
2900			}
2901			if (stcb) {
2902				if (sack->sack_delay) {
2903					if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
2904						sack->sack_delay = TICKS_TO_MSEC(1);
2905					}
2906					stcb->asoc.delayed_ack = sack->sack_delay;
2907				}
2908				if (sack->sack_freq) {
2909					stcb->asoc.sack_freq = sack->sack_freq;
2910				}
2911				SCTP_TCB_UNLOCK(stcb);
2912			} else {
2913				SCTP_INP_WLOCK(inp);
2914				if (sack->sack_delay) {
2915					if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
2916						sack->sack_delay = TICKS_TO_MSEC(1);
2917					}
2918					inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay);
2919				}
2920				if (sack->sack_freq) {
2921					inp->sctp_ep.sctp_sack_freq = sack->sack_freq;
2922				}
2923				SCTP_INP_WUNLOCK(inp);
2924			}
2925			break;
2926		}
2927	case SCTP_AUTH_CHUNK:
2928		{
2929			struct sctp_authchunk *sauth;
2930
2931			SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize);
2932
2933			SCTP_INP_WLOCK(inp);
2934			if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) {
2935				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2936				error = EINVAL;
2937			}
2938			SCTP_INP_WUNLOCK(inp);
2939			break;
2940		}
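	/*
	 * SCTP_AUTH_KEY (set): installs a shared key on the association or
	 * on the endpoint. The key material follows the sctp_authkey
	 * header, so its length is optsize minus the header size; cached
	 * keys for the same key id are invalidated first.
	 */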
2941	case SCTP_AUTH_KEY:
2942		{
2943			struct sctp_authkey *sca;
2944			struct sctp_keyhead *shared_keys;
2945			sctp_sharedkey_t *shared_key;
2946			sctp_key_t *key = NULL;
2947			size_t size;
2948
2949			SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize);
2950			SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id);
2951			size = optsize - sizeof(*sca);
2952
2953			if (stcb) {
2954				/* set it on the assoc */
2955				shared_keys = &stcb->asoc.shared_keys;
2956				/* clear the cached keys for this key id */
2957				sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
2958				/*
2959				 * create the new shared key and
2960				 * insert/replace it
2961				 */
2962				if (size > 0) {
2963					key = sctp_set_key(sca->sca_key, (uint32_t) size);
2964					if (key == NULL) {
2965						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
2966						error = ENOMEM;
2967						SCTP_TCB_UNLOCK(stcb);
2968						break;
2969					}
2970				}
2971				shared_key = sctp_alloc_sharedkey();
2972				if (shared_key == NULL) {
2973					sctp_free_key(key);
2974					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
2975					error = ENOMEM;
2976					SCTP_TCB_UNLOCK(stcb);
2977					break;
2978				}
2979				shared_key->key = key;
2980				shared_key->keyid = sca->sca_keynumber;
2981				sctp_insert_sharedkey(shared_keys, shared_key);
2982				SCTP_TCB_UNLOCK(stcb);
2983			} else {
2984				/* set it on the endpoint */
2985				SCTP_INP_WLOCK(inp);
2986				shared_keys = &inp->sctp_ep.shared_keys;
2987				/*
2988				 * clear the cached keys on all assocs for
2989				 * this key id
2990				 */
2991				sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber);
2992				/*
2993				 * create the new shared key and
2994				 * insert/replace it
2995				 */
2996				if (size > 0) {
2997					key = sctp_set_key(sca->sca_key, (uint32_t) size);
2998					if (key == NULL) {
2999						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3000						error = ENOMEM;
3001						SCTP_INP_WUNLOCK(inp);
3002						break;
3003					}
3004				}
3005				shared_key = sctp_alloc_sharedkey();
3006				if (shared_key == NULL) {
3007					sctp_free_key(key);
3008					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3009					error = ENOMEM;
3010					SCTP_INP_WUNLOCK(inp);
3011					break;
3012				}
3013				shared_key->key = key;
3014				shared_key->keyid = sca->sca_keynumber;
3015				sctp_insert_sharedkey(shared_keys, shared_key);
3016				SCTP_INP_WUNLOCK(inp);
3017			}
3018			break;
3019		}
3020	case SCTP_HMAC_IDENT:
3021		{
3022			struct sctp_hmacalgo *shmac;
3023			sctp_hmaclist_t *hmaclist;
3024			uint32_t hmacid;
3025			size_t size, i, found;
3026
3027			SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize);
3028			size = (optsize - sizeof(*shmac)) / sizeof(shmac->shmac_idents[0]);
3029			hmaclist = sctp_alloc_hmaclist(size);
3030			if (hmaclist == NULL) {
3031				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3032				error = ENOMEM;
3033				break;
3034			}
3035			for (i = 0; i < size; i++) {
3036				hmacid = shmac->shmac_idents[i];
3037				if (sctp_auth_add_hmacid(hmaclist, (uint16_t) hmacid)) {
3038					/* invalid HMACs were found */
3039					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3040					error = EINVAL;
3041					sctp_free_hmaclist(hmaclist);
3042					goto sctp_set_hmac_done;
3043				}
3044			}
3045			found = 0;
3046			for (i = 0; i < hmaclist->num_algo; i++) {
3047				if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) {
3048					/* already in list */
3049					found = 1;
3050				}
3051			}
3052			if (!found) {
3053				sctp_free_hmaclist(hmaclist);
3054				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3055				error = EINVAL;
3056				break;
3057			}
3058			/* set it on the endpoint */
3059			SCTP_INP_WLOCK(inp);
3060			if (inp->sctp_ep.local_hmacs)
3061				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
3062			inp->sctp_ep.local_hmacs = hmaclist;
3063			SCTP_INP_WUNLOCK(inp);
3064	sctp_set_hmac_done:
3065			break;
3066		}
3067	case SCTP_AUTH_ACTIVE_KEY:
3068		{
3069			struct sctp_authkeyid *scact;
3070
3071			SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize);
3072			SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
3073
3074			/* set the active key in the right place */
3075			if (stcb) {
3076				/* set the active key on the assoc */
3077				if (sctp_auth_setactivekey(stcb, scact->scact_keynumber)) {
3078					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3079					error = EINVAL;
3080				}
3081				SCTP_TCB_UNLOCK(stcb);
3082			} else {
3083				/* set the active key on the endpoint */
3084				SCTP_INP_WLOCK(inp);
3085				if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber)) {
3086					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3087					error = EINVAL;
3088				}
3089				SCTP_INP_WUNLOCK(inp);
3090			}
3091			break;
3092		}
3093	case SCTP_AUTH_DELETE_KEY:
3094		{
3095			struct sctp_authkeyid *scdel;
3096
3097			SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize);
3098			SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id);
3099
3100			/* delete the key from the right place */
3101			if (stcb) {
3102				if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber)) {
3103					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3104					error = EINVAL;
3105				}
3106				SCTP_TCB_UNLOCK(stcb);
3107			} else {
3108				SCTP_INP_WLOCK(inp);
3109				if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber)) {
3110					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3111					error = EINVAL;
3112				}
3113				SCTP_INP_WUNLOCK(inp);
3114			}
3115			break;
3116		}
3117
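	/*
	 * SCTP_RESET_STREAMS (set): after checking that the peer supports
	 * stream reset and that no reset is already outstanding, the
	 * strrst_flags value is mapped onto the send_in/send_out/send_tsn
	 * arguments of sctp_send_str_reset_req() and a request is queued.
	 */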
3118	case SCTP_RESET_STREAMS:
3119		{
3120			struct sctp_stream_reset *strrst;
3121			uint8_t send_in = 0, send_tsn = 0, send_out = 0;
3122			int i;
3123
3124			SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize);
3125			SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id);
3126
3127			if (stcb == NULL) {
3128				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
3129				error = ENOENT;
3130				break;
3131			}
3132			if (stcb->asoc.peer_supports_strreset == 0) {
3133				/*
3134				 * Peer does not support it, we return
3135				 * protocol not supported since this is true
3136				 * for this feature and this peer, not the
3137				 * socket request in general.
3138				 */
3139				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT);
3140				error = EPROTONOSUPPORT;
3141				SCTP_TCB_UNLOCK(stcb);
3142				break;
3143			}
3144			if (stcb->asoc.stream_reset_outstanding) {
3145				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
3146				error = EALREADY;
3147				SCTP_TCB_UNLOCK(stcb);
3148				break;
3149			}
3150			if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
3151				send_in = 1;
3152			} else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
3153				send_out = 1;
3154			} else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
3155				send_in = 1;
3156				send_out = 1;
3157			} else if (strrst->strrst_flags == SCTP_RESET_TSN) {
3158				send_tsn = 1;
3159			} else {
3160				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3161				error = EINVAL;
3162				SCTP_TCB_UNLOCK(stcb);
3163				break;
3164			}
3165			for (i = 0; i < strrst->strrst_num_streams; i++) {
3166				if ((send_in) &&
3167				    (strrst->strrst_list[i] > stcb->asoc.streamincnt)) {
3169					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3170					error = EINVAL;
3171					goto get_out;
3172				}
3173				if ((send_out) &&
3174				    (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) {
3175					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3176					error = EINVAL;
3177					goto get_out;
3178				}
3179			}
3180			if (error) {
3181		get_out:
3182				SCTP_TCB_UNLOCK(stcb);
3183				break;
3184			}
3185			error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
3186			    strrst->strrst_list,
3187			    send_out, (stcb->asoc.str_reset_seq_in - 3),
3188			    send_in, send_tsn);
3189
3190			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED);
3191			SCTP_TCB_UNLOCK(stcb);
3192		}
3193		break;
3194
3195	case SCTP_CONNECT_X:
3196		if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
3197			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3198			error = EINVAL;
3199			break;
3200		}
3201		error = sctp_do_connect_x(so, inp, optval, optsize, p, 0);
3202		break;
3203
3204	case SCTP_CONNECT_X_DELAYED:
3205		if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
3206			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3207			error = EINVAL;
3208			break;
3209		}
3210		error = sctp_do_connect_x(so, inp, optval, optsize, p, 1);
3211		break;
3212
3213	case SCTP_CONNECT_X_COMPLETE:
3214		{
3215			struct sockaddr *sa;
3216			struct sctp_nets *net;
3217
3218			/* FIXME MT: check correct? */
3219			SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize);
3220
3221			/* find tcb */
3222			if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3223				SCTP_INP_RLOCK(inp);
3224				stcb = LIST_FIRST(&inp->sctp_asoc_list);
3225				if (stcb) {
3226					SCTP_TCB_LOCK(stcb);
3227					net = sctp_findnet(stcb, sa);
3228				}
3229				SCTP_INP_RUNLOCK(inp);
3230			} else {
3231				/*
3232				 * We increment the refcount here since
3233				 * sctp_findassociation_ep_addr() will do a
3234				 * decrement if it finds the stcb, as long as
3235				 * the locked tcb (last argument) is NOT a
3236				 * TCB, i.e. NULL.
3237				 */
3238				SCTP_INP_INCR_REF(inp);
3239				stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
3240				if (stcb == NULL) {
3241					SCTP_INP_DECR_REF(inp);
3242				}
3243			}
3244
3245			if (stcb == NULL) {
3246				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
3247				error = ENOENT;
3248				break;
3249			}
3250			if (stcb->asoc.delayed_connection == 1) {
3251				stcb->asoc.delayed_connection = 0;
3252				(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
3253				sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb,
3254				    stcb->asoc.primary_destination,
3255				    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9);
3256				sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
3257			} else {
3258				/*
3259				 * already expired or did not use delayed
3260				 * connectx
3261				 */
3262				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
3263				error = EALREADY;
3264			}
3265			SCTP_TCB_UNLOCK(stcb);
3266		}
3267		break;
3268	case SCTP_MAX_BURST:
3269		{
3270			uint8_t *burst;
3271
3272			SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize);
3273
3274			SCTP_INP_WLOCK(inp);
3275			if (*burst) {
3276				inp->sctp_ep.max_burst = *burst;
3277			}
3278			SCTP_INP_WUNLOCK(inp);
3279		}
3280		break;
3281	case SCTP_MAXSEG:
3282		{
3283			struct sctp_assoc_value *av;
3284			int ovh;
3285
3286			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
3287			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
3288
3289			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3290				ovh = SCTP_MED_OVERHEAD;
3291			} else {
3292				ovh = SCTP_MED_V4_OVERHEAD;
3293			}
3294			if (stcb) {
3295				if (av->assoc_value) {
3296					stcb->asoc.sctp_frag_point = (av->assoc_value + ovh);
3297				} else {
3298					stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
3299				}
3300				SCTP_TCB_UNLOCK(stcb);
3301			} else {
3302				SCTP_INP_WLOCK(inp);
3303				/*
3304				 * FIXME MT: I think this is not in tune
3305				 * with the API ID
3306				 */
3307				if (av->assoc_value) {
3308					inp->sctp_frag_point = (av->assoc_value + ovh);
3309				} else {
3310					inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
3311				}
3312				SCTP_INP_WUNLOCK(inp);
3313			}
3314		}
3315		break;
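	/*
	 * SCTP_EVENTS (set): each boolean in struct sctp_event_subscribe
	 * maps one-to-one onto a PCB feature flag toggled with
	 * sctp_feature_on()/sctp_feature_off().
	 */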
3316	case SCTP_EVENTS:
3317		{
3318			struct sctp_event_subscribe *events;
3319
3320			SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize);
3321
3322			SCTP_INP_WLOCK(inp);
3323			if (events->sctp_data_io_event) {
3324				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
3325			} else {
3326				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
3327			}
3328
3329			if (events->sctp_association_event) {
3330				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
3331			} else {
3332				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
3333			}
3334
3335			if (events->sctp_address_event) {
3336				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
3337			} else {
3338				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
3339			}
3340
3341			if (events->sctp_send_failure_event) {
3342				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
3343			} else {
3344				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
3345			}
3346
3347			if (events->sctp_peer_error_event) {
3348				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR);
3349			} else {
3350				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR);
3351			}
3352
3353			if (events->sctp_shutdown_event) {
3354				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
3355			} else {
3356				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
3357			}
3358
3359			if (events->sctp_partial_delivery_event) {
3360				sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
3361			} else {
3362				sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
3363			}
3364
3365			if (events->sctp_adaptation_layer_event) {
3366				sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
3367			} else {
3368				sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
3369			}
3370
3371			if (events->sctp_authentication_event) {
3372				sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT);
3373			} else {
3374				sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
3375			}
3376
3377			if (events->sctp_stream_reset_events) {
3378				sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
3379			} else {
3380				sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
3381			}
3382			SCTP_INP_WUNLOCK(inp);
3383		}
3384		break;
3385
3386	case SCTP_ADAPTATION_LAYER:
3387		{
3388			struct sctp_setadaptation *adap_bits;
3389
3390			SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize);
3391			SCTP_INP_WLOCK(inp);
3392			inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind;
3393			SCTP_INP_WUNLOCK(inp);
3394		}
3395		break;
3396#ifdef SCTP_DEBUG
3397	case SCTP_SET_INITIAL_DBG_SEQ:
3398		{
3399			uint32_t *vvv;
3400
3401			SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize);
3402			SCTP_INP_WLOCK(inp);
3403			inp->sctp_ep.initial_sequence_debug = *vvv;
3404			SCTP_INP_WUNLOCK(inp);
3405		}
3406		break;
3407#endif
3408	case SCTP_DEFAULT_SEND_PARAM:
3409		{
3410			struct sctp_sndrcvinfo *s_info;
3411
3412			SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize);
3413			SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
3414
3415			if (stcb) {
3416				if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) {
3417					memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send)));
3418				} else {
3419					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3420					error = EINVAL;
3421				}
3422				SCTP_TCB_UNLOCK(stcb);
3423			} else {
3424				SCTP_INP_WLOCK(inp);
3425				memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send)));
3426				SCTP_INP_WUNLOCK(inp);
3427			}
3428		}
3429		break;
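	/*
	 * SCTP_PEER_ADDR_PARAMS (set): the mirror of the "get" case above.
	 * The sanity checks below reject requests that both enable and
	 * disable the same feature; the remaining flags are then applied
	 * per destination, per association, or to the endpoint defaults.
	 */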
3430	case SCTP_PEER_ADDR_PARAMS:
3431		/* Applies to the specific association */
3432		{
3433			struct sctp_paddrparams *paddrp;
3434			struct sctp_nets *net;
3435
3436			SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize);
3437			SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
3438			net = NULL;
3439			if (stcb) {
3440				net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
3441			} else {
3442				/*
3443				 * We increment the refcount here since
3444				 * sctp_findassociation_ep_addr() will do a
3445				 * decrement if it finds the stcb, as long as
3446				 * the locked tcb (last argument) is NOT a
3447				 * TCB, i.e. NULL.
3448				 */
3449				SCTP_INP_INCR_REF(inp);
3450				stcb = sctp_findassociation_ep_addr(&inp,
3451				    (struct sockaddr *)&paddrp->spp_address,
3452				    &net, NULL, NULL);
3453				if (stcb == NULL) {
3454					SCTP_INP_DECR_REF(inp);
3455				}
3456			}
3457			if (stcb && (net == NULL)) {
3458				struct sockaddr *sa;
3459
3460				sa = (struct sockaddr *)&paddrp->spp_address;
3461				if (sa->sa_family == AF_INET) {
3462					struct sockaddr_in *sin;
3463
3464					sin = (struct sockaddr_in *)sa;
3465					if (sin->sin_addr.s_addr) {
3466						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3467						SCTP_TCB_UNLOCK(stcb);
3468						error = EINVAL;
3469						break;
3470					}
3471				} else if (sa->sa_family == AF_INET6) {
3472					struct sockaddr_in6 *sin6;
3473
3474					sin6 = (struct sockaddr_in6 *)sa;
3475					if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
3476						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3477						SCTP_TCB_UNLOCK(stcb);
3478						error = EINVAL;
3479						break;
3480					}
3481				} else {
3482					error = EAFNOSUPPORT;
3483					SCTP_TCB_UNLOCK(stcb);
3484					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
3485					break;
3486				}
3487			}
3488			/* sanity checks */
3489			if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) {
3490				if (stcb)
3491					SCTP_TCB_UNLOCK(stcb);
3492				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3493				return (EINVAL);
3494			}
3495			if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) {
3496				if (stcb)
3497					SCTP_TCB_UNLOCK(stcb);
3498				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3499				return (EINVAL);
3500			}
3501			if (stcb) {
3502				/************************TCB SPECIFIC SET ******************/
3503				/*
3504				 * Do we change the timer for HB? We run
3505				 * only one.
3506				 */
3507				int ovh = 0;
3508
3509				if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3510					ovh = SCTP_MED_OVERHEAD;
3511				} else {
3512					ovh = SCTP_MED_V4_OVERHEAD;
3513				}
3514
3515				if (paddrp->spp_hbinterval)
3516					stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
3517				else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3518					stcb->asoc.heart_beat_delay = 0;
3519
3520				/* per-destination (net) settings? */
3521				if (net) {
3522					/************************NET SPECIFIC SET ******************/
3523					if (paddrp->spp_flags & SPP_HB_DEMAND) {
3524						/* on demand HB */
3525						if (sctp_send_hb(stcb, 1, net) < 0) {
3526							/* asoc destroyed */
3527							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3528							error = EINVAL;
3529							break;
3530						}
3531					}
3532					if (paddrp->spp_flags & SPP_HB_DISABLE) {
3533						net->dest_state |= SCTP_ADDR_NOHB;
3534					}
3535					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3536						net->dest_state &= ~SCTP_ADDR_NOHB;
3537					}
3538					if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
3539						if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3540							sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3541							    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3542						}
3543						if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3544							net->mtu = paddrp->spp_pathmtu + ovh;
3545							if (net->mtu < stcb->asoc.smallest_mtu) {
3546#ifdef SCTP_PRINT_FOR_B_AND_M
3547								SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n",
3548								    net->mtu);
3549#endif
3550								sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3551							}
3552						}
3553					}
3554					if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3555						if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3556							sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3557						}
3558					}
3559					if (paddrp->spp_pathmaxrxt)
3560						net->failure_threshold = paddrp->spp_pathmaxrxt;
3561#ifdef INET
3562					if (paddrp->spp_flags & SPP_IPV4_TOS) {
3563						if (net->ro._l_addr.sin.sin_family == AF_INET) {
3564							net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc;
3565						}
3566					}
3567#endif
3568#ifdef INET6
3569					if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
3570						if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
3571							net->tos_flowlabel = paddrp->spp_ipv6_flowlabel;
3572						}
3573					}
3574#endif
3575				} else {
3576					/************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/
3577					if (paddrp->spp_pathmaxrxt)
3578						stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
3579
3580					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3581						/* Turn back on the timer */
3582						stcb->asoc.hb_is_disabled = 0;
3583						sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3584					}
3585					if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
3586						TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3587							if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3588								sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3589								    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3590							}
3591							if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3592								net->mtu = paddrp->spp_pathmtu + ovh;
3593								if (net->mtu < stcb->asoc.smallest_mtu) {
3594#ifdef SCTP_PRINT_FOR_B_AND_M
3595									SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n",
3596									    net->mtu);
3597#endif
3598									sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3599								}
3600							}
3601						}
3602					}
3603					if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3604						TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3605							if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3606								sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3607							}
3608						}
3609					}
3610					if (paddrp->spp_flags & SPP_HB_DISABLE) {
3611						int cnt_of_unconf = 0;
3612						struct sctp_nets *lnet;
3613
3614						stcb->asoc.hb_is_disabled = 1;
3615						TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
3616							if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
3617								cnt_of_unconf++;
3618							}
3619						}
3620						/*
3621						 * stop the timer ONLY if we
3622						 * have no unconfirmed
3623						 * addresses
3624						 */
3625						if (cnt_of_unconf == 0) {
3626							TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3627								sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
3628								    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11);
3629							}
3630						}
3631					}
3632					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3633						/* start up the timer. */
3634						TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3635							sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3636						}
3637					}
3638#ifdef INET
3639					if (paddrp->spp_flags & SPP_IPV4_TOS)
3640						stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc;
3641#endif
3642#ifdef INET6
3643					if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL)
3644						stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel;
3645#endif
3646
3647				}
3648				SCTP_TCB_UNLOCK(stcb);
3649			} else {
3650				/************************NO TCB, SET TO default stuff ******************/
3651				SCTP_INP_WLOCK(inp);
3652				/*
3653				 * The TOS/FLOWLABEL defaults are set with
3654				 * the IP-level options on the socket.
3655				 */
3656				if (paddrp->spp_pathmaxrxt) {
3657					inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
3658				}
3659				if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3660					inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0;
3661				else if (paddrp->spp_hbinterval) {
3662					if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL)
3663						paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL;
3664					inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval);
3665				}
3666				if (paddrp->spp_flags & SPP_HB_ENABLE) {
3667					sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3668
3669				} else if (paddrp->spp_flags & SPP_HB_DISABLE) {
3670					sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3671				}
3672				SCTP_INP_WUNLOCK(inp);
3673			}
3674		}
3675		break;
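	/*
	 * Illustrative sketch only: the SCTP_PEER_ADDR_PARAMS handler above
	 * is normally reached from userland through setsockopt(2) at level
	 * IPPROTO_SCTP.  The descriptor "fd", the helper name and the values
	 * below are illustrative assumptions, not taken from this file.
	 *
	 *	#include <sys/types.h>
	 *	#include <sys/socket.h>
	 *	#include <netinet/in.h>
	 *	#include <netinet/sctp.h>
	 *	#include <netinet/sctp_uio.h>
	 *	#include <string.h>
	 *
	 *	static int
	 *	enable_heartbeat(int fd, sctp_assoc_t assoc_id, uint32_t msecs)
	 *	{
	 *		struct sctp_paddrparams spp;
	 *
	 *		memset(&spp, 0, sizeof(spp));
	 *		spp.spp_assoc_id = assoc_id;	// 0 selects endpoint defaults
	 *		spp.spp_hbinterval = msecs;	// heartbeat interval in ms
	 *		spp.spp_flags = SPP_HB_ENABLE;	// never with SPP_HB_DISABLE
	 *		return (setsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
	 *		    &spp, sizeof(spp)));
	 *	}
	 *
	 * Leaving spp_address zeroed applies the change association- (or
	 * endpoint-) wide; filling it in targets the single peer address
	 * that sctp_findnet() resolves above.
	 */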
3676	case SCTP_RTOINFO:
3677		{
3678			struct sctp_rtoinfo *srto;
3679			uint32_t new_init, new_min, new_max;
3680
3681			SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize);
3682			SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
3683
3684			if (stcb) {
3685				if (srto->srto_initial)
3686					new_init = srto->srto_initial;
3687				else
3688					new_init = stcb->asoc.initial_rto;
3689				if (srto->srto_max)
3690					new_max = srto->srto_max;
3691				else
3692					new_max = stcb->asoc.maxrto;
3693				if (srto->srto_min)
3694					new_min = srto->srto_min;
3695				else
3696					new_min = stcb->asoc.minrto;
3697				if ((new_min <= new_init) && (new_init <= new_max)) {
3698					stcb->asoc.initial_rto = new_init;
3699					stcb->asoc.maxrto = new_max;
3700					stcb->asoc.minrto = new_min;
3701				} else {
3702					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDOM);
3703					error = EDOM;
3704				}
3705				SCTP_TCB_UNLOCK(stcb);
3706			} else {
3707				SCTP_INP_WLOCK(inp);
3708				if (srto->srto_initial)
3709					new_init = srto->srto_initial;
3710				else
3711					new_init = inp->sctp_ep.initial_rto;
3712				if (srto->srto_max)
3713					new_max = srto->srto_max;
3714				else
3715					new_max = inp->sctp_ep.sctp_maxrto;
3716				if (srto->srto_min)
3717					new_min = srto->srto_min;
3718				else
3719					new_min = inp->sctp_ep.sctp_minrto;
3720				if ((new_min <= new_init) && (new_init <= new_max)) {
3721					inp->sctp_ep.initial_rto = new_init;
3722					inp->sctp_ep.sctp_maxrto = new_max;
3723					inp->sctp_ep.sctp_minrto = new_min;
3724				} else {
3725					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDOM);
3726					error = EDOM;
3727				}
3728				SCTP_INP_WUNLOCK(inp);
3729			}
3730		}
3731		break;
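	/*
	 * Illustrative sketch only (same userland headers as the
	 * SCTP_PEER_ADDR_PARAMS sketch above; "fd" is a hypothetical SCTP
	 * socket): RTO values are in milliseconds, a zero field keeps the
	 * current value, and the handler above insists on
	 * srto_min <= srto_initial <= srto_max, returning EDOM otherwise.
	 *
	 *	struct sctp_rtoinfo rto;
	 *
	 *	memset(&rto, 0, sizeof(rto));
	 *	rto.srto_assoc_id = 0;		// 0 = endpoint defaults
	 *	rto.srto_min = 500;
	 *	rto.srto_initial = 2000;
	 *	rto.srto_max = 10000;
	 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO,
	 *	    &rto, sizeof(rto));
	 */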
3732	case SCTP_ASSOCINFO:
3733		{
3734			struct sctp_assocparams *sasoc;
3735
3736			SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize);
3737			SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
3738			if (sasoc->sasoc_cookie_life) {
3739				/* boundary check the cookie life */
3740				if (sasoc->sasoc_cookie_life < 1000)
3741					sasoc->sasoc_cookie_life = 1000;
3742				if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) {
3743					sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE;
3744				}
3745			}
3746			if (stcb) {
3747				if (sasoc->sasoc_asocmaxrxt)
3748					stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
3749				sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
3750				sasoc->sasoc_peer_rwnd = 0;
3751				sasoc->sasoc_local_rwnd = 0;
3752				if (sasoc->sasoc_cookie_life) {
3753					stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
3754				}
3755				SCTP_TCB_UNLOCK(stcb);
3756			} else {
3757				SCTP_INP_WLOCK(inp);
3758				if (sasoc->sasoc_asocmaxrxt)
3759					inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
3760				sasoc->sasoc_number_peer_destinations = 0;
3761				sasoc->sasoc_peer_rwnd = 0;
3762				sasoc->sasoc_local_rwnd = 0;
3763				if (sasoc->sasoc_cookie_life) {
3764					inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
3765				}
3766				SCTP_INP_WUNLOCK(inp);
3767			}
3768		}
3769		break;
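	/*
	 * Illustrative sketch only (same userland headers, hypothetical
	 * "fd"): SCTP_ASSOCINFO adjusts the association retransmit limit and
	 * the cookie lifetime; the handler above clamps sasoc_cookie_life to
	 * the range [1000 ms, SCTP_MAX_COOKIE_LIFE].
	 *
	 *	struct sctp_assocparams ap;
	 *
	 *	memset(&ap, 0, sizeof(ap));
	 *	ap.sasoc_assoc_id = 0;		// 0 = endpoint defaults
	 *	ap.sasoc_asocmaxrxt = 8;	// association retransmit limit
	 *	ap.sasoc_cookie_life = 60000;	// cookie life in ms
	 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_ASSOCINFO,
	 *	    &ap, sizeof(ap));
	 */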
3770	case SCTP_INITMSG:
3771		{
3772			struct sctp_initmsg *sinit;
3773
3774			SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize);
3775			SCTP_INP_WLOCK(inp);
3776			if (sinit->sinit_num_ostreams)
3777				inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
3778
3779			if (sinit->sinit_max_instreams)
3780				inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
3781
3782			if (sinit->sinit_max_attempts)
3783				inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
3784
3785			if (sinit->sinit_max_init_timeo)
3786				inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
3787			SCTP_INP_WUNLOCK(inp);
3788		}
3789		break;
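	/*
	 * Illustrative sketch only (same userland headers, hypothetical
	 * "fd"): SCTP_INITMSG is usually set before the association is
	 * created so the INIT chunk advertises the desired stream counts;
	 * zero fields leave the endpoint defaults untouched.
	 *
	 *	struct sctp_initmsg im;
	 *
	 *	memset(&im, 0, sizeof(im));
	 *	im.sinit_num_ostreams = 10;	// outbound streams requested
	 *	im.sinit_max_instreams = 10;	// inbound streams accepted
	 *	im.sinit_max_attempts = 4;	// INIT retransmission limit
	 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG,
	 *	    &im, sizeof(im));
	 */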
3790	case SCTP_PRIMARY_ADDR:
3791		{
3792			struct sctp_setprim *spa;
3793			struct sctp_nets *net, *lnet;
3794
3795			SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize);
3796			SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id);
3797
3798			net = NULL;
3799			if (stcb) {
3800				net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
3801			} else {
3802				/*
3803				 * We increment here since
3804				 * sctp_findassociation_ep_addr() will do a
3805				 * decrement if it finds the stcb, as long as
3806				 * the locked tcb (last argument) is NOT a
3807				 * TCB, i.e. NULL.
3808				 */
3809				SCTP_INP_INCR_REF(inp);
3810				stcb = sctp_findassociation_ep_addr(&inp,
3811				    (struct sockaddr *)&spa->ssp_addr,
3812				    &net, NULL, NULL);
3813				if (stcb == NULL) {
3814					SCTP_INP_DECR_REF(inp);
3815				}
3816			}
3817
3818			if ((stcb) && (net)) {
3819				if ((net != stcb->asoc.primary_destination) &&
3820				    (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
3821					/* Ok we need to set it */
3822					lnet = stcb->asoc.primary_destination;
3823					if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) {
3824						if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3825							net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
3826						}
3827						net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
3828					}
3829				}
3830			} else {
3831				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3832				error = EINVAL;
3833			}
3834			if (stcb) {
3835				SCTP_TCB_UNLOCK(stcb);
3836			}
3837		}
3838		break;
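	/*
	 * Illustrative sketch only (same userland headers; "fd", "assoc_id"
	 * and a filled-in struct sockaddr_in "peer" naming one of the
	 * association's destinations are hypothetical):
	 *
	 *	struct sctp_setprim prim;
	 *
	 *	memset(&prim, 0, sizeof(prim));
	 *	prim.ssp_assoc_id = assoc_id;
	 *	memcpy(&prim.ssp_addr, &peer, sizeof(peer));
	 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
	 *	    &prim, sizeof(prim));
	 *
	 * As the handler above shows, the address must name an existing
	 * destination of the association (otherwise EINVAL), and addresses
	 * still marked unconfirmed are quietly skipped.
	 */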
3839	case SCTP_SET_DYNAMIC_PRIMARY:
3840		{
3841			union sctp_sockstore *ss;
3842
3843			error = priv_check(curthread,
3844			    PRIV_NETINET_RESERVEDPORT);
3845			if (error)
3846				break;
3847
3848			SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize);
3849			/* SUPER USER CHECK? */
3850			error = sctp_dynamic_set_primary(&ss->sa, vrf_id);
3851		}
3852		break;
3853	case SCTP_SET_PEER_PRIMARY_ADDR:
3854		{
3855			struct sctp_setpeerprim *sspp;
3856
3857			SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize);
3858			SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id);
3859			if (stcb != NULL) {
3860				struct sctp_ifa *ifa;
3861
3862				ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr,
3863				    stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED);
3864				if (ifa == NULL) {
3865					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3866					error = EINVAL;
3867					goto out_of_it;
3868				}
3869				if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
3870					/*
3871					 * Must validate the ifa found is in
3872					 * our ep
3873					 */
3874					struct sctp_laddr *laddr;
3875					int found = 0;
3876
3877					LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3878						if (laddr->ifa == NULL) {
3879							SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
3880							    __FUNCTION__);
3881							continue;
3882						}
3883						if (laddr->ifa == ifa) {
3884							found = 1;
3885							break;
3886						}
3887					}
3888					if (!found) {
3889						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3890						error = EINVAL;
3891						goto out_of_it;
3892					}
3893				}
3894				if (sctp_set_primary_ip_address_sa(stcb,
3895				    (struct sockaddr *)&sspp->sspp_addr) != 0) {
3896					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3897					error = EINVAL;
3898				}
3899		out_of_it:
3900				SCTP_TCB_UNLOCK(stcb);
3901			} else {
3902				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3903				error = EINVAL;
3904			}
3905
3906		}
3907		break;
3908	case SCTP_BINDX_ADD_ADDR:
3909		{
3910			struct sctp_getaddresses *addrs;
3911			size_t sz;
3912			struct thread *td;
3913			int prison = 0;
3914
3915			td = (struct thread *)p;
3916			if (jailed(td->td_ucred)) {
3917				prison = 1;
3918			}
3919			SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses,
3920			    optsize);
3921			if (addrs->addr->sa_family == AF_INET) {
3922				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
3923				if (optsize < sz) {
3924					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3925					error = EINVAL;
3926					break;
3927				}
3928				if (prison && prison_ip(td->td_ucred, 0, &(((struct sockaddr_in *)(addrs->addr))->sin_addr.s_addr))) {
3929					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRNOTAVAIL);
3930					error = EADDRNOTAVAIL;
3931				}
3932			} else if (addrs->addr->sa_family == AF_INET6) {
3933				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
3934				if (optsize < sz) {
3935					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3936					error = EINVAL;
3937					break;
3938				}
3939				/* JAIL XXXX Add else here for V6 */
3940			}
3941			sctp_bindx_add_address(so, inp, addrs->addr,
3942			    addrs->sget_assoc_id, vrf_id,
3943			    &error, p);
3944		}
3945		break;
3946	case SCTP_BINDX_REM_ADDR:
3947		{
3948			struct sctp_getaddresses *addrs;
3949			size_t sz;
3950			struct thread *td;
3951			int prison = 0;
3952
3953			td = (struct thread *)p;
3954			if (jailed(td->td_ucred)) {
3955				prison = 1;
3956			}
3957			SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
3958			if (addrs->addr->sa_family == AF_INET) {
3959				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
3960				if (optsize < sz) {
3961					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3962					error = EINVAL;
3963					break;
3964				}
3965				if (prison && prison_ip(td->td_ucred, 0, &(((struct sockaddr_in *)(addrs->addr))->sin_addr.s_addr))) {
3966					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRNOTAVAIL);
3967					error = EADDRNOTAVAIL;
3968				}
3969			} else if (addrs->addr->sa_family == AF_INET6) {
3970				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
3971				if (optsize < sz) {
3972					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3973					error = EINVAL;
3974					break;
3975				}
3976				/* JAIL XXXX Add else here for V6 */
3977			}
3978			sctp_bindx_delete_address(so, inp, addrs->addr,
3979			    addrs->sget_assoc_id, vrf_id,
3980			    &error);
3981		}
3982		break;
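	/*
	 * Illustrative note: SCTP_BINDX_ADD_ADDR and SCTP_BINDX_REM_ADDR are
	 * normally issued through the libc wrapper sctp_bindx(3) rather than
	 * called directly.  A sketch, assuming a hypothetical bound SCTP
	 * descriptor "fd" and a filled-in struct sockaddr_in "extra":
	 *
	 *	(void)sctp_bindx(fd, (struct sockaddr *)&extra, 1,
	 *	    SCTP_BINDX_ADD_ADDR);
	 */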
3983	default:
3984		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
3985		error = ENOPROTOOPT;
3986		break;
3987	}			/* end switch (opt) */
3988	return (error);
3989}
3990
3991
3992int
3993sctp_ctloutput(struct socket *so, struct sockopt *sopt)
3994{
3995	void *optval = NULL;
3996	size_t optsize = 0;
3997	struct sctp_inpcb *inp;
3998	void *p;
3999	int error = 0;
4000
4001	inp = (struct sctp_inpcb *)so->so_pcb;
4002	if (inp == 0) {
4003		/* Do the same as TCP does, since we are not set up. */
4004		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4005		return (ECONNRESET);
4006	}
4007	if (sopt->sopt_level != IPPROTO_SCTP) {
4008		/* wrong proto level... send back up to IP */
4009#ifdef INET6
4010		if (INP_CHECK_SOCKAF(so, AF_INET6))
4011			error = ip6_ctloutput(so, sopt);
4012		else
4013#endif				/* INET6 */
4014			error = ip_ctloutput(so, sopt);
4015		return (error);
4016	}
4017	optsize = sopt->sopt_valsize;
4018	if (optsize) {
4019		SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT);
4020		if (optval == NULL) {
4021			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS);
4022			return (ENOBUFS);
4023		}
4024		error = sooptcopyin(sopt, optval, optsize, optsize);
4025		if (error) {
4026			SCTP_FREE(optval, SCTP_M_SOCKOPT);
4027			goto out;
4028		}
4029	}
4030	p = (void *)sopt->sopt_td;
4031	if (sopt->sopt_dir == SOPT_SET) {
4032		error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p);
4033	} else if (sopt->sopt_dir == SOPT_GET) {
4034		error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p);
4035	} else {
4036		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4037		error = EINVAL;
4038	}
4039	if ((error == 0) && (optval != NULL)) {
4040		error = sooptcopyout(sopt, optval, optsize);
4041		SCTP_FREE(optval, SCTP_M_SOCKOPT);
4042	} else if (optval != NULL) {
4043		SCTP_FREE(optval, SCTP_M_SOCKOPT);
4044	}
4045out:
4046	return (error);
4047}
4048
4049
4050static int
4051sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
4052{
4053	int error = 0;
4054	int create_lock_on = 0;
4055	uint32_t vrf_id;
4056	struct sctp_inpcb *inp;
4057	struct sctp_tcb *stcb = NULL;
4058
4059	inp = (struct sctp_inpcb *)so->so_pcb;
4060	if (inp == 0) {
4061		/* Do the same as TCP does, since we are not set up. */
4062		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4063		return (ECONNRESET);
4064	}
4065	if (addr == NULL) {
4066		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4067		return EINVAL;
4068	}
4069	if ((addr->sa_family == AF_INET6) && (addr->sa_len != sizeof(struct sockaddr_in6))) {
4070		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4071		return (EINVAL);
4072	}
4073	if ((addr->sa_family == AF_INET) && (addr->sa_len != sizeof(struct sockaddr_in))) {
4074		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4075		return (EINVAL);
4076	}
4077	SCTP_INP_INCR_REF(inp);
4078	SCTP_ASOC_CREATE_LOCK(inp);
4079	create_lock_on = 1;
4080
4081
4082	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4083	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4084		/* Should I really unlock ? */
4085		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
4086		error = EFAULT;
4087		goto out_now;
4088	}
4089#ifdef INET6
4090	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
4091	    (addr->sa_family == AF_INET6)) {
4092		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4093		error = EINVAL;
4094		goto out_now;
4095	}
4096#endif				/* INET6 */
4097	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
4098	    SCTP_PCB_FLAGS_UNBOUND) {
4099		/* Bind an ephemeral port */
4100		error = sctp_inpcb_bind(so, NULL, NULL, p);
4101		if (error) {
4102			goto out_now;
4103		}
4104	}
4105	/* Now do we connect? */
4106	if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
4107		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4108		error = EINVAL;
4109		goto out_now;
4110	}
4111	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4112	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
4113		/* We are already connected AND the TCP model */
4114		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
4115		error = EADDRINUSE;
4116		goto out_now;
4117	}
4118	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4119		SCTP_INP_RLOCK(inp);
4120		stcb = LIST_FIRST(&inp->sctp_asoc_list);
4121		SCTP_INP_RUNLOCK(inp);
4122	} else {
4123		/*
4124		 * We increment here since sctp_findassociation_ep_addr()
4125		 * will do a decrement if it finds the stcb, as long as the
4126		 * locked tcb (last argument) is NOT a TCB, i.e. NULL.
4127		 */
4128		SCTP_INP_INCR_REF(inp);
4129		stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
4130		if (stcb == NULL) {
4131			SCTP_INP_DECR_REF(inp);
4132		} else {
4133			SCTP_TCB_UNLOCK(stcb);
4134		}
4135	}
4136	if (stcb != NULL) {
4137		/* Already have or are bringing up an association */
4138		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
4139		error = EALREADY;
4140		goto out_now;
4141	}
4142	vrf_id = inp->def_vrf_id;
4143	/* We are GOOD to go */
4144	stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id, p);
4145	if (stcb == NULL) {
4146		/* Gak! no memory */
4147		goto out_now;
4148	}
4149	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
4150		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
4151		/* Set the connected flag so we can queue data */
4152		soisconnecting(so);
4153	}
4154	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
4155	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
4156
4157	/* initialize authentication parameters for the assoc */
4158	sctp_initialize_auth_params(inp, stcb);
4159
4160	sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
4161	SCTP_TCB_UNLOCK(stcb);
4162out_now:
4163	if (create_lock_on) {
4164		SCTP_ASOC_CREATE_UNLOCK(inp);
4165	}
4166	SCTP_INP_DECR_REF(inp);
4167	return error;
4168}
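/*
 * Illustrative sketch only: from userland, the path through sctp_connect()
 * above is an ordinary connect(2) on a one-to-one style SCTP socket.  The
 * port and documentation address below are placeholders; assumed headers
 * are <sys/socket.h>, <netinet/in.h>, <netinet/sctp.h>, <arpa/inet.h> and
 * <string.h>.
 *
 *	int fd;
 *	struct sockaddr_in sin;
 *
 *	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_port = htons(9);
 *	(void)inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	(void)connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 * The kernel side binds an ephemeral port if needed, allocates the
 * association, enters COOKIE_WAIT and sends the INIT; a blocking socket
 * then waits in the generic socket code for the association to come up,
 * while a non-blocking one returns EINPROGRESS.
 */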
4169
4170int
4171sctp_listen(struct socket *so, int backlog, struct thread *p)
4172{
4173	/*
4174	 * Note this module depends on the protocol processing being called
4175	 * AFTER any socket level flags and backlog are applied to the
4176	 * socket. The traditional way that the socket flags are applied is
4177	 * AFTER protocol processing. We have made a change to the
4178	 * sys/kern/uipc_socket.c module to reverse this, but this MUST be in
4179	 * place if the socket API for SCTP is to work properly.
4180	 */
4181
4182	int error = 0;
4183	struct sctp_inpcb *inp;
4184
4185	inp = (struct sctp_inpcb *)so->so_pcb;
4186	if (inp == 0) {
4187		/* Do the same as TCP does, since we are not set up. */
4188		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4189		return (ECONNRESET);
4190	}
4191	SCTP_INP_RLOCK(inp);
4192#ifdef SCTP_LOCK_LOGGING
4193	if (sctp_logging_level & SCTP_LOCK_LOGGING_ENABLE) {
4194		sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
4195	}
4196#endif
4197	SOCK_LOCK(so);
4198	error = solisten_proto_check(so);
4199	if (error) {
4200		SOCK_UNLOCK(so);
4201		SCTP_INP_RUNLOCK(inp);
4202		return (error);
4203	}
4204	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4205	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
4206		/* We are already connected AND the TCP model */
4207		SCTP_INP_RUNLOCK(inp);
4208		SOCK_UNLOCK(so);
4209		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
4210		return (EADDRINUSE);
4211	}
4212	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
4213		/* We must do a bind. */
4214		SOCK_UNLOCK(so);
4215		SCTP_INP_RUNLOCK(inp);
4216		if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) {
4217			/* bind error, probably perm */
4218			return (error);
4219		}
4220		SOCK_LOCK(so);
4221	} else {
4222		if (backlog != 0) {
4223			inp->sctp_flags |= SCTP_PCB_FLAGS_LISTENING;
4224		} else {
4225			inp->sctp_flags &= ~SCTP_PCB_FLAGS_LISTENING;
4226		}
4227		SCTP_INP_RUNLOCK(inp);
4228	}
4229	/* It appears for 7.0 and on, we must always call this. */
4230	solisten_proto(so, backlog);
4231	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
4232		/* remove the ACCEPTCONN flag for one-to-many sockets */
4233		so->so_options &= ~SO_ACCEPTCONN;
4234	}
4235	if (backlog == 0) {
4236		/* turning off listen */
4237		so->so_options &= ~SO_ACCEPTCONN;
4238	}
4239	SOCK_UNLOCK(so);
4240	return (error);
4241}
4242
4243static int sctp_defered_wakeup_cnt = 0;
4244
4245int
4246sctp_accept(struct socket *so, struct sockaddr **addr)
4247{
4248	struct sctp_tcb *stcb;
4249	struct sctp_inpcb *inp;
4250	union sctp_sockstore store;
4251
4252#ifdef INET6
4253	int error;
4254
4255#endif
4256	inp = (struct sctp_inpcb *)so->so_pcb;
4257
4258	if (inp == 0) {
4259		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4260		return (ECONNRESET);
4261	}
4262	SCTP_INP_RLOCK(inp);
4263	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
4264		SCTP_INP_RUNLOCK(inp);
4265		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
4266		return (EOPNOTSUPP);
4267	}
4268	if (so->so_state & SS_ISDISCONNECTED) {
4269		SCTP_INP_RUNLOCK(inp);
4270		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED);
4271		return (ECONNABORTED);
4272	}
4273	stcb = LIST_FIRST(&inp->sctp_asoc_list);
4274	if (stcb == NULL) {
4275		SCTP_INP_RUNLOCK(inp);
4276		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4277		return (ECONNRESET);
4278	}
4279	SCTP_TCB_LOCK(stcb);
4280	SCTP_INP_RUNLOCK(inp);
4281	store = stcb->asoc.primary_destination->ro._l_addr;
4282	SCTP_TCB_UNLOCK(stcb);
4283	switch (store.sa.sa_family) {
4284	case AF_INET:
4285		{
4286			struct sockaddr_in *sin;
4287
4288			SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4289			sin->sin_family = AF_INET;
4290			sin->sin_len = sizeof(*sin);
4291			sin->sin_port = ((struct sockaddr_in *)&store)->sin_port;
4292			sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr;
4293			*addr = (struct sockaddr *)sin;
4294			break;
4295		}
4296#ifdef INET6
4297	case AF_INET6:
4298		{
4299			struct sockaddr_in6 *sin6;
4300
4301			SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
4302			sin6->sin6_family = AF_INET6;
4303			sin6->sin6_len = sizeof(*sin6);
4304			sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port;
4305
4306			sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr;
4307			if ((error = sa6_recoverscope(sin6)) != 0) {
4308				SCTP_FREE_SONAME(sin6);
4309				return (error);
4310			}
4311			*addr = (struct sockaddr *)sin6;
4312			break;
4313		}
4314#endif
4315	default:
4316		/* TSNH */
4317		break;
4318	}
4319	/* Wake any delayed sleep action */
4320	if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
4321		SCTP_INP_WLOCK(inp);
4322		inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
4323		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
4324			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
4325			SCTP_INP_WUNLOCK(inp);
4326			SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
4327			if (sowriteable(inp->sctp_socket)) {
4328				sowwakeup_locked(inp->sctp_socket);
4329			} else {
4330				SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
4331			}
4332			SCTP_INP_WLOCK(inp);
4333		}
4334		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
4335			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
4336			SCTP_INP_WUNLOCK(inp);
4337			SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
4338			if (soreadable(inp->sctp_socket)) {
4339				sctp_defered_wakeup_cnt++;
4340				sorwakeup_locked(inp->sctp_socket);
4341			} else {
4342				SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
4343			}
4344			SCTP_INP_WLOCK(inp);
4345		}
4346		SCTP_INP_WUNLOCK(inp);
4347	}
4348	return (0);
4349}
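/*
 * Illustrative sketch only: sctp_accept() above pairs with the usual
 * one-to-one style server sequence (placeholder port, same userland
 * headers as the connect() sketch earlier).  One-to-many (SOCK_SEQPACKET)
 * sockets get EOPNOTSUPP here instead.
 *
 *	int lfd, cfd;
 *	struct sockaddr_in sin, peer;
 *	socklen_t plen = sizeof(peer);
 *
 *	lfd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	memset(&sin, 0, sizeof(sin));
 *	sin.sin_family = AF_INET;
 *	sin.sin_len = sizeof(sin);
 *	sin.sin_port = htons(9);
 *	sin.sin_addr.s_addr = htonl(INADDR_ANY);
 *	(void)bind(lfd, (struct sockaddr *)&sin, sizeof(sin));
 *	(void)listen(lfd, 5);
 *	cfd = accept(lfd, (struct sockaddr *)&peer, &plen);
 *
 * The address written to "peer" is the association's primary destination,
 * copied from stcb->asoc.primary_destination above.
 */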
4350
4351int
4352sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
4353{
4354	struct sockaddr_in *sin;
4355	uint32_t vrf_id;
4356	struct sctp_inpcb *inp;
4357	struct sctp_ifa *sctp_ifa;
4358
4359	/*
4360	 * Do the malloc first in case it blocks.
4361	 */
4362	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4363	sin->sin_family = AF_INET;
4364	sin->sin_len = sizeof(*sin);
4365	inp = (struct sctp_inpcb *)so->so_pcb;
4366	if (!inp) {
4367		SCTP_FREE_SONAME(sin);
4368		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4369		return ECONNRESET;
4370	}
4371	SCTP_INP_RLOCK(inp);
4372	sin->sin_port = inp->sctp_lport;
4373	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4374		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4375			struct sctp_tcb *stcb;
4376			struct sockaddr_in *sin_a;
4377			struct sctp_nets *net;
4378			int fnd;
4379
4380			stcb = LIST_FIRST(&inp->sctp_asoc_list);
4381			if (stcb == NULL) {
4382				goto notConn;
4383			}
4384			fnd = 0;
4385			sin_a = NULL;
4386			SCTP_TCB_LOCK(stcb);
4387			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4388				sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4389				if (sin_a == NULL)
4390					/* this will make coverity happy */
4391					continue;
4392
4393				if (sin_a->sin_family == AF_INET) {
4394					fnd = 1;
4395					break;
4396				}
4397			}
4398			if ((!fnd) || (sin_a == NULL)) {
4399				/* punt */
4400				SCTP_TCB_UNLOCK(stcb);
4401				goto notConn;
4402			}
4403			vrf_id = inp->def_vrf_id;
4404			sctp_ifa = sctp_source_address_selection(inp,
4405			    stcb,
4406			    (sctp_route_t *) & net->ro,
4407			    net, 0, vrf_id);
4408			if (sctp_ifa) {
4409				sin->sin_addr = sctp_ifa->address.sin.sin_addr;
4410				sctp_free_ifa(sctp_ifa);
4411			}
4412			SCTP_TCB_UNLOCK(stcb);
4413		} else {
4414			/* For the bound all case you get back 0 */
4415	notConn:
4416			sin->sin_addr.s_addr = 0;
4417		}
4418
4419	} else {
4420		/* Take the first IPv4 address in the list */
4421		struct sctp_laddr *laddr;
4422		int fnd = 0;
4423
4424		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4425			if (laddr->ifa->address.sa.sa_family == AF_INET) {
4426				struct sockaddr_in *sin_a;
4427
4428				sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa;
4429				sin->sin_addr = sin_a->sin_addr;
4430				fnd = 1;
4431				break;
4432			}
4433		}
4434		if (!fnd) {
4435			SCTP_FREE_SONAME(sin);
4436			SCTP_INP_RUNLOCK(inp);
4437			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
4438			return ENOENT;
4439		}
4440	}
4441	SCTP_INP_RUNLOCK(inp);
4442	(*addr) = (struct sockaddr *)sin;
4443	return (0);
4444}
4445
4446int
4447sctp_peeraddr(struct socket *so, struct sockaddr **addr)
4448{
4449	struct sockaddr_in *sin = (struct sockaddr_in *)*addr;
4450	int fnd;
4451	struct sockaddr_in *sin_a;
4452	struct sctp_inpcb *inp;
4453	struct sctp_tcb *stcb;
4454	struct sctp_nets *net;
4455
4456	/* Do the malloc first in case it blocks. */
4457	inp = (struct sctp_inpcb *)so->so_pcb;
4458	if ((inp == NULL) ||
4459	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
4460		/* UDP type and listeners will drop out here */
4461		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
4462		return (ENOTCONN);
4463	}
4464	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4465	sin->sin_family = AF_INET;
4466	sin->sin_len = sizeof(*sin);
4467
4468	/* We must recapture in case we blocked */
4469	inp = (struct sctp_inpcb *)so->so_pcb;
4470	if (!inp) {
4471		SCTP_FREE_SONAME(sin);
4472		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4473		return ECONNRESET;
4474	}
4475	SCTP_INP_RLOCK(inp);
4476	stcb = LIST_FIRST(&inp->sctp_asoc_list);
4477	if (stcb) {
4478		SCTP_TCB_LOCK(stcb);
4479	}
4480	SCTP_INP_RUNLOCK(inp);
4481	if (stcb == NULL) {
4482		SCTP_FREE_SONAME(sin);
4483		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4484		return ECONNRESET;
4485	}
4486	fnd = 0;
4487	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4488		sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4489		if (sin_a->sin_family == AF_INET) {
4490			fnd = 1;
4491			sin->sin_port = stcb->rport;
4492			sin->sin_addr = sin_a->sin_addr;
4493			break;
4494		}
4495	}
4496	SCTP_TCB_UNLOCK(stcb);
4497	if (!fnd) {
4498		/* No IPv4 address */
4499		SCTP_FREE_SONAME(sin);
4500		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
4501		return ENOENT;
4502	}
4503	(*addr) = (struct sockaddr *)sin;
4504	return (0);
4505}
4506
4507struct pr_usrreqs sctp_usrreqs = {
4508	.pru_abort = sctp_abort,
4509	.pru_accept = sctp_accept,
4510	.pru_attach = sctp_attach,
4511	.pru_bind = sctp_bind,
4512	.pru_connect = sctp_connect,
4513	.pru_control = in_control,
4514	.pru_close = sctp_close,
4515	.pru_detach = sctp_close,
4516	.pru_sopoll = sopoll_generic,
4517	.pru_flush = sctp_flush,
4518	.pru_disconnect = sctp_disconnect,
4519	.pru_listen = sctp_listen,
4520	.pru_peeraddr = sctp_peeraddr,
4521	.pru_send = sctp_sendm,
4522	.pru_shutdown = sctp_shutdown,
4523	.pru_sockaddr = sctp_ingetaddr,
4524	.pru_sosend = sctp_sosend,
4525	.pru_soreceive = sctp_soreceive
4526};
4527