sctp_usrreq.c revision 211944
1/*-
2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 *   this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *   the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 *    contributors may be used to endorse or promote products derived
16 *    from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $	 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_usrreq.c 211944 2010-08-28 17:59:51Z tuexen $");
35#include <netinet/sctp_os.h>
36#include <sys/proc.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctp_header.h>
39#include <netinet/sctp_var.h>
40#if defined(INET6)
41#endif
42#include <netinet/sctp_sysctl.h>
43#include <netinet/sctp_output.h>
44#include <netinet/sctp_uio.h>
45#include <netinet/sctp_asconf.h>
46#include <netinet/sctputil.h>
47#include <netinet/sctp_indata.h>
48#include <netinet/sctp_timer.h>
49#include <netinet/sctp_auth.h>
50#include <netinet/sctp_bsd_addr.h>
51#include <netinet/sctp_cc_functions.h>
52#include <netinet/udp.h>
53
54
55
56
57void
58sctp_init(void)
59{
60	u_long sb_max_adj;
61
62	bzero(&SCTP_BASE_STATS, sizeof(struct sctpstat));
63
64	/* Initialize and modify the sysctled variables */
65	sctp_init_sysctls();
66	if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
67		SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8);
68	/*
69	 * Allow a user to take no more than 1/2 the number of clusters or
70	 * SB_MAX, whichever is smaller, for the send window.
71	 */
72	sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
73	SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj,
74	    (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
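	/*
	 * For example, with the common values SB_MAX = 2MB, MSIZE = 256 and
	 * MCLBYTES = 2048 (these vary by platform and tuning), sb_max_adj
	 * works out to 2MB * 2048 / 2304, i.e. roughly 1.8MB; the send space
	 * then becomes the smaller of that and half the cluster pool worth
	 * of default-sized segments.
	 */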
75	/*
76	 * Now for the recv window: should we take the same amount, or
77	 * should I use 1/2 of SB_MAX instead in the SB_MAX min above? For
78	 * now I will just copy the send window.
79	 */
80	SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace);
81
82	SCTP_BASE_VAR(first_time) = 0;
83	SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
84	sctp_pcb_init();
85#if defined(SCTP_PACKET_LOGGING)
86	SCTP_BASE_VAR(packet_log_writers) = 0;
87	SCTP_BASE_VAR(packet_log_end) = 0;
88	bzero(&SCTP_BASE_VAR(packet_log_buffer), SCTP_PACKET_LOG_SIZE);
89#endif
90
91
92}
93
94void
95sctp_finish(void)
96{
97	sctp_pcb_finish();
98}
99
100
101
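/*
 * Record the new (smaller) path MTU for the association and walk the send
 * and sent queues: any chunk that no longer fits (chunk size plus IP and,
 * if DATA must be authenticated, AUTH overhead) is allowed to be IP
 * fragmented, and chunks already in flight are additionally marked for
 * retransmission with their flight size taken back.
 */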
102void
103sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
104    struct sctp_tcb *stcb,
105    struct sctp_nets *net,
106    uint16_t nxtsz)
107{
108	struct sctp_tmit_chunk *chk;
109	uint16_t overhead;
110
111	/* Adjust that too */
112	stcb->asoc.smallest_mtu = nxtsz;
113	/* now off to subtract IP_DF flag if needed */
114#ifdef SCTP_PRINT_FOR_B_AND_M
115	SCTP_PRINTF("sctp_pathmtu_adjust called inp:%p stcb:%p net:%p nxtsz:%d\n",
116	    inp, stcb, net, nxtsz);
117#endif
118	overhead = IP_HDR_SIZE;
119	if (sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.peer_auth_chunks)) {
120		overhead += sctp_get_auth_chunk_len(stcb->asoc.peer_hmac_id);
121	}
122	TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
123		if ((chk->send_size + overhead) > nxtsz) {
124			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
125		}
126	}
127	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
128		if ((chk->send_size + overhead) > nxtsz) {
129			/*
130			 * For this chunk we also mark it for immediate resend,
131			 * since we sent too big a chunk.
132			 */
133			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
134			if (chk->sent < SCTP_DATAGRAM_RESEND) {
135				sctp_flight_size_decrease(chk);
136				sctp_total_flight_decrease(stcb, chk);
137			}
138			if (chk->sent != SCTP_DATAGRAM_RESEND) {
139				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
140			}
141			chk->sent = SCTP_DATAGRAM_RESEND;
142			chk->rec.data.doing_fast_retransmit = 0;
143			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
144				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
145				    chk->whoTo->flight_size,
146				    chk->book_size,
147				    (uintptr_t) chk->whoTo,
148				    chk->rec.data.TSN_seq);
149			}
150			/* Clear any time so NO RTT is being done */
151			chk->do_rtt = 0;
152		}
153	}
154}
155
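/*
 * Handle an ICMP "fragmentation needed" error: after verifying the vtag,
 * take the next-hop MTU from the ICMP message (or guess one if the router
 * did not supply it), clamp the destination's MTU, and shrink the
 * association's smallest MTU via sctp_pathmtu_adjustment() if needed.
 */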
156static void
157sctp_notify_mbuf(struct sctp_inpcb *inp,
158    struct sctp_tcb *stcb,
159    struct sctp_nets *net,
160    struct ip *ip,
161    struct sctphdr *sh)
162{
163	struct icmp *icmph;
164	int totsz, tmr_stopped = 0;
165	uint16_t nxtsz;
166
167	/* protection */
168	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
169	    (ip == NULL) || (sh == NULL)) {
170		if (stcb != NULL) {
171			SCTP_TCB_UNLOCK(stcb);
172		}
173		return;
174	}
175	/* First job is to verify the vtag matches what I would send */
176	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
177		SCTP_TCB_UNLOCK(stcb);
178		return;
179	}
180	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
181	    sizeof(struct ip)));
182	if (icmph->icmp_type != ICMP_UNREACH) {
183		/* We only care about unreachable */
184		SCTP_TCB_UNLOCK(stcb);
185		return;
186	}
187	if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
188		/* not an unreachable message due to fragmentation */
189		SCTP_TCB_UNLOCK(stcb);
190		return;
191	}
192	totsz = ip->ip_len;
193
194	nxtsz = ntohs(icmph->icmp_nextmtu);
195	if (nxtsz == 0) {
196		/*
197		 * old style router that does not tell us what the next MTU
198		 * size is. Rats, we will have to guess (in an educated fashion,
199		 * of course).
200		 */
201		nxtsz = find_next_best_mtu(totsz);
202	}
203	/* Stop any PMTU timer */
204	if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
205		tmr_stopped = 1;
206		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
207		    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
208	}
209	/* Adjust destination size limit */
210	if (net->mtu > nxtsz) {
211		net->mtu = nxtsz;
212		if (net->port) {
213			net->mtu -= sizeof(struct udphdr);
214		}
215	}
216	/* now what about the ep? */
217	if (stcb->asoc.smallest_mtu > nxtsz) {
218#ifdef SCTP_PRINT_FOR_B_AND_M
219		SCTP_PRINTF("notify_mbuf (ICMP) calls sctp_pathmtu_adjust mtu:%d\n",
220		    nxtsz);
221#endif
222		sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
223	}
224	if (tmr_stopped)
225		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
226
227	SCTP_TCB_UNLOCK(stcb);
228}
229
230
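/*
 * Handle the remaining ICMP unreachable codes: network/host style errors
 * mark the destination as unreachable and notify the ULP, while protocol
 * or port unreachable means no SCTP speaker lives at that address any
 * more, so the association is aborted as if an OOTB abort had arrived.
 */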
231void
232sctp_notify(struct sctp_inpcb *inp,
233    struct ip *ip,
234    struct sctphdr *sh,
235    struct sockaddr *to,
236    struct sctp_tcb *stcb,
237    struct sctp_nets *net)
238{
239#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
240	struct socket *so;
241
242#endif
243	/* protection */
244	int reason;
245	struct icmp *icmph;
246
247
248	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
249	    (sh == NULL) || (to == NULL)) {
250		if (stcb)
251			SCTP_TCB_UNLOCK(stcb);
252		return;
253	}
254	/* First job is to verify the vtag matches what I would send */
255	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
256		SCTP_TCB_UNLOCK(stcb);
257		return;
258	}
259	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
260	    sizeof(struct ip)));
261	if (icmph->icmp_type != ICMP_UNREACH) {
262		/* We only care about unreachable */
263		SCTP_TCB_UNLOCK(stcb);
264		return;
265	}
266	if ((icmph->icmp_code == ICMP_UNREACH_NET) ||
267	    (icmph->icmp_code == ICMP_UNREACH_HOST) ||
268	    (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
269	    (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
270	    (icmph->icmp_code == ICMP_UNREACH_ISOLATED) ||
271	    (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) ||
272	    (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
273	    (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {
274
275		/*
276		 * Hmm, reachability problems we must examine closely. If it is
277		 * not reachable, we may have lost a network. Or if there is
278		 * NO protocol at the other end named SCTP, well, we consider
279		 * it an OOTB abort.
280		 */
281		if (net->dest_state & SCTP_ADDR_REACHABLE) {
282			/* Ok that destination is NOT reachable */
283			SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
284			    net->error_count,
285			    net->failure_threshold,
286			    net);
287
288			net->dest_state &= ~SCTP_ADDR_REACHABLE;
289			net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
290			/*
291			 * JRS 5/14/07 - If a destination is unreachable,
292			 * the PF bit is turned off.  This allows an
293			 * unambiguous use of the PF bit for destinations
294			 * that are reachable but potentially failed. If the
295			 * destination is set to the unreachable state, also
296			 * set the destination to the PF state.
297			 */
298			/*
299			 * Add debug message here if destination is not in
300			 * PF state.
301			 */
302			/* Stop any running T3 timers here? */
303			if ((stcb->asoc.sctp_cmt_on_off == 1) &&
304			    (stcb->asoc.sctp_cmt_pf > 0)) {
305				net->dest_state &= ~SCTP_ADDR_PF;
306				SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
307				    net);
308			}
309			net->error_count = net->failure_threshold + 1;
310			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
311			    stcb, SCTP_FAILED_THRESHOLD,
312			    (void *)net, SCTP_SO_NOT_LOCKED);
313		}
314		SCTP_TCB_UNLOCK(stcb);
315	} else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) ||
316	    (icmph->icmp_code == ICMP_UNREACH_PORT)) {
317		/*
318		 * Here the peer is either playing tricks on us, including
319		 * an address that belongs to someone who does not support
320		 * SCTP, OR was a userland implementation that shut down and
321		 * now is dead. In either case treat it like an OOTB abort
322		 * with no TCB.
323		 */
324		reason = SCTP_PEER_FAULTY;
325		sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED);
326#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
327		so = SCTP_INP_SO(inp);
328		atomic_add_int(&stcb->asoc.refcnt, 1);
329		SCTP_TCB_UNLOCK(stcb);
330		SCTP_SOCKET_LOCK(so, 1);
331		SCTP_TCB_LOCK(stcb);
332		atomic_subtract_int(&stcb->asoc.refcnt, 1);
333#endif
334		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
335#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
336		SCTP_SOCKET_UNLOCK(so, 1);
337		/* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */
338#endif
339		/* no need to unlock here, since the TCB is gone */
340	} else {
341		SCTP_TCB_UNLOCK(stcb);
342	}
343}
344
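/*
 * Protocol control-input hook for IPv4 ICMP errors. Redirects and
 * unmapped commands are ignored; otherwise the embedded IP/SCTP headers
 * are used to look up the association, and PRC_MSGSIZE is dispatched to
 * sctp_notify_mbuf() while all other errors go to sctp_notify().
 */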
345void
346sctp_ctlinput(cmd, sa, vip)
347	int cmd;
348	struct sockaddr *sa;
349	void *vip;
350{
351	struct ip *ip = vip;
352	struct sctphdr *sh;
353	uint32_t vrf_id;
354
355	/* FIX, for non-bsd is this right? */
356	vrf_id = SCTP_DEFAULT_VRFID;
357	if (sa->sa_family != AF_INET ||
358	    ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
359		return;
360	}
361	if (PRC_IS_REDIRECT(cmd)) {
362		ip = 0;
363	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
364		return;
365	}
366	if (ip) {
367		struct sctp_inpcb *inp = NULL;
368		struct sctp_tcb *stcb = NULL;
369		struct sctp_nets *net = NULL;
370		struct sockaddr_in to, from;
371
372		sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
373		bzero(&to, sizeof(to));
374		bzero(&from, sizeof(from));
375		from.sin_family = to.sin_family = AF_INET;
376		from.sin_len = to.sin_len = sizeof(to);
377		from.sin_port = sh->src_port;
378		from.sin_addr = ip->ip_src;
379		to.sin_port = sh->dest_port;
380		to.sin_addr = ip->ip_dst;
381
382		/*
383		 * 'to' holds the dest of the packet that failed to be sent.
384		 * 'from' holds our local endpoint address. Thus we reverse
385		 * the to and the from in the lookup.
386		 */
387		stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
388		    (struct sockaddr *)&to,
389		    &inp, &net, 1, vrf_id);
390		if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
391			if (cmd != PRC_MSGSIZE) {
392				sctp_notify(inp, ip, sh,
393				    (struct sockaddr *)&to, stcb,
394				    net);
395			} else {
396				/* handle possible ICMP size messages */
397				sctp_notify_mbuf(inp, stcb, net, ip, sh);
398			}
399		} else {
400			if ((stcb == NULL) && (inp != NULL)) {
401				/* reduce ref-count */
402				SCTP_INP_WLOCK(inp);
403				SCTP_INP_DECR_REF(inp);
404				SCTP_INP_WUNLOCK(inp);
405			}
406			if (stcb) {
407				SCTP_TCB_UNLOCK(stcb);
408			}
409		}
410	}
411	return;
412}
413
414static int
415sctp_getcred(SYSCTL_HANDLER_ARGS)
416{
417	struct xucred xuc;
418	struct sockaddr_in addrs[2];
419	struct sctp_inpcb *inp;
420	struct sctp_nets *net;
421	struct sctp_tcb *stcb;
422	int error;
423	uint32_t vrf_id;
424
425	/* FIX, for non-bsd is this right? */
426	vrf_id = SCTP_DEFAULT_VRFID;
427
428	error = priv_check(req->td, PRIV_NETINET_GETCRED);
429
430	if (error)
431		return (error);
432
433	error = SYSCTL_IN(req, addrs, sizeof(addrs));
434	if (error)
435		return (error);
436
437	stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
438	    sintosa(&addrs[1]),
439	    &inp, &net, 1, vrf_id);
440	if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
441		if ((inp != NULL) && (stcb == NULL)) {
442			/* reduce ref-count */
443			SCTP_INP_WLOCK(inp);
444			SCTP_INP_DECR_REF(inp);
445			goto cred_can_cont;
446		}
447		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
448		error = ENOENT;
449		goto out;
450	}
451	SCTP_TCB_UNLOCK(stcb);
452	/*
453	 * We use the write lock here only because in the error leg we need
454	 * it. If we used RLOCK, then we would have to
455	 * wlock/decr/unlock/rlock, which in theory could create a hole.
456	 * Better to use the stronger wlock.
457	 */
458	SCTP_INP_WLOCK(inp);
459cred_can_cont:
460	error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
461	if (error) {
462		SCTP_INP_WUNLOCK(inp);
463		goto out;
464	}
465	cru2x(inp->sctp_socket->so_cred, &xuc);
466	SCTP_INP_WUNLOCK(inp);
467	error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
468out:
469	return (error);
470}
471
472SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
473    0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
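/*
 * Rough usage sketch (illustrative only): a suitably privileged process
 * can query the owning socket's credentials by passing the two endpoint
 * addresses, in the same layout the handler reads with SYSCTL_IN() above,
 * e.g.
 *
 *	struct sockaddr_in addrs[2];	// association address pair
 *	struct xucred xuc;
 *	size_t len = sizeof(xuc);
 *	// ... fill in addrs[0] and addrs[1] ...
 *	sysctlbyname("net.inet.sctp.getcred", &xuc, &len, addrs, sizeof(addrs));
 */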
474
475
476static void
477sctp_abort(struct socket *so)
478{
479	struct sctp_inpcb *inp;
480	uint32_t flags;
481
482	inp = (struct sctp_inpcb *)so->so_pcb;
483	if (inp == 0) {
484		return;
485	}
486sctp_must_try_again:
487	flags = inp->sctp_flags;
488#ifdef SCTP_LOG_CLOSING
489	sctp_log_closing(inp, NULL, 17);
490#endif
491	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
492	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
493#ifdef SCTP_LOG_CLOSING
494		sctp_log_closing(inp, NULL, 16);
495#endif
496		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
497		    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
498		SOCK_LOCK(so);
499		SCTP_SB_CLEAR(so->so_snd);
500		/*
501		 * same for the rcv ones, they are only here for the
502		 * accounting/select.
503		 */
504		SCTP_SB_CLEAR(so->so_rcv);
505
506		/* Now null out the reference, we are completely detached. */
507		so->so_pcb = NULL;
508		SOCK_UNLOCK(so);
509	} else {
510		flags = inp->sctp_flags;
511		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
512			goto sctp_must_try_again;
513		}
514	}
515	return;
516}
517
518static int
519sctp_attach(struct socket *so, int proto, struct thread *p)
520{
521	struct sctp_inpcb *inp;
522	struct inpcb *ip_inp;
523	int error;
524	uint32_t vrf_id = SCTP_DEFAULT_VRFID;
525
526#ifdef IPSEC
527	uint32_t flags;
528
529#endif
530
531	inp = (struct sctp_inpcb *)so->so_pcb;
532	if (inp != 0) {
533		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
534		return EINVAL;
535	}
536	if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
537		error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
538		if (error) {
539			return error;
540		}
541	}
542	error = sctp_inpcb_alloc(so, vrf_id);
543	if (error) {
544		return error;
545	}
546	inp = (struct sctp_inpcb *)so->so_pcb;
547	SCTP_INP_WLOCK(inp);
548	inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6;	/* I'm not v6! */
549	ip_inp = &inp->ip_inp.inp;
550	ip_inp->inp_vflag |= INP_IPV4;
551	ip_inp->inp_ip_ttl = MODULE_GLOBAL(ip_defttl);
552#ifdef IPSEC
553	error = ipsec_init_policy(so, &ip_inp->inp_sp);
554#ifdef SCTP_LOG_CLOSING
555	sctp_log_closing(inp, NULL, 17);
556#endif
557	if (error != 0) {
558try_again:
559		flags = inp->sctp_flags;
560		if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
561		    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
562#ifdef SCTP_LOG_CLOSING
563			sctp_log_closing(inp, NULL, 15);
564#endif
565			SCTP_INP_WUNLOCK(inp);
566			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
567			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
568		} else {
569			flags = inp->sctp_flags;
570			if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
571				goto try_again;
572			} else {
573				SCTP_INP_WUNLOCK(inp);
574			}
575		}
576		return error;
577	}
578#endif				/* IPSEC */
579	SCTP_INP_WUNLOCK(inp);
580	return 0;
581}
582
583static int
584sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
585{
586	struct sctp_inpcb *inp = NULL;
587	int error;
588
589#ifdef INET6
590	if (addr && addr->sa_family != AF_INET) {
591		/* must be a v4 address! */
592		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
593		return EINVAL;
594	}
595#endif				/* INET6 */
596	if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) {
597		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
598		return EINVAL;
599	}
600	inp = (struct sctp_inpcb *)so->so_pcb;
601	if (inp == 0) {
602		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
603		return EINVAL;
604	}
605	error = sctp_inpcb_bind(so, addr, NULL, p);
606	return error;
607}
608
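/*
 * Close the socket: if SO_LINGER is set with a zero linger time, or if
 * unread data remains, the endpoint is freed with an ABORT; otherwise a
 * graceful close is attempted. In both cases the socket buffers are
 * cleared and the socket is detached from the PCB.
 */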
609void
610sctp_close(struct socket *so)
611{
612	struct sctp_inpcb *inp;
613	uint32_t flags;
614
615	inp = (struct sctp_inpcb *)so->so_pcb;
616	if (inp == 0)
617		return;
618
619	/*
620	 * Inform all the lower layer associations that we are done.
621	 */
622sctp_must_try_again:
623	flags = inp->sctp_flags;
624#ifdef SCTP_LOG_CLOSING
625	sctp_log_closing(inp, NULL, 17);
626#endif
627	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
628	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
629		if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
630		    (so->so_rcv.sb_cc > 0)) {
631#ifdef SCTP_LOG_CLOSING
632			sctp_log_closing(inp, NULL, 13);
633#endif
634			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
635			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
636		} else {
637#ifdef SCTP_LOG_CLOSING
638			sctp_log_closing(inp, NULL, 14);
639#endif
640			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
641			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
642		}
643		/*
644		 * The socket is now detached, no matter what the state of
645		 * the SCTP association.
646		 */
647		SOCK_LOCK(so);
648		SCTP_SB_CLEAR(so->so_snd);
649		/*
650		 * same for the rcv ones, they are only here for the
651		 * accounting/select.
652		 */
653		SCTP_SB_CLEAR(so->so_rcv);
654
655		/* Now null out the reference, we are completely detached. */
656		so->so_pcb = NULL;
657		SOCK_UNLOCK(so);
658	} else {
659		flags = inp->sctp_flags;
660		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
661			goto sctp_must_try_again;
662		}
663	}
664	return;
665}
666
667
668int
669sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
670    struct mbuf *control, struct thread *p);
671
672
673int
674sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
675    struct mbuf *control, struct thread *p)
676{
677	struct sctp_inpcb *inp;
678	int error;
679
680	inp = (struct sctp_inpcb *)so->so_pcb;
681	if (inp == 0) {
682		if (control) {
683			sctp_m_freem(control);
684			control = NULL;
685		}
686		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
687		sctp_m_freem(m);
688		return EINVAL;
689	}
690	/* Got to have a destination address if we are NOT a connected socket */
691	if ((addr == NULL) &&
692	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
693	    (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
694	    ) {
695		goto connected_type;
696	} else if (addr == NULL) {
697		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
698		error = EDESTADDRREQ;
699		sctp_m_freem(m);
700		if (control) {
701			sctp_m_freem(control);
702			control = NULL;
703		}
704		return (error);
705	}
706#ifdef INET6
707	if (addr->sa_family != AF_INET) {
708		/* must be a v4 address! */
709		SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
710		sctp_m_freem(m);
711		if (control) {
712			sctp_m_freem(control);
713			control = NULL;
714		}
715		error = EDESTADDRREQ;
716		return EDESTADDRREQ;
717	}
718#endif				/* INET6 */
719connected_type:
720	/* now what about control */
721	if (control) {
722		if (inp->control) {
723			SCTP_PRINTF("huh? control set?\n");
724			sctp_m_freem(inp->control);
725			inp->control = NULL;
726		}
727		inp->control = control;
728	}
729	/* Place the data */
730	if (inp->pkt) {
731		SCTP_BUF_NEXT(inp->pkt_last) = m;
732		inp->pkt_last = m;
733	} else {
734		inp->pkt_last = inp->pkt = m;
735	}
736	if (
737	/* FreeBSD uses a flag passed */
738	    ((flags & PRUS_MORETOCOME) == 0)
739	    ) {
740		/*
741		 * note that with the current version this code will only be used
742		 * by OpenBSD -- NetBSD, FreeBSD, and Mac OS have methods for
743		 * re-defining sosend to use sctp_sosend. One can
744		 * optionally switch back to this code (by changing back the
745		 * definitions) but this is not advisable. This code is used
746		 * by FreeBSD when sending a file with sendfile(), though.
747		 */
748		int ret;
749
750		ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
751		inp->pkt = NULL;
752		inp->control = NULL;
753		return (ret);
754	} else {
755		return (0);
756	}
757}
758
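/*
 * Disconnect is only meaningful for one-to-one (TCP model) sockets. With
 * SO_LINGER set to zero or unread data pending, the association is
 * aborted; otherwise a SHUTDOWN is sent if nothing is left queued, or the
 * association is marked SHUTDOWN_PENDING until the queues drain.
 * One-to-many sockets get EOPNOTSUPP.
 */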
759int
760sctp_disconnect(struct socket *so)
761{
762	struct sctp_inpcb *inp;
763
764	inp = (struct sctp_inpcb *)so->so_pcb;
765	if (inp == NULL) {
766		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
767		return (ENOTCONN);
768	}
769	SCTP_INP_RLOCK(inp);
770	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
771	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
772		if (LIST_EMPTY(&inp->sctp_asoc_list)) {
773			/* No connection */
774			SCTP_INP_RUNLOCK(inp);
775			return (0);
776		} else {
777			struct sctp_association *asoc;
778			struct sctp_tcb *stcb;
779
780			stcb = LIST_FIRST(&inp->sctp_asoc_list);
781			if (stcb == NULL) {
782				SCTP_INP_RUNLOCK(inp);
783				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
784				return (EINVAL);
785			}
786			SCTP_TCB_LOCK(stcb);
787			asoc = &stcb->asoc;
788			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
789				/* We are about to be freed, out of here */
790				SCTP_TCB_UNLOCK(stcb);
791				SCTP_INP_RUNLOCK(inp);
792				return (0);
793			}
794			if (((so->so_options & SO_LINGER) &&
795			    (so->so_linger == 0)) ||
796			    (so->so_rcv.sb_cc > 0)) {
797				if (SCTP_GET_STATE(asoc) !=
798				    SCTP_STATE_COOKIE_WAIT) {
799					/* Left with Data unread */
800					struct mbuf *err;
801
802					err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
803					if (err) {
804						/*
805						 * Fill in the user
806						 * initiated abort
807						 */
808						struct sctp_paramhdr *ph;
809
810						ph = mtod(err, struct sctp_paramhdr *);
811						SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr);
812						ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
813						ph->param_length = htons(SCTP_BUF_LEN(err));
814					}
815#if defined(SCTP_PANIC_ON_ABORT)
816					panic("disconnect does an abort");
817#endif
818					sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED);
819					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
820				}
821				SCTP_INP_RUNLOCK(inp);
822				if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
823				    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
824					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
825				}
826				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
827				/* No unlock tcb assoc is gone */
828				return (0);
829			}
830			if (TAILQ_EMPTY(&asoc->send_queue) &&
831			    TAILQ_EMPTY(&asoc->sent_queue) &&
832			    (asoc->stream_queue_cnt == 0)) {
833				/* there is nothing queued to send, so done */
834				if (asoc->locked_on_sending) {
835					goto abort_anyway;
836				}
837				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
838				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
839					/* only send SHUTDOWN 1st time thru */
840					sctp_stop_timers_for_shutdown(stcb);
841					sctp_send_shutdown(stcb,
842					    stcb->asoc.primary_destination);
843					sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
844					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
845					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
846						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
847					}
848					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
849					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
850					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
851					    stcb->sctp_ep, stcb,
852					    asoc->primary_destination);
853					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
854					    stcb->sctp_ep, stcb,
855					    asoc->primary_destination);
856				}
857			} else {
858				/*
859				 * we still got (or just got) data to send,
860				 * so set SHUTDOWN_PENDING
861				 */
862				/*
863				 * XXX sockets draft says that SCTP_EOF
864				 * should be sent with no data. currently,
865				 * we will allow user data to be sent first
866				 * and move to SHUTDOWN-PENDING
867				 */
868				asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
869				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
870				    asoc->primary_destination);
871				if (asoc->locked_on_sending) {
872					/* Locked to send out the data */
873					struct sctp_stream_queue_pending *sp;
874
875					sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
876					if (sp == NULL) {
877						SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
878						    asoc->locked_on_sending->stream_no);
879					} else {
880						if ((sp->length == 0) && (sp->msg_is_complete == 0))
881							asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
882					}
883				}
884				if (TAILQ_EMPTY(&asoc->send_queue) &&
885				    TAILQ_EMPTY(&asoc->sent_queue) &&
886				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
887					struct mbuf *op_err;
888
889			abort_anyway:
890					op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
891					    0, M_DONTWAIT, 1, MT_DATA);
892					if (op_err) {
893						/*
894						 * Fill in the user
895						 * initiated abort
896						 */
897						struct sctp_paramhdr *ph;
898						uint32_t *ippp;
899
900						SCTP_BUF_LEN(op_err) =
901						    (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
902						ph = mtod(op_err,
903						    struct sctp_paramhdr *);
904						ph->param_type = htons(
905						    SCTP_CAUSE_USER_INITIATED_ABT);
906						ph->param_length = htons(SCTP_BUF_LEN(op_err));
907						ippp = (uint32_t *) (ph + 1);
908						*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4);
909					}
910#if defined(SCTP_PANIC_ON_ABORT)
911					panic("disconnect does an abort");
912#endif
913
914					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
915					sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED);
916					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
917					if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
918					    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
919						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
920					}
921					SCTP_INP_RUNLOCK(inp);
922					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
923					return (0);
924				} else {
925					sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
926				}
927			}
928			soisdisconnecting(so);
929			SCTP_TCB_UNLOCK(stcb);
930			SCTP_INP_RUNLOCK(inp);
931			return (0);
932		}
933		/* not reached */
934	} else {
935		/* UDP model does not support this */
936		SCTP_INP_RUNLOCK(inp);
937		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
938		return EOPNOTSUPP;
939	}
940}
941
942int
943sctp_flush(struct socket *so, int how)
944{
945	/*
946	 * We will just clear out the values and let subsequent close clear
947	 * out the data, if any. Note that if the user did a shutdown(SHUT_RD)
948	 * they will not be able to read the data; the socket will block
949	 * that from happening.
950	 */
951	struct sctp_inpcb *inp;
952
953	inp = (struct sctp_inpcb *)so->so_pcb;
954	if (inp == NULL) {
955		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
956		return EINVAL;
957	}
958	SCTP_INP_RLOCK(inp);
959	/* For the 1 to many model this does nothing */
960	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
961		SCTP_INP_RUNLOCK(inp);
962		return (0);
963	}
964	SCTP_INP_RUNLOCK(inp);
965	if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) {
966		/*
967		 * First make sure the sb will be happy, we don't use these
968		 * except maybe the count
969		 */
970		SCTP_INP_WLOCK(inp);
971		SCTP_INP_READ_LOCK(inp);
972		inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_CANT_READ;
973		SCTP_INP_READ_UNLOCK(inp);
974		SCTP_INP_WUNLOCK(inp);
975		so->so_rcv.sb_cc = 0;
976		so->so_rcv.sb_mbcnt = 0;
977		so->so_rcv.sb_mb = NULL;
978	}
979	if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) {
980		/*
981		 * First make sure the sb will be happy, we don't use these
982		 * except maybe the count
983		 */
984		so->so_snd.sb_cc = 0;
985		so->so_snd.sb_mbcnt = 0;
986		so->so_snd.sb_mb = NULL;
987
988	}
989	return (0);
990}
991
992int
993sctp_shutdown(struct socket *so)
994{
995	struct sctp_inpcb *inp;
996
997	inp = (struct sctp_inpcb *)so->so_pcb;
998	if (inp == 0) {
999		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1000		return EINVAL;
1001	}
1002	SCTP_INP_RLOCK(inp);
1003	/* For the UDP model this is an invalid call */
1004	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
1005		/* Restore the flags that the soshutdown took away. */
1006		SOCKBUF_LOCK(&so->so_rcv);
1007		so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
1008		SOCKBUF_UNLOCK(&so->so_rcv);
1009		/* This proc will wake up for read and do nothing (I hope) */
1010		SCTP_INP_RUNLOCK(inp);
1011		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
1012		return (EOPNOTSUPP);
1013	}
1014	/*
1015	 * Ok, if we reach here it's the TCP model and it is either a SHUT_WR
1016	 * or SHUT_RDWR. This means we put the shutdown flag against it.
1017	 */
1018	{
1019		struct sctp_tcb *stcb;
1020		struct sctp_association *asoc;
1021
1022		if ((so->so_state &
1023		    (SS_ISCONNECTED | SS_ISCONNECTING | SS_ISDISCONNECTING)) == 0) {
1024			SCTP_INP_RUNLOCK(inp);
1025			return (ENOTCONN);
1026		}
1027		socantsendmore(so);
1028
1029		stcb = LIST_FIRST(&inp->sctp_asoc_list);
1030		if (stcb == NULL) {
1031			/*
1032			 * Ok we hit the case that the shutdown call was
1033			 * made after an abort or something. Nothing to do
1034			 * now.
1035			 */
1036			SCTP_INP_RUNLOCK(inp);
1037			return (0);
1038		}
1039		SCTP_TCB_LOCK(stcb);
1040		asoc = &stcb->asoc;
1041		if (TAILQ_EMPTY(&asoc->send_queue) &&
1042		    TAILQ_EMPTY(&asoc->sent_queue) &&
1043		    (asoc->stream_queue_cnt == 0)) {
1044			if (asoc->locked_on_sending) {
1045				goto abort_anyway;
1046			}
1047			/* there is nothing queued to send, so I'm done... */
1048			if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1049				/* only send SHUTDOWN the first time through */
1050				sctp_stop_timers_for_shutdown(stcb);
1051				sctp_send_shutdown(stcb,
1052				    stcb->asoc.primary_destination);
1053				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
1054				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
1055				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
1056					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
1057				}
1058				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
1059				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
1060				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1061				    stcb->sctp_ep, stcb,
1062				    asoc->primary_destination);
1063				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1064				    stcb->sctp_ep, stcb,
1065				    asoc->primary_destination);
1066			}
1067		} else {
1068			/*
1069			 * we still got (or just got) data to send, so set
1070			 * SHUTDOWN_PENDING
1071			 */
1072			asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1073			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
1074			    asoc->primary_destination);
1075
1076			if (asoc->locked_on_sending) {
1077				/* Locked to send out the data */
1078				struct sctp_stream_queue_pending *sp;
1079
1080				sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
1081				if (sp == NULL) {
1082					SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
1083					    asoc->locked_on_sending->stream_no);
1084				} else {
1085					if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
1086						asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
1087					}
1088				}
1089			}
1090			if (TAILQ_EMPTY(&asoc->send_queue) &&
1091			    TAILQ_EMPTY(&asoc->sent_queue) &&
1092			    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
1093				struct mbuf *op_err;
1094
1095		abort_anyway:
1096				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1097				    0, M_DONTWAIT, 1, MT_DATA);
1098				if (op_err) {
1099					/* Fill in the user initiated abort */
1100					struct sctp_paramhdr *ph;
1101					uint32_t *ippp;
1102
1103					SCTP_BUF_LEN(op_err) =
1104					    sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
1105					ph = mtod(op_err,
1106					    struct sctp_paramhdr *);
1107					ph->param_type = htons(
1108					    SCTP_CAUSE_USER_INITIATED_ABT);
1109					ph->param_length = htons(SCTP_BUF_LEN(op_err));
1110					ippp = (uint32_t *) (ph + 1);
1111					*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
1112				}
1113#if defined(SCTP_PANIC_ON_ABORT)
1114				panic("shutdown does an abort");
1115#endif
1116				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
1117				sctp_abort_an_association(stcb->sctp_ep, stcb,
1118				    SCTP_RESPONSE_TO_USER_REQ,
1119				    op_err, SCTP_SO_LOCKED);
1120				goto skip_unlock;
1121			} else {
1122				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
1123			}
1124		}
1125		SCTP_TCB_UNLOCK(stcb);
1126	}
1127skip_unlock:
1128	SCTP_INP_RUNLOCK(inp);
1129	return 0;
1130}
1131
1132/*
1133 * copies a "user" presentable address and removes embedded scope, etc.
1134 * returns 0 on success, 1 on error
1135 */
1136static uint32_t
1137sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
1138{
1139#ifdef INET6
1140	struct sockaddr_in6 lsa6;
1141
1142	sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
1143	    &lsa6);
1144#endif
1145	memcpy(ss, sa, sa->sa_len);
1146	return (0);
1147}
1148
1149
1150
1151/*
1152 * NOTE: assumes addr lock is held
1153 */
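/*
 * Pack the usable addresses for the endpoint (or association, if a TCB is
 * given) back-to-back into the caller's buffer as sockaddr_in/sockaddr_in6
 * structures (IPv4 addresses may be returned v4-mapped when the socket
 * requested mapped addresses), each carrying the local port, and return
 * the number of bytes consumed, stopping once 'limit' is reached.
 */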
1154static size_t
1155sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
1156    struct sctp_tcb *stcb,
1157    size_t limit,
1158    struct sockaddr_storage *sas,
1159    uint32_t vrf_id)
1160{
1161	struct sctp_ifn *sctp_ifn;
1162	struct sctp_ifa *sctp_ifa;
1163	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
1164	size_t actual;
1165	int ipv4_addr_legal, ipv6_addr_legal;
1166	struct sctp_vrf *vrf;
1167
1168	actual = 0;
1169	if (limit <= 0)
1170		return (actual);
1171
1172	if (stcb) {
1173		/* Turn on all the appropriate scope */
1174		loopback_scope = stcb->asoc.loopback_scope;
1175		ipv4_local_scope = stcb->asoc.ipv4_local_scope;
1176		local_scope = stcb->asoc.local_scope;
1177		site_scope = stcb->asoc.site_scope;
1178	} else {
1179		/* Turn on ALL scope, since we look at the EP */
1180		loopback_scope = ipv4_local_scope = local_scope =
1181		    site_scope = 1;
1182	}
1183	ipv4_addr_legal = ipv6_addr_legal = 0;
1184	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1185		ipv6_addr_legal = 1;
1186		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1187			ipv4_addr_legal = 1;
1188		}
1189	} else {
1190		ipv4_addr_legal = 1;
1191	}
1192	vrf = sctp_find_vrf(vrf_id);
1193	if (vrf == NULL) {
1194		return (0);
1195	}
1196	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1197		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1198			if ((loopback_scope == 0) &&
1199			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
1200				/* Skip loopback if loopback_scope not set */
1201				continue;
1202			}
1203			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1204				if (stcb) {
1205					/*
1206					 * For the BOUND-ALL case, the list
1207					 * associated with a TCB is always
1208					 * considered a reverse list, i.e.
1209					 * it lists addresses that are NOT
1210					 * part of the association. If this
1211					 * is one of those we must skip it.
1212					 */
1213					if (sctp_is_addr_restricted(stcb,
1214					    sctp_ifa)) {
1215						continue;
1216					}
1217				}
1218				switch (sctp_ifa->address.sa.sa_family) {
1219				case AF_INET:
1220					if (ipv4_addr_legal) {
1221						struct sockaddr_in *sin;
1222
1223						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
1224						if (sin->sin_addr.s_addr == 0) {
1225							/*
1226							 * we skip
1227							 * unspecified
1228							 * addresses
1229							 */
1230							continue;
1231						}
1232						if ((ipv4_local_scope == 0) &&
1233						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1234							continue;
1235						}
1236#ifdef INET6
1237						if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
1238							in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
1239							((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1240							sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
1241							actual += sizeof(struct sockaddr_in6);
1242						} else {
1243#endif
1244							memcpy(sas, sin, sizeof(*sin));
1245							((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
1246							sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
1247							actual += sizeof(*sin);
1248#ifdef INET6
1249						}
1250#endif
1251						if (actual >= limit) {
1252							return (actual);
1253						}
1254					} else {
1255						continue;
1256					}
1257					break;
1258#ifdef INET6
1259				case AF_INET6:
1260					if (ipv6_addr_legal) {
1261						struct sockaddr_in6 *sin6;
1262
1263						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
1264						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1265							/*
1266							 * we skip
1267							 * unspecifed
1268							 * unspecified
1269							 */
1270							continue;
1271						}
1272						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1273							if (local_scope == 0)
1274								continue;
1275							if (sin6->sin6_scope_id == 0) {
1276								if (sa6_recoverscope(sin6) != 0)
1277									/*
1278									/* bad link local address */
1292							}
1293						}
1294						if ((site_scope == 0) &&
1295						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1296							continue;
1297						}
1298						memcpy(sas, sin6, sizeof(*sin6));
1299						((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1300						sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
1301						actual += sizeof(*sin6);
1302						if (actual >= limit) {
1303							return (actual);
1304						}
1305					} else {
1306						continue;
1307					}
1308					break;
1309#endif
1310				default:
1311					/* TSNH */
1312					break;
1313				}
1314			}
1315		}
1316	} else {
1317		struct sctp_laddr *laddr;
1318
1319		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1320			if (stcb) {
1321				if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
1322					continue;
1323				}
1324			}
1325			if (sctp_fill_user_address(sas, &laddr->ifa->address.sa))
1326				continue;
1327
1328			((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1329			sas = (struct sockaddr_storage *)((caddr_t)sas +
1330			    laddr->ifa->address.sa.sa_len);
1331			actual += laddr->ifa->address.sa.sa_len;
1332			if (actual >= limit) {
1333				return (actual);
1334			}
1335		}
1336	}
1337	return (actual);
1338}
1339
1340static size_t
1341sctp_fill_up_addresses(struct sctp_inpcb *inp,
1342    struct sctp_tcb *stcb,
1343    size_t limit,
1344    struct sockaddr_storage *sas)
1345{
1346	size_t size = 0;
1347
1348	SCTP_IPI_ADDR_RLOCK();
1349	/* fill up addresses for the endpoint's default vrf */
1350	size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
1351	    inp->def_vrf_id);
1352	SCTP_IPI_ADDR_RUNLOCK();
1353	return (size);
1354}
1355
1356/*
1357 * NOTE: assumes addr lock is held
1358 */
1359static int
1360sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
1361{
1362	int cnt = 0;
1363	struct sctp_vrf *vrf = NULL;
1364
1365	/*
1366	 * In both the sub-set bound and bound_all cases we return the MAXIMUM
1367	 * number of addresses that you COULD get. In reality the sub-set
1368	 * bound may have an exclusion list for a given TCB OR in the
1369	 * bound-all case a TCB may NOT include the loopback or other
1370	 * addresses as well.
1371	 */
1372	vrf = sctp_find_vrf(vrf_id);
1373	if (vrf == NULL) {
1374		return (0);
1375	}
1376	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1377		struct sctp_ifn *sctp_ifn;
1378		struct sctp_ifa *sctp_ifa;
1379
1380		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1381			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1382				/* Count them if they are the right type */
1383				if (sctp_ifa->address.sa.sa_family == AF_INET) {
1384					if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
1385						cnt += sizeof(struct sockaddr_in6);
1386					else
1387						cnt += sizeof(struct sockaddr_in);
1388
1389				} else if (sctp_ifa->address.sa.sa_family == AF_INET6)
1390					cnt += sizeof(struct sockaddr_in6);
1391			}
1392		}
1393	} else {
1394		struct sctp_laddr *laddr;
1395
1396		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1397			if (laddr->ifa->address.sa.sa_family == AF_INET) {
1398				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
1399					cnt += sizeof(struct sockaddr_in6);
1400				else
1401					cnt += sizeof(struct sockaddr_in);
1402
1403			} else if (laddr->ifa->address.sa.sa_family == AF_INET6)
1404				cnt += sizeof(struct sockaddr_in6);
1405		}
1406	}
1407	return (cnt);
1408}
1409
1410static int
1411sctp_count_max_addresses(struct sctp_inpcb *inp)
1412{
1413	int cnt = 0;
1414
1415	SCTP_IPI_ADDR_RLOCK();
1416	/* count addresses for the endpoint's default VRF */
1417	cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
1418	SCTP_IPI_ADDR_RUNLOCK();
1419	return (cnt);
1420}
1421
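/*
 * Worker for the connectx-style socket options (normally reached through
 * the sctp_connectx(3) wrapper). The option buffer starts with an int
 * giving the total number of addresses, followed by that many packed
 * sockaddr_in/sockaddr_in6 structures; on success the new association id
 * is written back over the start of the buffer. A non-zero 'delay' only
 * arms the INIT timer instead of sending the INIT immediately.
 */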
1422static int
1423sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
1424    size_t optsize, void *p, int delay)
1425{
1426	int error = 0;
1427	int creat_lock_on = 0;
1428	struct sctp_tcb *stcb = NULL;
1429	struct sockaddr *sa;
1430	int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr;
1431	int added = 0;
1432	uint32_t vrf_id;
1433	int bad_addresses = 0;
1434	sctp_assoc_t *a_id;
1435
1436	SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n");
1437
1438	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1439	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1440		/* We are already connected AND the TCP model */
1441		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
1442		return (EADDRINUSE);
1443	}
1444	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
1445	    (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
1446		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1447		return (EINVAL);
1448	}
1449	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
1450		SCTP_INP_RLOCK(inp);
1451		stcb = LIST_FIRST(&inp->sctp_asoc_list);
1452		SCTP_INP_RUNLOCK(inp);
1453	}
1454	if (stcb) {
1455		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
1456		return (EALREADY);
1457	}
1458	SCTP_INP_INCR_REF(inp);
1459	SCTP_ASOC_CREATE_LOCK(inp);
1460	creat_lock_on = 1;
1461	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1462	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1463		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
1464		error = EFAULT;
1465		goto out_now;
1466	}
1467	totaddrp = (int *)optval;
1468	totaddr = *totaddrp;
1469	sa = (struct sockaddr *)(totaddrp + 1);
1470	stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, &num_v6, &error, (optsize - sizeof(int)), &bad_addresses);
1471	if ((stcb != NULL) || bad_addresses) {
1472		/* Already have, or am bringing up, an association */
1473		SCTP_ASOC_CREATE_UNLOCK(inp);
1474		creat_lock_on = 0;
1475		if (stcb)
1476			SCTP_TCB_UNLOCK(stcb);
1477		if (bad_addresses == 0) {
1478			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
1479			error = EALREADY;
1480		}
1481		goto out_now;
1482	}
1483#ifdef INET6
1484	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
1485	    (num_v6 > 0)) {
1486		error = EINVAL;
1487		goto out_now;
1488	}
1489	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1490	    (num_v4 > 0)) {
1491		struct in6pcb *inp6;
1492
1493		inp6 = (struct in6pcb *)inp;
1494		if (SCTP_IPV6_V6ONLY(inp6)) {
1495			/*
1496			 * if IPV6_V6ONLY flag, ignore connections destined
1497			 * to a v4 addr or v4-mapped addr
1498			 */
1499			SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1500			error = EINVAL;
1501			goto out_now;
1502		}
1503	}
1504#endif				/* INET6 */
1505	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
1506	    SCTP_PCB_FLAGS_UNBOUND) {
1507		/* Bind an ephemeral port */
1508		error = sctp_inpcb_bind(so, NULL, NULL, p);
1509		if (error) {
1510			goto out_now;
1511		}
1512	}
1513	/* FIX ME: do we want to pass in a vrf on the connect call? */
1514	vrf_id = inp->def_vrf_id;
1515
1516
1517	/* We are GOOD to go */
1518	stcb = sctp_aloc_assoc(inp, sa, &error, 0, vrf_id,
1519	    (struct thread *)p
1520	    );
1521	if (stcb == NULL) {
1522		/* Gak! no memory */
1523		goto out_now;
1524	}
1525	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
1526	/* move to second address */
1527	if (sa->sa_family == AF_INET)
1528		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
1529	else
1530		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
1531
1532	error = 0;
1533	added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error);
1534	/* Fill in the return id */
1535	if (error) {
1536		(void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
1537		goto out_now;
1538	}
1539	a_id = (sctp_assoc_t *) optval;
1540	*a_id = sctp_get_associd(stcb);
1541
1542	/* initialize authentication parameters for the assoc */
1543	sctp_initialize_auth_params(inp, stcb);
1544
1545	if (delay) {
1546		/* doing delayed connection */
1547		stcb->asoc.delayed_connection = 1;
1548		sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
1549	} else {
1550		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1551		sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
1552	}
1553	SCTP_TCB_UNLOCK(stcb);
1554	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
1555		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1556		/* Set the connected flag so we can queue data */
1557		soisconnecting(so);
1558	}
1559out_now:
1560	if (creat_lock_on) {
1561		SCTP_ASOC_CREATE_UNLOCK(inp);
1562	}
1563	SCTP_INP_DECR_REF(inp);
1564	return error;
1565}
1566
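/*
 * Helper macros for the get/set socket option handlers below:
 * SCTP_FIND_STCB resolves an association for the request -- the lone
 * association of a one-to-one style socket, or a lookup by assoc_id
 * (returned locked; ENOENT and break out of the switch if an id was given
 * but not found), or NULL so the option applies to the endpoint.
 * SCTP_CHECK_AND_CAST verifies that the option buffer is large enough for
 * the requested type before casting, otherwise it fails with EINVAL.
 */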
1567#define SCTP_FIND_STCB(inp, stcb, assoc_id) { \
1568	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\
1569	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \
1570		SCTP_INP_RLOCK(inp); \
1571		stcb = LIST_FIRST(&inp->sctp_asoc_list); \
1572		if (stcb) { \
1573			SCTP_TCB_LOCK(stcb); \
1574                } \
1575		SCTP_INP_RUNLOCK(inp); \
1576	} else if (assoc_id != 0) { \
1577		stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
1578		if (stcb == NULL) { \
1579		        SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \
1580			error = ENOENT; \
1581			break; \
1582		} \
1583	} else { \
1584		stcb = NULL; \
1585        } \
1586  }
1587
1588
1589#define SCTP_CHECK_AND_CAST(destp, srcp, type, size)  {\
1590	if (size < sizeof(type)) { \
1591		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \
1592		error = EINVAL; \
1593		break; \
1594	} else { \
1595		destp = (type *)srcp; \
1596	} \
1597      }
1598
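/*
 * sctp_getopt() operates on a kernel copy of the option buffer: each case
 * validates the size with SCTP_CHECK_AND_CAST, optionally resolves an
 * association with SCTP_FIND_STCB, writes the result into optval and sets
 * *optsize to the number of bytes to be copied back to the caller.
 */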
1599static int
1600sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
1601    void *p)
1602{
1603	struct sctp_inpcb *inp = NULL;
1604	int error, val = 0;
1605	struct sctp_tcb *stcb = NULL;
1606
1607	if (optval == NULL) {
1608		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1609		return (EINVAL);
1610	}
1611	inp = (struct sctp_inpcb *)so->so_pcb;
1612	if (inp == 0) {
1613		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1614		return EINVAL;
1615	}
1616	error = 0;
1617
1618	switch (optname) {
1619	case SCTP_NODELAY:
1620	case SCTP_AUTOCLOSE:
1621	case SCTP_EXPLICIT_EOR:
1622	case SCTP_AUTO_ASCONF:
1623	case SCTP_DISABLE_FRAGMENTS:
1624	case SCTP_I_WANT_MAPPED_V4_ADDR:
1625	case SCTP_USE_EXT_RCVINFO:
1626		SCTP_INP_RLOCK(inp);
1627		switch (optname) {
1628		case SCTP_DISABLE_FRAGMENTS:
1629			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
1630			break;
1631		case SCTP_I_WANT_MAPPED_V4_ADDR:
1632			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
1633			break;
1634		case SCTP_AUTO_ASCONF:
1635			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1636				/* only valid for bound all sockets */
1637				val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
1638			} else {
1639				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1640				error = EINVAL;
1641				goto flags_out;
1642			}
1643			break;
1644		case SCTP_EXPLICIT_EOR:
1645			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
1646			break;
1647		case SCTP_NODELAY:
1648			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
1649			break;
1650		case SCTP_USE_EXT_RCVINFO:
1651			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
1652			break;
1653		case SCTP_AUTOCLOSE:
1654			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
1655				val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time);
1656			else
1657				val = 0;
1658			break;
1659
1660		default:
1661			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
1662			error = ENOPROTOOPT;
1663		}		/* end switch (optname) */
1664		if (optname != SCTP_AUTOCLOSE) {
1665			/* make it an "on/off" value */
1666			val = (val != 0);
1667		}
1668		if (*optsize < sizeof(val)) {
1669			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1670			error = EINVAL;
1671		}
1672flags_out:
1673		SCTP_INP_RUNLOCK(inp);
1674		if (error == 0) {
1675			/* return the option value */
1676			*(int *)optval = val;
1677			*optsize = sizeof(val);
1678		}
1679		break;
1680	case SCTP_GET_PACKET_LOG:
1681		{
1682#ifdef  SCTP_PACKET_LOGGING
1683			uint8_t *target;
1684			int ret;
1685
1686			SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize);
1687			ret = sctp_copy_out_packet_log(target, (int)*optsize);
1688			*optsize = ret;
1689#else
1690			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
1691			error = EOPNOTSUPP;
1692#endif
1693			break;
1694		}
1695	case SCTP_REUSE_PORT:
1696		{
1697			uint32_t *value;
1698
1699			if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
1700				/* Can't do this for a 1-m socket */
1701				error = EINVAL;
1702				break;
1703			}
1704			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1705			*value = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
1706			*optsize = sizeof(uint32_t);
1707		}
1708		break;
1709	case SCTP_PARTIAL_DELIVERY_POINT:
1710		{
1711			uint32_t *value;
1712
1713			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1714			*value = inp->partial_delivery_point;
1715			*optsize = sizeof(uint32_t);
1716		}
1717		break;
1718	case SCTP_FRAGMENT_INTERLEAVE:
1719		{
1720			uint32_t *value;
1721
1722			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1723			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) {
1724				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) {
1725					*value = SCTP_FRAG_LEVEL_2;
1726				} else {
1727					*value = SCTP_FRAG_LEVEL_1;
1728				}
1729			} else {
1730				*value = SCTP_FRAG_LEVEL_0;
1731			}
1732			*optsize = sizeof(uint32_t);
1733		}
1734		break;
1735	case SCTP_CMT_ON_OFF:
1736		{
1737			struct sctp_assoc_value *av;
1738
1739			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1740			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1741			if (stcb) {
1742				av->assoc_value = stcb->asoc.sctp_cmt_on_off;
1743				SCTP_TCB_UNLOCK(stcb);
1744			} else {
1745				SCTP_INP_RLOCK(inp);
1746				av->assoc_value = inp->sctp_cmt_on_off;
1747				SCTP_INP_RUNLOCK(inp);
1748			}
1749			*optsize = sizeof(*av);
1750		}
1751		break;
1752		/* JRS - Get socket option for pluggable congestion control */
1753	case SCTP_PLUGGABLE_CC:
1754		{
1755			struct sctp_assoc_value *av;
1756
1757			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1758			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1759			if (stcb) {
1760				av->assoc_value = stcb->asoc.congestion_control_module;
1761				SCTP_TCB_UNLOCK(stcb);
1762			} else {
1763				av->assoc_value = inp->sctp_ep.sctp_default_cc_module;
1764			}
1765			*optsize = sizeof(*av);
1766		}
1767		break;
1768	case SCTP_GET_ADDR_LEN:
1769		{
1770			struct sctp_assoc_value *av;
1771
1772			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1773			error = EINVAL;
1774#ifdef INET
1775			if (av->assoc_value == AF_INET) {
1776				av->assoc_value = sizeof(struct sockaddr_in);
1777				error = 0;
1778			}
1779#endif
1780#ifdef INET6
1781			if (av->assoc_value == AF_INET6) {
1782				av->assoc_value = sizeof(struct sockaddr_in6);
1783				error = 0;
1784			}
1785#endif
1786			if (error) {
1787				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1788			}
1789			*optsize = sizeof(*av);
1790		}
1791		break;
1792	case SCTP_GET_ASSOC_NUMBER:
1793		{
1794			uint32_t *value, cnt;
1795
1796			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1797			cnt = 0;
1798			SCTP_INP_RLOCK(inp);
1799			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1800				cnt++;
1801			}
1802			SCTP_INP_RUNLOCK(inp);
1803			*value = cnt;
1804			*optsize = sizeof(uint32_t);
1805		}
1806		break;
1807
1808	case SCTP_GET_ASSOC_ID_LIST:
1809		{
1810			struct sctp_assoc_ids *ids;
1811			unsigned int at, limit;
1812
1813			SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
1814			at = 0;
1815			limit = (*optsize - sizeof(uint32_t)) / sizeof(sctp_assoc_t);
1816			SCTP_INP_RLOCK(inp);
1817			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1818				if (at < limit) {
1819					ids->gaids_assoc_id[at++] = sctp_get_associd(stcb);
1820				} else {
1821					error = EINVAL;
1822					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1823					break;
1824				}
1825			}
1826			SCTP_INP_RUNLOCK(inp);
1827			ids->gaids_number_of_ids = at;
1828			*optsize = ((at * sizeof(sctp_assoc_t)) + sizeof(uint32_t));
1829		}
1830		break;
1831	case SCTP_CONTEXT:
1832		{
1833			struct sctp_assoc_value *av;
1834
1835			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1836			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1837
1838			if (stcb) {
1839				av->assoc_value = stcb->asoc.context;
1840				SCTP_TCB_UNLOCK(stcb);
1841			} else {
1842				SCTP_INP_RLOCK(inp);
1843				av->assoc_value = inp->sctp_context;
1844				SCTP_INP_RUNLOCK(inp);
1845			}
1846			*optsize = sizeof(*av);
1847		}
1848		break;
1849	case SCTP_VRF_ID:
1850		{
1851			uint32_t *default_vrfid;
1852
1853			SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize);
1854			*default_vrfid = inp->def_vrf_id;
1855			break;
1856		}
1857	case SCTP_GET_ASOC_VRF:
1858		{
1859			struct sctp_assoc_value *id;
1860
1861			SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize);
1862			SCTP_FIND_STCB(inp, stcb, id->assoc_id);
1863			if (stcb == NULL) {
1864				error = EINVAL;
1865				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1866				break;
1867			}
1868			id->assoc_value = stcb->asoc.vrf_id;
1869			break;
1870		}
1871	case SCTP_GET_VRF_IDS:
1872		{
1873			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
1874			error = EOPNOTSUPP;
1875			break;
1876		}
1877	case SCTP_GET_NONCE_VALUES:
1878		{
1879			struct sctp_get_nonce_values *gnv;
1880
1881			SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize);
1882			SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id);
1883
1884			if (stcb) {
1885				gnv->gn_peers_tag = stcb->asoc.peer_vtag;
1886				gnv->gn_local_tag = stcb->asoc.my_vtag;
1887				SCTP_TCB_UNLOCK(stcb);
1888			} else {
1889				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1890				error = ENOTCONN;
1891			}
1892			*optsize = sizeof(*gnv);
1893		}
1894		break;
1895	case SCTP_DELAYED_SACK:
1896		{
1897			struct sctp_sack_info *sack;
1898
1899			SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize);
1900			SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
1901			if (stcb) {
1902				sack->sack_delay = stcb->asoc.delayed_ack;
1903				sack->sack_freq = stcb->asoc.sack_freq;
1904				SCTP_TCB_UNLOCK(stcb);
1905			} else {
1906				SCTP_INP_RLOCK(inp);
1907				sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1908				sack->sack_freq = inp->sctp_ep.sctp_sack_freq;
1909				SCTP_INP_RUNLOCK(inp);
1910			}
1911			*optsize = sizeof(*sack);
1912		}
1913		break;
1914
1915	case SCTP_GET_SNDBUF_USE:
1916		{
1917			struct sctp_sockstat *ss;
1918
1919			SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize);
1920			SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id);
1921
1922			if (stcb) {
1923				ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size;
1924				ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue +
1925				    stcb->asoc.size_on_all_streams);
1926				SCTP_TCB_UNLOCK(stcb);
1927			} else {
1928				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1929				error = ENOTCONN;
1930			}
1931			*optsize = sizeof(struct sctp_sockstat);
1932		}
1933		break;
1934	case SCTP_MAX_BURST:
1935		{
1936			uint8_t *value;
1937
1938			SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize);
1939
1940			SCTP_INP_RLOCK(inp);
1941			*value = inp->sctp_ep.max_burst;
1942			SCTP_INP_RUNLOCK(inp);
1943			*optsize = sizeof(uint8_t);
1944		}
1945		break;
1946	case SCTP_MAXSEG:
1947		{
1948			struct sctp_assoc_value *av;
1949			int ovh;
1950
1951			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1952			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1953
1954			if (stcb) {
1955				av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc);
1956				SCTP_TCB_UNLOCK(stcb);
1957			} else {
1958				SCTP_INP_RLOCK(inp);
1959				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1960					ovh = SCTP_MED_OVERHEAD;
1961				} else {
1962					ovh = SCTP_MED_V4_OVERHEAD;
1963				}
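				/*
				 * A frag point at or above the default means
				 * the user never lowered it; report 0 (no
				 * explicit limit) in that case, otherwise
				 * subtract the per-packet overhead.
				 */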
1964				if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT)
1965					av->assoc_value = 0;
1966				else
1967					av->assoc_value = inp->sctp_frag_point - ovh;
1968				SCTP_INP_RUNLOCK(inp);
1969			}
1970			*optsize = sizeof(struct sctp_assoc_value);
1971		}
1972		break;
1973	case SCTP_GET_STAT_LOG:
1974		error = sctp_fill_stat_log(optval, optsize);
1975		break;
1976	case SCTP_EVENTS:
1977		{
1978			struct sctp_event_subscribe *events;
1979
1980			SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize);
1981			memset(events, 0, sizeof(*events));
1982			SCTP_INP_RLOCK(inp);
1983			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT))
1984				events->sctp_data_io_event = 1;
1985
1986			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT))
1987				events->sctp_association_event = 1;
1988
1989			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT))
1990				events->sctp_address_event = 1;
1991
1992			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
1993				events->sctp_send_failure_event = 1;
1994
1995			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR))
1996				events->sctp_peer_error_event = 1;
1997
1998			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
1999				events->sctp_shutdown_event = 1;
2000
2001			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT))
2002				events->sctp_partial_delivery_event = 1;
2003
2004			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
2005				events->sctp_adaptation_layer_event = 1;
2006
2007			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
2008				events->sctp_authentication_event = 1;
2009
2010			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT))
2011				events->sctp_sender_dry_event = 1;
2012
2013			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
2014				events->sctp_stream_reset_event = 1;
2015			SCTP_INP_RUNLOCK(inp);
2016			*optsize = sizeof(struct sctp_event_subscribe);
2017		}
2018		break;
2019
2020	case SCTP_ADAPTATION_LAYER:
2021		{
2022			uint32_t *value;
2023
2024			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2025
2026			SCTP_INP_RLOCK(inp);
2027			*value = inp->sctp_ep.adaptation_layer_indicator;
2028			SCTP_INP_RUNLOCK(inp);
2029			*optsize = sizeof(uint32_t);
2030		}
2031		break;
2032	case SCTP_SET_INITIAL_DBG_SEQ:
2033		{
2034			uint32_t *value;
2035
2036			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2037			SCTP_INP_RLOCK(inp);
2038			*value = inp->sctp_ep.initial_sequence_debug;
2039			SCTP_INP_RUNLOCK(inp);
2040			*optsize = sizeof(uint32_t);
2041		}
2042		break;
2043	case SCTP_GET_LOCAL_ADDR_SIZE:
2044		{
2045			uint32_t *value;
2046
2047			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2048			SCTP_INP_RLOCK(inp);
2049			*value = sctp_count_max_addresses(inp);
2050			SCTP_INP_RUNLOCK(inp);
2051			*optsize = sizeof(uint32_t);
2052		}
2053		break;
2054	case SCTP_GET_REMOTE_ADDR_SIZE:
2055		{
2056			uint32_t *value;
2057			size_t size;
2058			struct sctp_nets *net;
2059
2060			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2061			/* FIXME MT: change to sctp_assoc_value? */
2062			SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t)*value);
2063
2064			if (stcb) {
2065				size = 0;
2066				/* Count the sizes */
2067				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2068					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
2069					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2070						size += sizeof(struct sockaddr_in6);
2071					} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2072						size += sizeof(struct sockaddr_in);
2073					} else {
2074						/* huh */
2075						break;
2076					}
2077				}
2078				SCTP_TCB_UNLOCK(stcb);
2079				*value = (uint32_t) size;
2080			} else {
2081				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
2082				error = ENOTCONN;
2083			}
2084			*optsize = sizeof(uint32_t);
2085		}
2086		break;
2087	case SCTP_GET_PEER_ADDRESSES:
2088		/*
2089		 * Get the address information; an array is passed in
2090		 * to fill up and we pack it.
2091		 */
2092		{
2093			size_t cpsz, left;
2094			struct sockaddr_storage *sas;
2095			struct sctp_nets *net;
2096			struct sctp_getaddresses *saddr;
2097
2098			SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
2099			SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
2100
2101			if (stcb) {
2102				left = (*optsize) - sizeof(struct sctp_getaddresses);
2103				*optsize = sizeof(struct sctp_getaddresses);
2104				sas = (struct sockaddr_storage *)&saddr->addr[0];
2105
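				/*
				 * Walk the peer's address list, packing each
				 * sockaddr into the caller's buffer (mapping
				 * v4 addresses to v6 when requested) until we
				 * run out of room.
				 */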
2106				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2107					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
2108					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2109						cpsz = sizeof(struct sockaddr_in6);
2110					} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2111						cpsz = sizeof(struct sockaddr_in);
2112					} else {
2113						/* huh */
2114						break;
2115					}
2116					if (left < cpsz) {
2117						/* not enough room. */
2118						break;
2119					}
2120#ifdef INET6
2121					if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
2122					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
2123						/* Must map the address */
2124						in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
2125						    (struct sockaddr_in6 *)sas);
2126					} else {
2127#endif
2128						memcpy(sas, &net->ro._l_addr, cpsz);
2129#ifdef INET6
2130					}
2131#endif
2132					((struct sockaddr_in *)sas)->sin_port = stcb->rport;
2133
2134					sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
2135					left -= cpsz;
2136					*optsize += cpsz;
2137				}
2138				SCTP_TCB_UNLOCK(stcb);
2139			} else {
2140				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2141				error = ENOENT;
2142			}
2143		}
2144		break;
2145	case SCTP_GET_LOCAL_ADDRESSES:
2146		{
2147			size_t limit, actual;
2148			struct sockaddr_storage *sas;
2149			struct sctp_getaddresses *saddr;
2150
2151			SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
2152			SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
2153
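			/*
			 * The option buffer starts with the association id;
			 * sctp_fill_up_addresses() packs the local addresses
			 * into the space that follows it.
			 */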
2154			sas = (struct sockaddr_storage *)&saddr->addr[0];
2155			limit = *optsize - sizeof(sctp_assoc_t);
2156			actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
2157			if (stcb) {
2158				SCTP_TCB_UNLOCK(stcb);
2159			}
2160			*optsize = sizeof(struct sockaddr_storage) + actual;
2161		}
2162		break;
2163	case SCTP_PEER_ADDR_PARAMS:
2164		{
2165			struct sctp_paddrparams *paddrp;
2166			struct sctp_nets *net;
2167
2168			SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
2169			SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
2170
2171			net = NULL;
2172			if (stcb) {
2173				net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2174			} else {
2175				/*
2176				 * We increment the refcount here since
2177				 * sctp_findassociation_ep_addr() will do a
2178				 * decrement if it finds the stcb, as long as
2179				 * the locked tcb (last argument) is not a
2180				 * TCB, i.e. NULL.
2181				 */
2182				SCTP_INP_INCR_REF(inp);
2183				stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL);
2184				if (stcb == NULL) {
2185					SCTP_INP_DECR_REF(inp);
2186				}
2187			}
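			/*
			 * An association was found but the address matched no
			 * destination: only the wildcard address is acceptable
			 * here, meaning "return the association defaults".
			 */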
2188			if (stcb && (net == NULL)) {
2189				struct sockaddr *sa;
2190
2191				sa = (struct sockaddr *)&paddrp->spp_address;
2192				if (sa->sa_family == AF_INET) {
2193					struct sockaddr_in *sin;
2194
2195					sin = (struct sockaddr_in *)sa;
2196					if (sin->sin_addr.s_addr) {
2197						error = EINVAL;
2198						SCTP_TCB_UNLOCK(stcb);
2199						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2200						break;
2201					}
2202				} else if (sa->sa_family == AF_INET6) {
2203					struct sockaddr_in6 *sin6;
2204
2205					sin6 = (struct sockaddr_in6 *)sa;
2206					if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
2207						error = EINVAL;
2208						SCTP_TCB_UNLOCK(stcb);
2209						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2210						break;
2211					}
2212				} else {
2213					error = EAFNOSUPPORT;
2214					SCTP_TCB_UNLOCK(stcb);
2215					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2216					break;
2217				}
2218			}
2219			if (stcb) {
2220				/* Applies to the specific association */
2221				paddrp->spp_flags = 0;
2222				if (net) {
2223					int ovh;
2224
2225					if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2226						ovh = SCTP_MED_OVERHEAD;
2227					} else {
2228						ovh = SCTP_MED_V4_OVERHEAD;
2229					}
2230
2231
2232					paddrp->spp_pathmaxrxt = net->failure_threshold;
2233					paddrp->spp_pathmtu = net->mtu - ovh;
2234					/* get flags for HB */
2235					if (net->dest_state & SCTP_ADDR_NOHB)
2236						paddrp->spp_flags |= SPP_HB_DISABLE;
2237					else
2238						paddrp->spp_flags |= SPP_HB_ENABLE;
2239					/* get flags for PMTU */
2240					if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
2241						paddrp->spp_flags |= SPP_PMTUD_ENABLE;
2242					} else {
2243						paddrp->spp_flags |= SPP_PMTUD_DISABLE;
2244					}
2245#ifdef INET
2246					if (net->ro._l_addr.sin.sin_family == AF_INET) {
2247						paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc;
2248						paddrp->spp_flags |= SPP_IPV4_TOS;
2249					}
2250#endif
2251#ifdef INET6
2252					if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
2253						paddrp->spp_ipv6_flowlabel = net->tos_flowlabel;
2254						paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2255					}
2256#endif
2257				} else {
2258					/*
2259					 * No destination so return default
2260					 * value
2261					 */
2262					int cnt = 0;
2263
2264					paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
2265					paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc);
2266#ifdef INET
2267					paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc;
2268					paddrp->spp_flags |= SPP_IPV4_TOS;
2269#endif
2270#ifdef INET6
2271					paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel;
2272					paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2273#endif
2274					/* default settings should be these */
2275					if (stcb->asoc.hb_is_disabled == 0) {
2276						paddrp->spp_flags |= SPP_HB_ENABLE;
2277					} else {
2278						paddrp->spp_flags |= SPP_HB_DISABLE;
2279					}
2280					TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2281						if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
2282							cnt++;
2283						}
2284					}
2285					if (cnt) {
2286						paddrp->spp_flags |= SPP_PMTUD_ENABLE;
2287					}
2288				}
2289				paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
2290				paddrp->spp_assoc_id = sctp_get_associd(stcb);
2291				SCTP_TCB_UNLOCK(stcb);
2292			} else {
2293				/* Use endpoint defaults */
2294				SCTP_INP_RLOCK(inp);
2295				paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
2296				paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
2297				paddrp->spp_assoc_id = (sctp_assoc_t) 0;
				/*
				 * Default behavior, no stcb: start from a clean
				 * flag value (PMTU discovery on) so the bits
				 * below are not OR'ed into caller-supplied data.
				 */
				paddrp->spp_flags = SPP_PMTUD_ENABLE;
2298				/* get inp's default */
2299#ifdef INET
2300				paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos;
2301				paddrp->spp_flags |= SPP_IPV4_TOS;
2302#endif
2303#ifdef INET6
2304				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2305					paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
2306					paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2307				}
2308#endif
2309				/* can't return this */
2310				paddrp->spp_pathmtu = 0;
2311
2314
2315				if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
2316					paddrp->spp_flags |= SPP_HB_ENABLE;
2317				} else {
2318					paddrp->spp_flags |= SPP_HB_DISABLE;
2319				}
2320				SCTP_INP_RUNLOCK(inp);
2321			}
2322			*optsize = sizeof(struct sctp_paddrparams);
2323		}
2324		break;
2325	case SCTP_GET_PEER_ADDR_INFO:
2326		{
2327			struct sctp_paddrinfo *paddri;
2328			struct sctp_nets *net;
2329
2330			SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize);
2331			SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id);
2332
2333			net = NULL;
2334			if (stcb) {
2335				net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address);
2336			} else {
2337				/*
2338				 * We increment the refcount here since
2339				 * sctp_findassociation_ep_addr() will do a
2340				 * decrement if it finds the stcb, as long as
2341				 * the locked tcb (last argument) is not a
2342				 * TCB, i.e. NULL.
2343				 */
2344				SCTP_INP_INCR_REF(inp);
2345				stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL);
2346				if (stcb == NULL) {
2347					SCTP_INP_DECR_REF(inp);
2348				}
2349			}
2350
2351			if ((stcb) && (net)) {
2352				paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB);
2353				paddri->spinfo_cwnd = net->cwnd;
2354				paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
2355				paddri->spinfo_rto = net->RTO;
2356				paddri->spinfo_assoc_id = sctp_get_associd(stcb);
2357				SCTP_TCB_UNLOCK(stcb);
2358			} else {
2359				if (stcb) {
2360					SCTP_TCB_UNLOCK(stcb);
2361				}
2362				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2363				error = ENOENT;
2364			}
2365			*optsize = sizeof(struct sctp_paddrinfo);
2366		}
2367		break;
2368	case SCTP_PCB_STATUS:
2369		{
2370			struct sctp_pcbinfo *spcb;
2371
2372			SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize);
2373			sctp_fill_pcbinfo(spcb);
2374			*optsize = sizeof(struct sctp_pcbinfo);
2375		}
2376		break;
2377
2378	case SCTP_STATUS:
2379		{
2380			struct sctp_nets *net;
2381			struct sctp_status *sstat;
2382
2383			SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize);
2384			SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id);
2385
2386			if (stcb == NULL) {
2387				error = EINVAL;
2388				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2389				break;
2390			}
2391			/*
2392			 * I think passing the state is fine since
2393			 * sctp_constants.h will be available to the user
2394			 * land.
2395			 */
2396			sstat->sstat_state = stcb->asoc.state;
2397			sstat->sstat_assoc_id = sctp_get_associd(stcb);
2398			sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
2399			sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
2400			/*
2401			 * We can't include chunks that have been passed to
2402			 * the socket layer. Only things in queue.
2403			 */
2404			sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue +
2405			    stcb->asoc.cnt_on_all_streams);
2406
2407
2408			sstat->sstat_instrms = stcb->asoc.streamincnt;
2409			sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
2410			sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
2411			memcpy(&sstat->sstat_primary.spinfo_address,
2412			    &stcb->asoc.primary_destination->ro._l_addr,
2413			    ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
2414			net = stcb->asoc.primary_destination;
2415			((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
2416			/*
2417			 * Again the user can get info from sctp_constants.h
2418			 * for what the state of the network is.
2419			 */
2420			sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
2421			sstat->sstat_primary.spinfo_cwnd = net->cwnd;
2422			sstat->sstat_primary.spinfo_srtt = net->lastsa;
2423			sstat->sstat_primary.spinfo_rto = net->RTO;
2424			sstat->sstat_primary.spinfo_mtu = net->mtu;
2425			sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
2426			SCTP_TCB_UNLOCK(stcb);
2427			*optsize = sizeof(*sstat);
2428		}
2429		break;
2430	case SCTP_RTOINFO:
2431		{
2432			struct sctp_rtoinfo *srto;
2433
2434			SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize);
2435			SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
2436
2437			if (stcb) {
2438				srto->srto_initial = stcb->asoc.initial_rto;
2439				srto->srto_max = stcb->asoc.maxrto;
2440				srto->srto_min = stcb->asoc.minrto;
2441				SCTP_TCB_UNLOCK(stcb);
2442			} else {
2443				SCTP_INP_RLOCK(inp);
2444				srto->srto_initial = inp->sctp_ep.initial_rto;
2445				srto->srto_max = inp->sctp_ep.sctp_maxrto;
2446				srto->srto_min = inp->sctp_ep.sctp_minrto;
2447				SCTP_INP_RUNLOCK(inp);
2448			}
2449			*optsize = sizeof(*srto);
2450		}
2451		break;
2452	case SCTP_ASSOCINFO:
2453		{
2454			struct sctp_assocparams *sasoc;
2455			uint32_t oldval;
2456
2457			SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize);
2458			SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
2459
2460			if (stcb) {
2461				oldval = sasoc->sasoc_cookie_life;
2462				sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life);
2463				sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
2464				sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
2465				sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
2466				sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
2467				SCTP_TCB_UNLOCK(stcb);
2468			} else {
2469				SCTP_INP_RLOCK(inp);
2470				sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life);
2471				sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
2472				sasoc->sasoc_number_peer_destinations = 0;
2473				sasoc->sasoc_peer_rwnd = 0;
2474				sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv);
2475				SCTP_INP_RUNLOCK(inp);
2476			}
2477			*optsize = sizeof(*sasoc);
2478		}
2479		break;
2480	case SCTP_DEFAULT_SEND_PARAM:
2481		{
2482			struct sctp_sndrcvinfo *s_info;
2483
2484			SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize);
2485			SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
2486
2487			if (stcb) {
2488				memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send));
2489				SCTP_TCB_UNLOCK(stcb);
2490			} else {
2491				SCTP_INP_RLOCK(inp);
2492				memcpy(s_info, &inp->def_send, sizeof(inp->def_send));
2493				SCTP_INP_RUNLOCK(inp);
2494			}
2495			*optsize = sizeof(*s_info);
2496		}
2497		break;
2498	case SCTP_INITMSG:
2499		{
2500			struct sctp_initmsg *sinit;
2501
2502			SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize);
2503			SCTP_INP_RLOCK(inp);
2504			sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
2505			sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
2506			sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
2507			sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
2508			SCTP_INP_RUNLOCK(inp);
2509			*optsize = sizeof(*sinit);
2510		}
2511		break;
2512	case SCTP_PRIMARY_ADDR:
2513		/* we allow a "get" operation on this */
2514		{
2515			struct sctp_setprim *ssp;
2516
2517			SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize);
2518			SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id);
2519
2520			if (stcb) {
2521				/* simply copy out the sockaddr_storage... */
2522				int len;
2523
2524				len = *optsize;
2525				if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len)
2526					len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len;
2527
2528				memcpy(&ssp->ssp_addr,
2529				    &stcb->asoc.primary_destination->ro._l_addr,
2530				    len);
2531				SCTP_TCB_UNLOCK(stcb);
2532			} else {
2533				error = EINVAL;
2534				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2535			}
2536			*optsize = sizeof(*ssp);
2537		}
2538		break;
2539
2540	case SCTP_HMAC_IDENT:
2541		{
2542			struct sctp_hmacalgo *shmac;
2543			sctp_hmaclist_t *hmaclist;
2544			uint32_t size;
2545			int i;
2546
2547			SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize);
2548
2549			SCTP_INP_RLOCK(inp);
2550			hmaclist = inp->sctp_ep.local_hmacs;
2551			if (hmaclist == NULL) {
2552				/* no HMACs to return */
2553				*optsize = sizeof(*shmac);
2554				SCTP_INP_RUNLOCK(inp);
2555				break;
2556			}
2557			/* is there room for all of the hmac ids? */
2558			size = sizeof(*shmac) + (hmaclist->num_algo *
2559			    sizeof(shmac->shmac_idents[0]));
2560			if ((size_t)(*optsize) < size) {
2561				error = EINVAL;
2562				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2563				SCTP_INP_RUNLOCK(inp);
2564				break;
2565			}
2566			/* copy in the list */
2567			shmac->shmac_number_of_idents = hmaclist->num_algo;
2568			for (i = 0; i < hmaclist->num_algo; i++) {
2569				shmac->shmac_idents[i] = hmaclist->hmac[i];
2570			}
2571			SCTP_INP_RUNLOCK(inp);
2572			*optsize = size;
2573			break;
2574		}
2575	case SCTP_AUTH_ACTIVE_KEY:
2576		{
2577			struct sctp_authkeyid *scact;
2578
2579			SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize);
2580			SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
2581
2582			if (stcb) {
2583				/* get the active key on the assoc */
2584				scact->scact_keynumber = stcb->asoc.authinfo.active_keyid;
2585				SCTP_TCB_UNLOCK(stcb);
2586			} else {
2587				/* get the endpoint active key */
2588				SCTP_INP_RLOCK(inp);
2589				scact->scact_keynumber = inp->sctp_ep.default_keyid;
2590				SCTP_INP_RUNLOCK(inp);
2591			}
2592			*optsize = sizeof(*scact);
2593			break;
2594		}
2595	case SCTP_LOCAL_AUTH_CHUNKS:
2596		{
2597			struct sctp_authchunks *sac;
2598			sctp_auth_chklist_t *chklist = NULL;
2599			size_t size = 0;
2600
2601			SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2602			SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2603
2604			if (stcb) {
2605				/* get off the assoc */
2606				chklist = stcb->asoc.local_auth_chunks;
2607				/* is there enough space? */
2608				size = sctp_auth_get_chklist_size(chklist);
2609				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2610					error = EINVAL;
2611					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2612				} else {
2613					/* copy in the chunks */
2614					(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2615				}
2616				SCTP_TCB_UNLOCK(stcb);
2617			} else {
2618				/* get off the endpoint */
2619				SCTP_INP_RLOCK(inp);
2620				chklist = inp->sctp_ep.local_auth_chunks;
2621				/* is there enough space? */
2622				size = sctp_auth_get_chklist_size(chklist);
2623				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2624					error = EINVAL;
2625					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2626				} else {
2627					/* copy in the chunks */
2628					(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2629				}
2630				SCTP_INP_RUNLOCK(inp);
2631			}
2632			*optsize = sizeof(struct sctp_authchunks) + size;
2633			break;
2634		}
2635	case SCTP_PEER_AUTH_CHUNKS:
2636		{
2637			struct sctp_authchunks *sac;
2638			sctp_auth_chklist_t *chklist = NULL;
2639			size_t size = 0;
2640
2641			SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2642			SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2643
2644			if (stcb) {
2645				/* get off the assoc */
2646				chklist = stcb->asoc.peer_auth_chunks;
2647				/* is there enough space? */
2648				size = sctp_auth_get_chklist_size(chklist);
2649				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2650					error = EINVAL;
2651					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2652				} else {
2653					/* copy in the chunks */
2654					(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2655				}
2656				SCTP_TCB_UNLOCK(stcb);
2657			} else {
2658				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2659				error = ENOENT;
2660			}
2661			*optsize = sizeof(struct sctp_authchunks) + size;
2662			break;
2663		}
2664
2665
2666	default:
2667		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
2668		error = ENOPROTOOPT;
2669		*optsize = 0;
2670		break;
2671	}			/* end switch (sopt->sopt_name) */
2672	return (error);
2673}
2674
2675static int
2676sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
2677    void *p)
2678{
2679	int error, set_opt;
2680	uint32_t *mopt;
2681	struct sctp_tcb *stcb = NULL;
2682	struct sctp_inpcb *inp = NULL;
2683	uint32_t vrf_id;
2684
2685	if (optval == NULL) {
2686		SCTP_PRINTF("optval is NULL\n");
2687		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2688		return (EINVAL);
2689	}
2690	inp = (struct sctp_inpcb *)so->so_pcb;
2691	if (inp == NULL) {
2692		SCTP_PRINTF("inp is NULL?\n");
2693		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2694		return (EINVAL);
2695	}
2696	vrf_id = inp->def_vrf_id;
2697
2698	error = 0;
2699	switch (optname) {
2700	case SCTP_NODELAY:
2701	case SCTP_AUTOCLOSE:
2702	case SCTP_AUTO_ASCONF:
2703	case SCTP_EXPLICIT_EOR:
2704	case SCTP_DISABLE_FRAGMENTS:
2705	case SCTP_USE_EXT_RCVINFO:
2706	case SCTP_I_WANT_MAPPED_V4_ADDR:
2707		/* copy in the option value */
2708		SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
2709		set_opt = 0;
2710		if (error)
2711			break;
2712		switch (optname) {
2713		case SCTP_DISABLE_FRAGMENTS:
2714			set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
2715			break;
2716		case SCTP_AUTO_ASCONF:
2717			/*
2718			 * NOTE: we don't really support this flag
2719			 */
2720			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2721				/* only valid for bound all sockets */
2722				set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
2723			} else {
2724				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2725				return (EINVAL);
2726			}
2727			break;
2728		case SCTP_EXPLICIT_EOR:
2729			set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
2730			break;
2731		case SCTP_USE_EXT_RCVINFO:
2732			set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
2733			break;
2734		case SCTP_I_WANT_MAPPED_V4_ADDR:
2735			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2736				set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
2737			} else {
2738				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2739				return (EINVAL);
2740			}
2741			break;
2742		case SCTP_NODELAY:
2743			set_opt = SCTP_PCB_FLAGS_NODELAY;
2744			break;
2745		case SCTP_AUTOCLOSE:
2746			if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2747			    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2748				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2749				return (EINVAL);
2750			}
2751			set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
2752			/*
2753			 * The value is given in seconds and stored in ticks. Note
2754			 * this does not affect old associations, only new ones.
2755			 */
2756			inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt);
2757			break;
2758		}
2759		SCTP_INP_WLOCK(inp);
2760		if (*mopt != 0) {
2761			sctp_feature_on(inp, set_opt);
2762		} else {
2763			sctp_feature_off(inp, set_opt);
2764		}
2765		SCTP_INP_WUNLOCK(inp);
2766		break;
2767	case SCTP_REUSE_PORT:
2768		{
2769			SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
2770			if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
2771				/* Can't set it after we are bound */
2772				error = EINVAL;
2773				break;
2774			}
2775			if ((inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) {
2776				/* Can't do this for a 1-m socket */
2777				error = EINVAL;
2778				break;
2779			}
2780			if (*mopt != 0)
2781				sctp_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE);
2782			else
2783				sctp_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE);
2784		}
2785		break;
2786	case SCTP_PARTIAL_DELIVERY_POINT:
2787		{
2788			uint32_t *value;
2789
2790			SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
2791			if (*value > SCTP_SB_LIMIT_RCV(so)) {
2792				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2793				error = EINVAL;
2794				break;
2795			}
2796			inp->partial_delivery_point = *value;
2797		}
2798		break;
2799	case SCTP_FRAGMENT_INTERLEAVE:
2800		/* not yet until we re-write sctp_recvmsg() */
2801		{
2802			uint32_t *level;
2803
2804			SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize);
2805			if (*level == SCTP_FRAG_LEVEL_2) {
2806				sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2807				sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2808			} else if (*level == SCTP_FRAG_LEVEL_1) {
2809				sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2810				sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2811			} else if (*level == SCTP_FRAG_LEVEL_0) {
2812				sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2813				sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2814
2815			} else {
2816				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2817				error = EINVAL;
2818			}
2819		}
2820		break;
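		/*
		 * CMT (Concurrent Multipath Transfer) can only be toggled per
		 * endpoint/association when the sctp_cmt_on_off sysctl has
		 * enabled it globally.
		 */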
2821	case SCTP_CMT_ON_OFF:
2822		if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
2823			struct sctp_assoc_value *av;
2824
2825			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2826			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2827			if (stcb) {
2828				if (av->assoc_value != 0)
2829					stcb->asoc.sctp_cmt_on_off = 1;
2830				else
2831					stcb->asoc.sctp_cmt_on_off = 0;
2832				SCTP_TCB_UNLOCK(stcb);
2833			} else {
2834				SCTP_INP_WLOCK(inp);
2835				if (av->assoc_value != 0)
2836					inp->sctp_cmt_on_off = 1;
2837				else
2838					inp->sctp_cmt_on_off = 0;
2839				SCTP_INP_WUNLOCK(inp);
2840			}
2841		} else {
2842			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
2843			error = ENOPROTOOPT;
2844		}
2845		break;
2846		/* JRS - Set socket option for pluggable congestion control */
2847	case SCTP_PLUGGABLE_CC:
2848		{
2849			struct sctp_assoc_value *av;
2850
2851			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2852			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2853			if (stcb) {
2854				switch (av->assoc_value) {
2855					/*
2856					 * JRS - Standard TCP congestion
2857					 * control
2858					 */
2859				case SCTP_CC_RFC2581:
2860					{
2861						stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
2862						stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
2863						stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
2864						stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
2865						stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
2866						stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
2867						stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2868						stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2869						stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
2870						SCTP_TCB_UNLOCK(stcb);
2871						break;
2872					}
2873					/*
2874					 * JRS - High Speed TCP congestion
2875					 * control (Floyd)
2876					 */
2877				case SCTP_CC_HSTCP:
2878					{
2879						stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
2880						stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
2881						stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
2882						stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
2883						stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
2884						stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
2885						stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2886						stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2887						stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
2888						SCTP_TCB_UNLOCK(stcb);
2889						break;
2890					}
2891					/* JRS - HTCP congestion control */
2892				case SCTP_CC_HTCP:
2893					{
2894						stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
2895						stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
2896						stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
2897						stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
2898						stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
2899						stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
2900						stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2901						stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2902						stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
2903						SCTP_TCB_UNLOCK(stcb);
2904						break;
2905					}
2906					/*
2907					 * JRS - All other values are
2908					 * invalid
2909					 */
2910				default:
2911					{
2912						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2913						error = EINVAL;
2914						SCTP_TCB_UNLOCK(stcb);
2915						break;
2916					}
2917				}
2918			} else {
2919				switch (av->assoc_value) {
2920				case SCTP_CC_RFC2581:
2921				case SCTP_CC_HSTCP:
2922				case SCTP_CC_HTCP:
2923					inp->sctp_ep.sctp_default_cc_module = av->assoc_value;
2924					break;
2925				default:
2926					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2927					error = EINVAL;
2928					break;
2929				}
2930			}
2931		}
2932		break;
2933	case SCTP_CLR_STAT_LOG:
2934		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2935		error = EOPNOTSUPP;
2936		break;
2937	case SCTP_CONTEXT:
2938		{
2939			struct sctp_assoc_value *av;
2940
2941			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2942			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2943
2944			if (stcb) {
2945				stcb->asoc.context = av->assoc_value;
2946				SCTP_TCB_UNLOCK(stcb);
2947			} else {
2948				SCTP_INP_WLOCK(inp);
2949				inp->sctp_context = av->assoc_value;
2950				SCTP_INP_WUNLOCK(inp);
2951			}
2952		}
2953		break;
2954	case SCTP_VRF_ID:
2955		{
2956			uint32_t *default_vrfid;
2957
2958			SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize);
2959			if (*default_vrfid > SCTP_MAX_VRF_ID) {
2960				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2961				error = EINVAL;
2962				break;
2963			}
2964			inp->def_vrf_id = *default_vrfid;
2965			break;
2966		}
2967	case SCTP_DEL_VRF_ID:
2968		{
2969			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2970			error = EOPNOTSUPP;
2971			break;
2972		}
2973	case SCTP_ADD_VRF_ID:
2974		{
2975			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2976			error = EOPNOTSUPP;
2977			break;
2978		}
2979	case SCTP_DELAYED_SACK:
2980		{
2981			struct sctp_sack_info *sack;
2982
2983			SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize);
2984			SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
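			/*
			 * A delay or frequency of 0 leaves the current setting
			 * untouched; the delay is clamped to the maximum and
			 * rounded up to at least one tick.
			 */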
2985			if (sack->sack_delay) {
2986				if (sack->sack_delay > SCTP_MAX_SACK_DELAY)
2987					sack->sack_delay = SCTP_MAX_SACK_DELAY;
2988			}
2989			if (stcb) {
2990				if (sack->sack_delay) {
2991					if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
2992						sack->sack_delay = TICKS_TO_MSEC(1);
2993					}
2994					stcb->asoc.delayed_ack = sack->sack_delay;
2995				}
2996				if (sack->sack_freq) {
2997					stcb->asoc.sack_freq = sack->sack_freq;
2998				}
2999				SCTP_TCB_UNLOCK(stcb);
3000			} else {
3001				SCTP_INP_WLOCK(inp);
3002				if (sack->sack_delay) {
3003					if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
3004						sack->sack_delay = TICKS_TO_MSEC(1);
3005					}
3006					inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay);
3007				}
3008				if (sack->sack_freq) {
3009					inp->sctp_ep.sctp_sack_freq = sack->sack_freq;
3010				}
3011				SCTP_INP_WUNLOCK(inp);
3012			}
3013			break;
3014		}
3015	case SCTP_AUTH_CHUNK:
3016		{
3017			struct sctp_authchunk *sauth;
3018
3019			SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize);
3020
3021			SCTP_INP_WLOCK(inp);
3022			if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) {
3023				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3024				error = EINVAL;
3025			}
3026			SCTP_INP_WUNLOCK(inp);
3027			break;
3028		}
3029	case SCTP_AUTH_KEY:
3030		{
3031			struct sctp_authkey *sca;
3032			struct sctp_keyhead *shared_keys;
3033			sctp_sharedkey_t *shared_key;
3034			sctp_key_t *key = NULL;
3035			size_t size;
3036
3037			SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize);
3038			SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id);
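			/* The raw key bytes follow the sctp_authkey header. */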
3039			size = optsize - sizeof(*sca);
3040
3041			if (stcb) {
3042				/* set it on the assoc */
3043				shared_keys = &stcb->asoc.shared_keys;
3044				/* clear the cached keys for this key id */
3045				sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
3046				/*
3047				 * create the new shared key and
3048				 * insert/replace it
3049				 */
3050				if (size > 0) {
3051					key = sctp_set_key(sca->sca_key, (uint32_t) size);
3052					if (key == NULL) {
3053						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3054						error = ENOMEM;
3055						SCTP_TCB_UNLOCK(stcb);
3056						break;
3057					}
3058				}
3059				shared_key = sctp_alloc_sharedkey();
3060				if (shared_key == NULL) {
3061					sctp_free_key(key);
3062					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3063					error = ENOMEM;
3064					SCTP_TCB_UNLOCK(stcb);
3065					break;
3066				}
3067				shared_key->key = key;
3068				shared_key->keyid = sca->sca_keynumber;
3069				error = sctp_insert_sharedkey(shared_keys, shared_key);
3070				SCTP_TCB_UNLOCK(stcb);
3071			} else {
3072				/* set it on the endpoint */
3073				SCTP_INP_WLOCK(inp);
3074				shared_keys = &inp->sctp_ep.shared_keys;
3075				/*
3076				 * clear the cached keys on all assocs for
3077				 * this key id
3078				 */
3079				sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber);
3080				/*
3081				 * create the new shared key and
3082				 * insert/replace it
3083				 */
3084				if (size > 0) {
3085					key = sctp_set_key(sca->sca_key, (uint32_t) size);
3086					if (key == NULL) {
3087						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3088						error = ENOMEM;
3089						SCTP_INP_WUNLOCK(inp);
3090						break;
3091					}
3092				}
3093				shared_key = sctp_alloc_sharedkey();
3094				if (shared_key == NULL) {
3095					sctp_free_key(key);
3096					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3097					error = ENOMEM;
3098					SCTP_INP_WUNLOCK(inp);
3099					break;
3100				}
3101				shared_key->key = key;
3102				shared_key->keyid = sca->sca_keynumber;
3103				error = sctp_insert_sharedkey(shared_keys, shared_key);
3104				SCTP_INP_WUNLOCK(inp);
3105			}
3106			break;
3107		}
3108	case SCTP_HMAC_IDENT:
3109		{
3110			struct sctp_hmacalgo *shmac;
3111			sctp_hmaclist_t *hmaclist;
3112			uint16_t hmacid;
3113			uint32_t i;
3114			size_t found;
3116
3117			SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize);
3118			if (optsize < sizeof(struct sctp_hmacalgo) + shmac->shmac_number_of_idents * sizeof(uint16_t)) {
3119				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3120				error = EINVAL;
3121				break;
3122			}
3123			hmaclist = sctp_alloc_hmaclist(shmac->shmac_number_of_idents);
3124			if (hmaclist == NULL) {
3125				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3126				error = ENOMEM;
3127				break;
3128			}
3129			for (i = 0; i < shmac->shmac_number_of_idents; i++) {
3130				hmacid = shmac->shmac_idents[i];
3131				if (sctp_auth_add_hmacid(hmaclist, hmacid)) {
3132					/* invalid HMACs were found */
3133					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3134					error = EINVAL;
3135					sctp_free_hmaclist(hmaclist);
3136					goto sctp_set_hmac_done;
3137				}
3138			}
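			/*
			 * SHA-1 is the mandatory-to-implement HMAC (RFC 4895),
			 * so reject any list that does not include it.
			 */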
3139			found = 0;
3140			for (i = 0; i < hmaclist->num_algo; i++) {
3141				if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) {
3142					/* already in list */
3143					found = 1;
3144				}
3145			}
3146			if (!found) {
3147				sctp_free_hmaclist(hmaclist);
3148				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3149				error = EINVAL;
3150				break;
3151			}
3152			/* set it on the endpoint */
3153			SCTP_INP_WLOCK(inp);
3154			if (inp->sctp_ep.local_hmacs)
3155				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
3156			inp->sctp_ep.local_hmacs = hmaclist;
3157			SCTP_INP_WUNLOCK(inp);
3158	sctp_set_hmac_done:
3159			break;
3160		}
3161	case SCTP_AUTH_ACTIVE_KEY:
3162		{
3163			struct sctp_authkeyid *scact;
3164
3165			SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid,
3166			    optsize);
3167			SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
3168
3169			/* set the active key on the right place */
3170			if (stcb) {
3171				/* set the active key on the assoc */
3172				if (sctp_auth_setactivekey(stcb,
3173				    scact->scact_keynumber)) {
3174					SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3175					    SCTP_FROM_SCTP_USRREQ,
3176					    EINVAL);
3177					error = EINVAL;
3178				}
3179				SCTP_TCB_UNLOCK(stcb);
3180			} else {
3181				/* set the active key on the endpoint */
3182				SCTP_INP_WLOCK(inp);
3183				if (sctp_auth_setactivekey_ep(inp,
3184				    scact->scact_keynumber)) {
3185					SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3186					    SCTP_FROM_SCTP_USRREQ,
3187					    EINVAL);
3188					error = EINVAL;
3189				}
3190				SCTP_INP_WUNLOCK(inp);
3191			}
3192			break;
3193		}
3194	case SCTP_AUTH_DELETE_KEY:
3195		{
3196			struct sctp_authkeyid *scdel;
3197
3198			SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid,
3199			    optsize);
3200			SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id);
3201
3202			/* delete the key from the right place */
3203			if (stcb) {
3204				if (sctp_delete_sharedkey(stcb,
3205				    scdel->scact_keynumber)) {
3206					SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3207					    SCTP_FROM_SCTP_USRREQ,
3208					    EINVAL);
3209					error = EINVAL;
3210				}
3211				SCTP_TCB_UNLOCK(stcb);
3212			} else {
3213				SCTP_INP_WLOCK(inp);
3214				if (sctp_delete_sharedkey_ep(inp,
3215				    scdel->scact_keynumber)) {
3216					SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3217					    SCTP_FROM_SCTP_USRREQ,
3218					    EINVAL);
3219					error = EINVAL;
3220				}
3221				SCTP_INP_WUNLOCK(inp);
3222			}
3223			break;
3224		}
3225	case SCTP_AUTH_DEACTIVATE_KEY:
3226		{
3227			struct sctp_authkeyid *keyid;
3228
3229			SCTP_CHECK_AND_CAST(keyid, optval, struct sctp_authkeyid,
3230			    optsize);
3231			SCTP_FIND_STCB(inp, stcb, keyid->scact_assoc_id);
3232
3233			/* deactivate the key from the right place */
3234			if (stcb) {
3235				if (sctp_deact_sharedkey(stcb,
3236				    keyid->scact_keynumber)) {
3237					SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3238					    SCTP_FROM_SCTP_USRREQ,
3239					    EINVAL);
3240					error = EINVAL;
3241				}
3242				SCTP_TCB_UNLOCK(stcb);
3243			} else {
3244				SCTP_INP_WLOCK(inp);
3245				if (sctp_deact_sharedkey_ep(inp,
3246				    keyid->scact_keynumber)) {
3247					SCTP_LTRACE_ERR_RET(inp, NULL, NULL,
3248					    SCTP_FROM_SCTP_USRREQ,
3249					    EINVAL);
3250					error = EINVAL;
3251				}
3252				SCTP_INP_WUNLOCK(inp);
3253			}
3254			break;
3255		}
3256
3257	case SCTP_RESET_STREAMS:
3258		{
3259			struct sctp_stream_reset *strrst;
3260			uint8_t send_in = 0, send_tsn = 0, send_out = 0,
3261			        addstream = 0;
3262			uint16_t addstrmcnt = 0;
3263			int i;
3264
3265			SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize);
3266			SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id);
3267
3268			if (stcb == NULL) {
3269				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
3270				error = ENOENT;
3271				break;
3272			}
3273			if (stcb->asoc.peer_supports_strreset == 0) {
3274				/*
3275				 * Peer does not support it, we return
3276				 * protocol not supported since this is true
3277				 * for this feature and this peer, not the
3278				 * socket request in general.
3279				 */
3280				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT);
3281				error = EPROTONOSUPPORT;
3282				SCTP_TCB_UNLOCK(stcb);
3283				break;
3284			}
3285			if (stcb->asoc.stream_reset_outstanding) {
3286				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
3287				error = EALREADY;
3288				SCTP_TCB_UNLOCK(stcb);
3289				break;
3290			}
3291			if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
3292				send_in = 1;
3293			} else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
3294				send_out = 1;
3295			} else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
3296				send_in = 1;
3297				send_out = 1;
3298			} else if (strrst->strrst_flags == SCTP_RESET_TSN) {
3299				send_tsn = 1;
3300			} else if (strrst->strrst_flags == SCTP_RESET_ADD_STREAMS) {
3301				if (send_tsn ||
3302				    send_in ||
3303				    send_out) {
3304					/* We can't do that and add streams */
3305					error = EINVAL;
3306					goto skip_stuff;
3307				}
3308				if (stcb->asoc.stream_reset_outstanding) {
3309					error = EBUSY;
3310					goto skip_stuff;
3311				}
3312				addstream = 1;
3313				/* We allocate here */
3314				addstrmcnt = strrst->strrst_num_streams;
3315				if ((int)(addstrmcnt + stcb->asoc.streamoutcnt) > 0xffff) {
3316					/* You can't have more than 64k */
3317					error = EINVAL;
3318					goto skip_stuff;
3319				}
3320				if ((stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt) < addstrmcnt) {
3321					/* Need to allocate more */
3322					struct sctp_stream_out *oldstream;
3323					struct sctp_stream_queue_pending *sp;
3324					int removed;
3325
3326					oldstream = stcb->asoc.strmout;
3327					/* get some more */
3328					SCTP_MALLOC(stcb->asoc.strmout, struct sctp_stream_out *,
3329					    ((stcb->asoc.streamoutcnt + addstrmcnt) * sizeof(struct sctp_stream_out)),
3330					    SCTP_M_STRMO);
3331					if (stcb->asoc.strmout == NULL) {
3332						stcb->asoc.strmout = oldstream;
3333						error = ENOMEM;
3334						goto skip_stuff;
3335					}
3336					/*
3337					 * Ok now we proceed with copying
3338					 * the old out stuff and
3339					 * initializing the new stuff.
3340					 */
3341					SCTP_TCB_SEND_LOCK(stcb);
3342					for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
3343						TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3344						stcb->asoc.strmout[i].next_sequence_sent = oldstream[i].next_sequence_sent;
3345						stcb->asoc.strmout[i].last_msg_incomplete = oldstream[i].last_msg_incomplete;
3346						stcb->asoc.strmout[i].stream_no = i;
3347						if (oldstream[i].next_spoke.tqe_next) {
3348							sctp_remove_from_wheel(stcb, &stcb->asoc, &oldstream[i], 1);
3349							stcb->asoc.strmout[i].next_spoke.tqe_next = NULL;
3350							stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL;
3351							removed = 1;
3352						} else {
3353							/* not on out wheel */
3354							stcb->asoc.strmout[i].next_spoke.tqe_next = NULL;
3355							stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL;
3356							removed = 0;
3357						}
3358						/*
3359						 * now anything on those
3360						 * queues?
3361						 */
3362						while (TAILQ_EMPTY(&oldstream[i].outqueue) == 0) {
3363							sp = TAILQ_FIRST(&oldstream[i].outqueue);
3364							TAILQ_REMOVE(&oldstream[i].outqueue, sp, next);
3365							TAILQ_INSERT_TAIL(&stcb->asoc.strmout[i].outqueue, sp, next);
3366						}
3367						/* Did we disrupt the wheel? */
3368						if (removed) {
3369							sctp_insert_on_wheel(stcb,
3370							    &stcb->asoc,
3371							    &stcb->asoc.strmout[i],
3372							    1);
3373						}
3374						/*
3375						 * Now move assoc pointers
3376						 * too
3377						 */
3378						if (stcb->asoc.last_out_stream == &oldstream[i]) {
3379							stcb->asoc.last_out_stream = &stcb->asoc.strmout[i];
3380						}
3381						if (stcb->asoc.locked_on_sending == &oldstream[i]) {
3382							stcb->asoc.locked_on_sending = &stcb->asoc.strmout[i];
3383						}
3384					}
3385					/* now the new streams */
3386					for (i = stcb->asoc.streamoutcnt; i < (stcb->asoc.streamoutcnt + addstrmcnt); i++) {
3387						stcb->asoc.strmout[i].next_sequence_sent = 0x0;
3388						TAILQ_INIT(&stcb->asoc.strmout[i].outqueue);
3389						stcb->asoc.strmout[i].stream_no = i;
3390						stcb->asoc.strmout[i].last_msg_incomplete = 0;
3391						stcb->asoc.strmout[i].next_spoke.tqe_next = NULL;
3392						stcb->asoc.strmout[i].next_spoke.tqe_prev = NULL;
3393					}
3394					stcb->asoc.strm_realoutsize = stcb->asoc.streamoutcnt + addstrmcnt;
3395					SCTP_FREE(oldstream, SCTP_M_STRMO);
3396				}
3397				SCTP_TCB_SEND_UNLOCK(stcb);
3398				goto skip_stuff;
3399			} else {
3400				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3401				error = EINVAL;
3402				SCTP_TCB_UNLOCK(stcb);
3403				break;
3404			}
3405			for (i = 0; i < strrst->strrst_num_streams; i++) {
3406				if ((send_in) &&
3407				    (strrst->strrst_list[i] > stcb->asoc.streamincnt)) {
3409					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3410					error = EINVAL;
3411					goto get_out;
3412				}
3413				if ((send_out) &&
3414				    (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) {
3415					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3416					error = EINVAL;
3417					goto get_out;
3418				}
3419			}
3420	skip_stuff:
3421			if (error) {
3422		get_out:
3423				SCTP_TCB_UNLOCK(stcb);
3424				break;
3425			}
3426			error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
3427			    strrst->strrst_list,
3428			    send_out, (stcb->asoc.str_reset_seq_in - 3),
3429			    send_in, send_tsn, addstream, addstrmcnt);
3430
3431			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED);
3432			SCTP_TCB_UNLOCK(stcb);
3433		}
3434		break;
3435
3436	case SCTP_CONNECT_X:
3437		if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
3438			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3439			error = EINVAL;
3440			break;
3441		}
3442		error = sctp_do_connect_x(so, inp, optval, optsize, p, 0);
3443		break;
3444
3445	case SCTP_CONNECT_X_DELAYED:
3446		if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
3447			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3448			error = EINVAL;
3449			break;
3450		}
3451		error = sctp_do_connect_x(so, inp, optval, optsize, p, 1);
3452		break;
3453
3454	case SCTP_CONNECT_X_COMPLETE:
3455		{
3456			struct sockaddr *sa;
3457			struct sctp_nets *net;
3458
3459			/* FIXME MT: check correct? */
3460			SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize);
3461
3462			/* find tcb */
3463			if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3464				SCTP_INP_RLOCK(inp);
3465				stcb = LIST_FIRST(&inp->sctp_asoc_list);
3466				if (stcb) {
3467					SCTP_TCB_LOCK(stcb);
3468					net = sctp_findnet(stcb, sa);
3469				}
3470				SCTP_INP_RUNLOCK(inp);
3471			} else {
3472				/*
3473				 * We increment the refcount here since
3474				 * sctp_findassociation_ep_addr() will do a
3475				 * decrement if it finds the stcb, as long as
3476				 * the locked tcb (last argument) is not a
3477				 * TCB, i.e. NULL.
3478				 */
3479				SCTP_INP_INCR_REF(inp);
3480				stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
3481				if (stcb == NULL) {
3482					SCTP_INP_DECR_REF(inp);
3483				}
3484			}
3485
3486			if (stcb == NULL) {
3487				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
3488				error = ENOENT;
3489				break;
3490			}
3491			if (stcb->asoc.delayed_connection == 1) {
3492				stcb->asoc.delayed_connection = 0;
3493				(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
3494				sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb,
3495				    stcb->asoc.primary_destination,
3496				    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9);
3497				sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
3498			} else {
3499				/*
3500				 * already expired or did not use delayed
3501				 * connectx
3502				 */
3503				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
3504				error = EALREADY;
3505			}
3506			SCTP_TCB_UNLOCK(stcb);
3507		}
3508		break;
3509	case SCTP_MAX_BURST:
3510		{
3511			uint8_t *burst;
3512
3513			SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize);
3514
3515			SCTP_INP_WLOCK(inp);
3516			if (*burst) {
3517				inp->sctp_ep.max_burst = *burst;
3518			}
3519			SCTP_INP_WUNLOCK(inp);
3520		}
3521		break;
3522	case SCTP_MAXSEG:
3523		{
3524			struct sctp_assoc_value *av;
3525			int ovh;
3526
3527			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
3528			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
3529
3530			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3531				ovh = SCTP_MED_OVERHEAD;
3532			} else {
3533				ovh = SCTP_MED_V4_OVERHEAD;
3534			}
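			/*
			 * The caller supplies the maximum payload size; add the
			 * IP/SCTP header overhead (ovh) to get the fragmentation
			 * point, or fall back to the default when zero is given.
			 */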
3535			if (stcb) {
3536				if (av->assoc_value) {
3537					stcb->asoc.sctp_frag_point = (av->assoc_value + ovh);
3538				} else {
3539					stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
3540				}
3541				SCTP_TCB_UNLOCK(stcb);
3542			} else {
3543				SCTP_INP_WLOCK(inp);
3544				/*
3545				 * FIXME MT: I think this is not in tune
3546				 * with the API ID
3547				 */
3548				if (av->assoc_value) {
3549					inp->sctp_frag_point = (av->assoc_value + ovh);
3550				} else {
3551					inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
3552				}
3553				SCTP_INP_WUNLOCK(inp);
3554			}
3555		}
3556		break;
3557	case SCTP_EVENTS:
3558		{
3559			struct sctp_event_subscribe *events;
3560
3561			SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize);
3562
3563			SCTP_INP_WLOCK(inp);
3564			if (events->sctp_data_io_event) {
3565				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
3566			} else {
3567				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
3568			}
3569
3570			if (events->sctp_association_event) {
3571				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
3572			} else {
3573				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
3574			}
3575
3576			if (events->sctp_address_event) {
3577				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
3578			} else {
3579				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
3580			}
3581
3582			if (events->sctp_send_failure_event) {
3583				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
3584			} else {
3585				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
3586			}
3587
3588			if (events->sctp_peer_error_event) {
3589				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR);
3590			} else {
3591				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR);
3592			}
3593
3594			if (events->sctp_shutdown_event) {
3595				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
3596			} else {
3597				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
3598			}
3599
3600			if (events->sctp_partial_delivery_event) {
3601				sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
3602			} else {
3603				sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
3604			}
3605
3606			if (events->sctp_adaptation_layer_event) {
3607				sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
3608			} else {
3609				sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
3610			}
3611
3612			if (events->sctp_authentication_event) {
3613				sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT);
3614			} else {
3615				sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
3616			}
3617
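			/*
			 * When enabling the sender-dry event on a one-to-one
			 * style socket, deliver the notification immediately if
			 * the existing association already has nothing queued.
			 */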
3618			if (events->sctp_sender_dry_event) {
3619				sctp_feature_on(inp, SCTP_PCB_FLAGS_DRYEVNT);
3620				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3621				    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3622					stcb = LIST_FIRST(&inp->sctp_asoc_list);
3623					if (stcb) {
3624						SCTP_TCB_LOCK(stcb);
3625					}
3626					if (stcb &&
3627					    TAILQ_EMPTY(&stcb->asoc.send_queue) &&
3628					    TAILQ_EMPTY(&stcb->asoc.sent_queue) &&
3629					    (stcb->asoc.stream_queue_cnt == 0)) {
3630						sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_LOCKED);
3631					}
3632					if (stcb) {
3633						SCTP_TCB_UNLOCK(stcb);
3634					}
3635				}
3636			} else {
3637				sctp_feature_off(inp, SCTP_PCB_FLAGS_DRYEVNT);
3638			}
3639
3640			if (events->sctp_stream_reset_event) {
3641				sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
3642			} else {
3643				sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
3644			}
3645			SCTP_INP_WUNLOCK(inp);
3646		}
3647		break;
3648
3649	case SCTP_ADAPTATION_LAYER:
3650		{
3651			struct sctp_setadaptation *adap_bits;
3652
3653			SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize);
3654			SCTP_INP_WLOCK(inp);
3655			inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind;
3656			SCTP_INP_WUNLOCK(inp);
3657		}
3658		break;
3659#ifdef SCTP_DEBUG
3660	case SCTP_SET_INITIAL_DBG_SEQ:
3661		{
3662			uint32_t *vvv;
3663
3664			SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize);
3665			SCTP_INP_WLOCK(inp);
3666			inp->sctp_ep.initial_sequence_debug = *vvv;
3667			SCTP_INP_WUNLOCK(inp);
3668		}
3669		break;
3670#endif
3671	case SCTP_DEFAULT_SEND_PARAM:
3672		{
3673			struct sctp_sndrcvinfo *s_info;
3674
3675			SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize);
3676			SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
3677
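			/*
			 * Store the default send parameters; for an existing
			 * association the default stream must lie within the
			 * negotiated number of outgoing streams.
			 */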
3678			if (stcb) {
3679				if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) {
3680					memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send)));
3681				} else {
3682					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3683					error = EINVAL;
3684				}
3685				SCTP_TCB_UNLOCK(stcb);
3686			} else {
3687				SCTP_INP_WLOCK(inp);
3688				memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send)));
3689				SCTP_INP_WUNLOCK(inp);
3690			}
3691		}
3692		break;
3693	case SCTP_PEER_ADDR_PARAMS:
3694		/* Applies to the specific association */
3695		{
3696			struct sctp_paddrparams *paddrp;
3697			struct sctp_nets *net;
3698
3699			SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize);
3700			SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
3701			net = NULL;
3702			if (stcb) {
3703				net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
3704			} else {
3705				/*
3706				 * We increment here since
3707				 * sctp_findassociation_ep_addr() will do a
3708				 * decrement if it finds the stcb, as long as
3709				 * the locked tcb (last argument) is NOT a
3710				 * TCB, i.e. NULL.
3711				 */
3712				SCTP_INP_INCR_REF(inp);
3713				stcb = sctp_findassociation_ep_addr(&inp,
3714				    (struct sockaddr *)&paddrp->spp_address,
3715				    &net, NULL, NULL);
3716				if (stcb == NULL) {
3717					SCTP_INP_DECR_REF(inp);
3718				}
3719			}
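			/*
			 * If an association was found but the address matched no
			 * destination, only the wildcard address is acceptable;
			 * it means the settings apply to the whole association.
			 */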
3720			if (stcb && (net == NULL)) {
3721				struct sockaddr *sa;
3722
3723				sa = (struct sockaddr *)&paddrp->spp_address;
3724				if (sa->sa_family == AF_INET) {
3725					struct sockaddr_in *sin;
3726
3727					sin = (struct sockaddr_in *)sa;
3728					if (sin->sin_addr.s_addr) {
3729						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3730						SCTP_TCB_UNLOCK(stcb);
3731						error = EINVAL;
3732						break;
3733					}
3734				} else if (sa->sa_family == AF_INET6) {
3735					struct sockaddr_in6 *sin6;
3736
3737					sin6 = (struct sockaddr_in6 *)sa;
3738					if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
3739						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3740						SCTP_TCB_UNLOCK(stcb);
3741						error = EINVAL;
3742						break;
3743					}
3744				} else {
3745					error = EAFNOSUPPORT;
3746					SCTP_TCB_UNLOCK(stcb);
3747					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
3748					break;
3749				}
3750			}
3751			/* sanity checks */
3752			if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) {
3753				if (stcb)
3754					SCTP_TCB_UNLOCK(stcb);
3755				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3756				return (EINVAL);
3757			}
3758			if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) {
3759				if (stcb)
3760					SCTP_TCB_UNLOCK(stcb);
3761				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3762				return (EINVAL);
3763			}
3764			if (stcb) {
3765				/************************TCB SPECIFIC SET ******************/
3766				/*
3767				 * do we change the timer for HB? We run
3768				 * only one.
3769				 */
3770				int ovh = 0;
3771
3772				if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3773					ovh = SCTP_MED_OVERHEAD;
3774				} else {
3775					ovh = SCTP_MED_V4_OVERHEAD;
3776				}
3777
3778				if (paddrp->spp_hbinterval)
3779					stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
3780				else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3781					stcb->asoc.heart_beat_delay = 0;
3782
3783				/* per-network settings? */
3784				if (net) {
3785					/************************NET SPECIFIC SET ******************/
3786					if (paddrp->spp_flags & SPP_HB_DEMAND) {
3787						/* on demand HB */
3788						if (sctp_send_hb(stcb, 1, net) < 0) {
3789							/* asoc destroyed */
3790							SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3791							error = EINVAL;
3792							break;
3793						}
3794					}
3795					if (paddrp->spp_flags & SPP_HB_DISABLE) {
3796						net->dest_state |= SCTP_ADDR_NOHB;
3797					}
3798					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3799						net->dest_state &= ~SCTP_ADDR_NOHB;
3800					}
3801					if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
3802						if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3803							sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3804							    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3805						}
3806						if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3807							net->mtu = paddrp->spp_pathmtu + ovh;
3808							if (net->mtu < stcb->asoc.smallest_mtu) {
3809#ifdef SCTP_PRINT_FOR_B_AND_M
3810								SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n",
3811								    net->mtu);
3812#endif
3813								sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3814							}
3815						}
3816					}
3817					if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3818						if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3819							sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3820						}
3821					}
3822					if (paddrp->spp_pathmaxrxt)
3823						net->failure_threshold = paddrp->spp_pathmaxrxt;
3824#ifdef INET
3825					if (paddrp->spp_flags & SPP_IPV4_TOS) {
3826						if (net->ro._l_addr.sin.sin_family == AF_INET) {
3827							net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc;
3828						}
3829					}
3830#endif
3831#ifdef INET6
3832					if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
3833						if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
3834							net->tos_flowlabel = paddrp->spp_ipv6_flowlabel;
3835						}
3836					}
3837#endif
3838				} else {
3839					/************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/
3840					if (paddrp->spp_pathmaxrxt)
3841						stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
3842
3843					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3844						/* Turn back on the timer */
3845						stcb->asoc.hb_is_disabled = 0;
3846						sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3847					}
3848					if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
3849						TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3850							if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3851								sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3852								    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3853							}
3854							if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3855								net->mtu = paddrp->spp_pathmtu + ovh;
3856								if (net->mtu < stcb->asoc.smallest_mtu) {
3857#ifdef SCTP_PRINT_FOR_B_AND_M
3858									SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n",
3859									    net->mtu);
3860#endif
3861									sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3862								}
3863							}
3864						}
3865					}
3866					if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3867						TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3868							if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3869								sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3870							}
3871						}
3872					}
3873					if (paddrp->spp_flags & SPP_HB_DISABLE) {
3874						int cnt_of_unconf = 0;
3875						struct sctp_nets *lnet;
3876
3877						stcb->asoc.hb_is_disabled = 1;
3878						TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
3879							if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
3880								cnt_of_unconf++;
3881							}
3882						}
3883						/*
3884						 * stop the timer ONLY if we
3885						 * have no unconfirmed
3886						 * addresses
3887						 */
3888						if (cnt_of_unconf == 0) {
3889							TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3890								sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
3891								    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11);
3892							}
3893						}
3894					}
3895					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3896						/* start up the timer. */
3897						TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3898							sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3899						}
3900					}
3901#ifdef INET
3902					if (paddrp->spp_flags & SPP_IPV4_TOS)
3903						stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc;
3904#endif
3905#ifdef INET6
3906					if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL)
3907						stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel;
3908#endif
3909
3910				}
3911				SCTP_TCB_UNLOCK(stcb);
3912			} else {
3913				/************************NO TCB, SET TO default stuff ******************/
3914				SCTP_INP_WLOCK(inp);
3915				/*
3916				 * For the TOS/FLOWLABEL stuff you set it
3917				 * with the options on the socket
3918				 */
3919				if (paddrp->spp_pathmaxrxt) {
3920					inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
3921				}
3922				if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3923					inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0;
3924				else if (paddrp->spp_hbinterval) {
3925					if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL)
3926						paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL;
3927					inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval);
3928				}
3929				if (paddrp->spp_flags & SPP_HB_ENABLE) {
3930					sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3931
3932				} else if (paddrp->spp_flags & SPP_HB_DISABLE) {
3933					sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3934				}
3935				SCTP_INP_WUNLOCK(inp);
3936			}
3937		}
3938		break;
3939	case SCTP_RTOINFO:
3940		{
3941			struct sctp_rtoinfo *srto;
3942			uint32_t new_init, new_min, new_max;
3943
3944			SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize);
3945			SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
3946
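			/*
			 * A value of zero keeps the current setting; the result
			 * must satisfy min <= initial <= max or EINVAL is returned.
			 */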
3947			if (stcb) {
3948				if (srto->srto_initial)
3949					new_init = srto->srto_initial;
3950				else
3951					new_init = stcb->asoc.initial_rto;
3952				if (srto->srto_max)
3953					new_max = srto->srto_max;
3954				else
3955					new_max = stcb->asoc.maxrto;
3956				if (srto->srto_min)
3957					new_min = srto->srto_min;
3958				else
3959					new_min = stcb->asoc.minrto;
3960				if ((new_min <= new_init) && (new_init <= new_max)) {
3961					stcb->asoc.initial_rto = new_init;
3962					stcb->asoc.maxrto = new_max;
3963					stcb->asoc.minrto = new_min;
3964				} else {
3965					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3966					error = EINVAL;
3967				}
3968				SCTP_TCB_UNLOCK(stcb);
3969			} else {
3970				SCTP_INP_WLOCK(inp);
3971				if (srto->srto_initial)
3972					new_init = srto->srto_initial;
3973				else
3974					new_init = inp->sctp_ep.initial_rto;
3975				if (srto->srto_max)
3976					new_max = srto->srto_max;
3977				else
3978					new_max = inp->sctp_ep.sctp_maxrto;
3979				if (srto->srto_min)
3980					new_min = srto->srto_min;
3981				else
3982					new_min = inp->sctp_ep.sctp_minrto;
3983				if ((new_min <= new_init) && (new_init <= new_max)) {
3984					inp->sctp_ep.initial_rto = new_init;
3985					inp->sctp_ep.sctp_maxrto = new_max;
3986					inp->sctp_ep.sctp_minrto = new_min;
3987				} else {
3988					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3989					error = EINVAL;
3990				}
3991				SCTP_INP_WUNLOCK(inp);
3992			}
3993		}
3994		break;
3995	case SCTP_ASSOCINFO:
3996		{
3997			struct sctp_assocparams *sasoc;
3998
3999			SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize);
4000			SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
4001			if (sasoc->sasoc_cookie_life) {
4002				/* boundary check the cookie life */
4003				if (sasoc->sasoc_cookie_life < 1000)
4004					sasoc->sasoc_cookie_life = 1000;
4005				if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) {
4006					sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE;
4007				}
4008			}
4009			if (stcb) {
4010				if (sasoc->sasoc_asocmaxrxt)
4011					stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
4012				sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
4013				sasoc->sasoc_peer_rwnd = 0;
4014				sasoc->sasoc_local_rwnd = 0;
4015				if (sasoc->sasoc_cookie_life) {
4016					stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
4017				}
4018				SCTP_TCB_UNLOCK(stcb);
4019			} else {
4020				SCTP_INP_WLOCK(inp);
4021				if (sasoc->sasoc_asocmaxrxt)
4022					inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
4023				sasoc->sasoc_number_peer_destinations = 0;
4024				sasoc->sasoc_peer_rwnd = 0;
4025				sasoc->sasoc_local_rwnd = 0;
4026				if (sasoc->sasoc_cookie_life) {
4027					inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
4028				}
4029				SCTP_INP_WUNLOCK(inp);
4030			}
4031		}
4032		break;
4033	case SCTP_INITMSG:
4034		{
4035			struct sctp_initmsg *sinit;
4036
4037			SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize);
4038			SCTP_INP_WLOCK(inp);
4039			if (sinit->sinit_num_ostreams)
4040				inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
4041
4042			if (sinit->sinit_max_instreams)
4043				inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
4044
4045			if (sinit->sinit_max_attempts)
4046				inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
4047
4048			if (sinit->sinit_max_init_timeo)
4049				inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
4050			SCTP_INP_WUNLOCK(inp);
4051		}
4052		break;
4053	case SCTP_PRIMARY_ADDR:
4054		{
4055			struct sctp_setprim *spa;
4056			struct sctp_nets *net, *lnet;
4057
4058			SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize);
4059			SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id);
4060
4061			net = NULL;
4062			if (stcb) {
4063				net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
4064			} else {
4065				/*
4066				 * We increment here since
4067				 * sctp_findassociation_ep_addr() will do a
4068				 * decrement if it finds the stcb, as long as
4069				 * the locked tcb (last argument) is NOT a
4070				 * TCB, i.e. NULL.
4071				 */
4072				SCTP_INP_INCR_REF(inp);
4073				stcb = sctp_findassociation_ep_addr(&inp,
4074				    (struct sockaddr *)&spa->ssp_addr,
4075				    &net, NULL, NULL);
4076				if (stcb == NULL) {
4077					SCTP_INP_DECR_REF(inp);
4078				}
4079			}
4080
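			/*
			 * Switch the primary path only if the address maps to a
			 * known destination that is confirmed and not already the
			 * primary.
			 */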
4081			if ((stcb) && (net)) {
4082				if ((net != stcb->asoc.primary_destination) &&
4083				    (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
4084					/* Ok we need to set it */
4085					lnet = stcb->asoc.primary_destination;
4086					if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) {
4087						if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
4088							net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
4089						}
4090						net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
4091					}
4092				}
4093			} else {
4094				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4095				error = EINVAL;
4096			}
4097			if (stcb) {
4098				SCTP_TCB_UNLOCK(stcb);
4099			}
4100		}
4101		break;
4102	case SCTP_SET_DYNAMIC_PRIMARY:
4103		{
4104			union sctp_sockstore *ss;
4105
4106			error = priv_check(curthread,
4107			    PRIV_NETINET_RESERVEDPORT);
4108			if (error)
4109				break;
4110
4111			SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize);
4112			/* SUPER USER CHECK? */
4113			error = sctp_dynamic_set_primary(&ss->sa, vrf_id);
4114		}
4115		break;
4116	case SCTP_SET_PEER_PRIMARY_ADDR:
4117		{
4118			struct sctp_setpeerprim *sspp;
4119
4120			SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize);
4121			SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id);
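			/*
			 * Ask the peer to use the given local address as its
			 * primary destination; the address must be one of ours,
			 * and for a non-boundall endpoint it must be in the
			 * endpoint's bound address list.
			 */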
4122			if (stcb != NULL) {
4123				struct sctp_ifa *ifa;
4124
4125				ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr,
4126				    stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED);
4127				if (ifa == NULL) {
4128					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4129					error = EINVAL;
4130					goto out_of_it;
4131				}
4132				if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
4133					/*
4134					 * Must validate the ifa found is in
4135					 * our ep
4136					 */
4137					struct sctp_laddr *laddr;
4138					int found = 0;
4139
4140					LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4141						if (laddr->ifa == NULL) {
4142							SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
4143							    __FUNCTION__);
4144							continue;
4145						}
4146						if (laddr->ifa == ifa) {
4147							found = 1;
4148							break;
4149						}
4150					}
4151					if (!found) {
4152						SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4153						error = EINVAL;
4154						goto out_of_it;
4155					}
4156				}
4157				if (sctp_set_primary_ip_address_sa(stcb,
4158				    (struct sockaddr *)&sspp->sspp_addr) != 0) {
4159					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4160					error = EINVAL;
4161				}
4162		out_of_it:
4163				SCTP_TCB_UNLOCK(stcb);
4164			} else {
4165				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4166				error = EINVAL;
4167			}
4168
4169		}
4170		break;
4171	case SCTP_BINDX_ADD_ADDR:
4172		{
4173			struct sctp_getaddresses *addrs;
4174			size_t sz;
4175			struct thread *td;
4176
4177			td = (struct thread *)p;
4178			SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses,
4179			    optsize);
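			/*
			 * Validate the sockaddr length for its family and apply
			 * jail (prison) address restrictions before adding the
			 * address to the endpoint.
			 */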
4180			if (addrs->addr->sa_family == AF_INET) {
4181				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
4182				if (optsize < sz) {
4183					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4184					error = EINVAL;
4185					break;
4186				}
4187				if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) {
4188					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4189					break;
4190				}
4191#ifdef INET6
4192			} else if (addrs->addr->sa_family == AF_INET6) {
4193				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
4194				if (optsize < sz) {
4195					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4196					error = EINVAL;
4197					break;
4198				}
4199				if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr),
4200				    (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
4201					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4202					break;
4203				}
4204#endif
4205			} else {
4206				error = EAFNOSUPPORT;
4207				break;
4208			}
4209			sctp_bindx_add_address(so, inp, addrs->addr,
4210			    addrs->sget_assoc_id, vrf_id,
4211			    &error, p);
4212		}
4213		break;
4214	case SCTP_BINDX_REM_ADDR:
4215		{
4216			struct sctp_getaddresses *addrs;
4217			size_t sz;
4218			struct thread *td;
4219
4220			td = (struct thread *)p;
4221
4222			SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
4223			if (addrs->addr->sa_family == AF_INET) {
4224				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
4225				if (optsize < sz) {
4226					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4227					error = EINVAL;
4228					break;
4229				}
4230				if (td != NULL && (error = prison_local_ip4(td->td_ucred, &(((struct sockaddr_in *)(addrs->addr))->sin_addr)))) {
4231					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4232					break;
4233				}
4234#ifdef INET6
4235			} else if (addrs->addr->sa_family == AF_INET6) {
4236				sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
4237				if (optsize < sz) {
4238					SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4239					error = EINVAL;
4240					break;
4241				}
4242				if (td != NULL && (error = prison_local_ip6(td->td_ucred, &(((struct sockaddr_in6 *)(addrs->addr))->sin6_addr),
4243				    (SCTP_IPV6_V6ONLY(inp) != 0))) != 0) {
4244					SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, error);
4245					break;
4246				}
4247#endif
4248			} else {
4249				error = EAFNOSUPPORT;
4250				break;
4251			}
4252			sctp_bindx_delete_address(so, inp, addrs->addr,
4253			    addrs->sget_assoc_id, vrf_id,
4254			    &error);
4255		}
4256		break;
4257	default:
4258		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
4259		error = ENOPROTOOPT;
4260		break;
4261	}			/* end switch (opt) */
4262	return (error);
4263}
4264
4265int
4266sctp_ctloutput(struct socket *so, struct sockopt *sopt)
4267{
4268	void *optval = NULL;
4269	size_t optsize = 0;
4270	struct sctp_inpcb *inp;
4271	void *p;
4272	int error = 0;
4273
4274	inp = (struct sctp_inpcb *)so->so_pcb;
4275	if (inp == 0) {
4276		/* I made this the same as TCP since we are not set up. */
4277		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4278		return (ECONNRESET);
4279	}
4280	if (sopt->sopt_level != IPPROTO_SCTP) {
4281		/* wrong proto level... send back up to IP */
4282#ifdef INET6
4283		if (INP_CHECK_SOCKAF(so, AF_INET6))
4284			error = ip6_ctloutput(so, sopt);
4285		else
4286#endif				/* INET6 */
4287			error = ip_ctloutput(so, sopt);
4288		return (error);
4289	}
4290	optsize = sopt->sopt_valsize;
4291	if (optsize) {
4292		SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT);
4293		if (optval == NULL) {
4294			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS);
4295			return (ENOBUFS);
4296		}
4297		error = sooptcopyin(sopt, optval, optsize, optsize);
4298		if (error) {
4299			SCTP_FREE(optval, SCTP_M_SOCKOPT);
4300			goto out;
4301		}
4302	}
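	/* Dispatch to the SCTP handler and copy any result back to the caller. */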
4303	p = (void *)sopt->sopt_td;
4304	if (sopt->sopt_dir == SOPT_SET) {
4305		error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p);
4306	} else if (sopt->sopt_dir == SOPT_GET) {
4307		error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p);
4308	} else {
4309		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4310		error = EINVAL;
4311	}
4312	if ((error == 0) && (optval != NULL)) {
4313		error = sooptcopyout(sopt, optval, optsize);
4314		SCTP_FREE(optval, SCTP_M_SOCKOPT);
4315	} else if (optval != NULL) {
4316		SCTP_FREE(optval, SCTP_M_SOCKOPT);
4317	}
4318out:
4319	return (error);
4320}
4321
4322
4323static int
4324sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
4325{
4326	int error = 0;
4327	int create_lock_on = 0;
4328	uint32_t vrf_id;
4329	struct sctp_inpcb *inp;
4330	struct sctp_tcb *stcb = NULL;
4331
4332	inp = (struct sctp_inpcb *)so->so_pcb;
4333	if (inp == 0) {
4334		/* I made this the same as TCP since we are not set up. */
4335		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4336		return (ECONNRESET);
4337	}
4338	if (addr == NULL) {
4339		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4340		return EINVAL;
4341	}
4342#ifdef INET6
4343	if (addr->sa_family == AF_INET6) {
4344		struct sockaddr_in6 *sin6p;
4345
4346		if (addr->sa_len != sizeof(struct sockaddr_in6)) {
4347			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4348			return (EINVAL);
4349		}
4350		sin6p = (struct sockaddr_in6 *)addr;
4351		if (p != NULL && (error = prison_remote_ip6(p->td_ucred, &sin6p->sin6_addr)) != 0) {
4352			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
4353			return (error);
4354		}
4355	} else
4356#endif
4357	if (addr->sa_family == AF_INET) {
4358		struct sockaddr_in *sinp;
4359
4360		if (addr->sa_len != sizeof(struct sockaddr_in)) {
4361			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4362			return (EINVAL);
4363		}
4364		sinp = (struct sockaddr_in *)addr;
4365		if (p != NULL && (error = prison_remote_ip4(p->td_ucred, &sinp->sin_addr)) != 0) {
4366			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
4367			return (error);
4368		}
4369	} else {
4370		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EAFNOSUPPORT);
4371		return (EAFNOSUPPORT);
4372	}
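	/*
	 * Hold a reference on the endpoint and take the association-create
	 * lock so no other thread can set up an association underneath us.
	 */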
4373	SCTP_INP_INCR_REF(inp);
4374	SCTP_ASOC_CREATE_LOCK(inp);
4375	create_lock_on = 1;
4376
4377
4378	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4379	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4380		/* Should I really unlock ? */
4381		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
4382		error = EFAULT;
4383		goto out_now;
4384	}
4385#ifdef INET6
4386	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
4387	    (addr->sa_family == AF_INET6)) {
4388		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4389		error = EINVAL;
4390		goto out_now;
4391	}
4392#endif				/* INET6 */
4393	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
4394	    SCTP_PCB_FLAGS_UNBOUND) {
4395		/* Bind an ephemeral port */
4396		error = sctp_inpcb_bind(so, NULL, NULL, p);
4397		if (error) {
4398			goto out_now;
4399		}
4400	}
4401	/* Now do we connect? */
4402	if ((inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) &&
4403	    (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_PORTREUSE))) {
4404		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4405		error = EINVAL;
4406		goto out_now;
4407	}
4408	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4409	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
4410		/* We are already connected AND the TCP model */
4411		SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
4412		error = EADDRINUSE;
4413		goto out_now;
4414	}
4415	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4416		SCTP_INP_RLOCK(inp);
4417		stcb = LIST_FIRST(&inp->sctp_asoc_list);
4418		SCTP_INP_RUNLOCK(inp);
4419	} else {
4420		/*
4421		 * We increment here since sctp_findassociation_ep_addr()
4422		 * will do a decrement if it finds the stcb as long as the
4423		 * locked tcb (last argument) is NOT a TCB, i.e. NULL.
4424		 */
4425		SCTP_INP_INCR_REF(inp);
4426		stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
4427		if (stcb == NULL) {
4428			SCTP_INP_DECR_REF(inp);
4429		} else {
4430			SCTP_TCB_UNLOCK(stcb);
4431		}
4432	}
4433	if (stcb != NULL) {
4434		/* Already have or are bringing up an association */
4435		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
4436		error = EALREADY;
4437		goto out_now;
4438	}
4439	vrf_id = inp->def_vrf_id;
4440	/* We are GOOD to go */
4441	stcb = sctp_aloc_assoc(inp, addr, &error, 0, vrf_id, p);
4442	if (stcb == NULL) {
4443		/* Gak! no memory */
4444		goto out_now;
4445	}
4446	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
4447		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
4448		/* Set the connected flag so we can queue data */
4449		SOCKBUF_LOCK(&so->so_rcv);
4450		so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
4451		SOCKBUF_UNLOCK(&so->so_rcv);
4452		SOCKBUF_LOCK(&so->so_snd);
4453		so->so_snd.sb_state &= ~SBS_CANTSENDMORE;
4454		SOCKBUF_UNLOCK(&so->so_snd);
4455		SOCK_LOCK(so);
4456		so->so_state &= ~SS_ISDISCONNECTING;
4457		SOCK_UNLOCK(so);
4458		soisconnecting(so);
4459	}
4460	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
4461	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
4462
4463	/* initialize authentication parameters for the assoc */
4464	sctp_initialize_auth_params(inp, stcb);
4465
4466	sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
4467	SCTP_TCB_UNLOCK(stcb);
4468out_now:
4469	if (create_lock_on) {
4470		SCTP_ASOC_CREATE_UNLOCK(inp);
4471	}
4472	SCTP_INP_DECR_REF(inp);
4473	return error;
4474}
4475
4476int
4477sctp_listen(struct socket *so, int backlog, struct thread *p)
4478{
4479	/*
4480	 * Note this module depends on the protocol processing being called
4481	 * AFTER any socket level flags and backlog are applied to the
4482	 * socket. The traditional way that the socket flags are applied is
4483	 * AFTER protocol processing. We have made a change to the
4484	 * sys/kern/uipc_socket.c module to reverse this but this MUST be in
4485	 * place if the socket API for SCTP is to work properly.
4486	 */
4487
4488	int error = 0;
4489	struct sctp_inpcb *inp;
4490
4491	inp = (struct sctp_inpcb *)so->so_pcb;
4492	if (inp == 0) {
4493		/* I made this the same as TCP since we are not set up. */
4494		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4495		return (ECONNRESET);
4496	}
4497	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) {
4498		/* See if we have a listener */
4499		struct sctp_inpcb *tinp;
4500		union sctp_sockstore store, *sp;
4501
4502		sp = &store;
4503		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
4504			/* not bound all */
4505			struct sctp_laddr *laddr;
4506
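			/*
			 * Walk our bound addresses; if another endpoint is
			 * already listening on the same address and port,
			 * refuse with EADDRINUSE.
			 */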
4507			LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4508				memcpy(&store, &laddr->ifa->address, sizeof(store));
4509				sp->sin.sin_port = inp->sctp_lport;
4510				tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id);
4511				if (tinp && (tinp != inp) &&
4512				    ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
4513				    ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
4514				    (tinp->sctp_socket->so_qlimit)) {
4515					/*
4516					 * we have a listener already and
4517					 * it is not this inp.
4518					 */
4519					SCTP_INP_DECR_REF(tinp);
4520					return (EADDRINUSE);
4521				} else if (tinp) {
4522					SCTP_INP_DECR_REF(tinp);
4523				}
4524			}
4525		} else {
4526			/* Setup a local addr bound all */
4527			memset(&store, 0, sizeof(store));
4528			store.sin.sin_port = inp->sctp_lport;
4529#ifdef INET6
4530			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
4531				store.sa.sa_family = AF_INET6;
4532				store.sa.sa_len = sizeof(struct sockaddr_in6);
4533			}
4534#endif
4535			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
4536				store.sa.sa_family = AF_INET;
4537				store.sa.sa_len = sizeof(struct sockaddr_in);
4538			}
4539			tinp = sctp_pcb_findep(&sp->sa, 0, 0, inp->def_vrf_id);
4540			if (tinp && (tinp != inp) &&
4541			    ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) == 0) &&
4542			    ((tinp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
4543			    (tinp->sctp_socket->so_qlimit)) {
4544				/*
4545				 * we have a listener already and it is not
4546				 * this inp.
4547				 */
4548				SCTP_INP_DECR_REF(tinp);
4549				return (EADDRINUSE);
4550			} else if (tinp) {
4551				SCTP_INP_DECR_REF(tinp);
4552			}
4553		}
4554	}
4555	SCTP_INP_RLOCK(inp);
4556#ifdef SCTP_LOCK_LOGGING
4557	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) {
4558		sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
4559	}
4560#endif
4561	SOCK_LOCK(so);
4562	error = solisten_proto_check(so);
4563	if (error) {
4564		SOCK_UNLOCK(so);
4565		SCTP_INP_RUNLOCK(inp);
4566		return (error);
4567	}
4568	if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PORTREUSE)) &&
4569	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
4570		/*
4571		 * The unlucky case: we are in the TCP pool with this guy
4572		 * while someone else is in the main inp slot. We must move
4573		 * this guy (the listener) to the main slot and then move
4574		 * the guy that was the listener to the TCP pool.
4575		 */
4576		if (sctp_swap_inpcb_for_listen(inp)) {
4577			goto in_use;
4578		}
4579	}
4580	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4581	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
4582		/* We are already connected AND the TCP model */
4583in_use:
4584		SCTP_INP_RUNLOCK(inp);
4585		SOCK_UNLOCK(so);
4586		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
4587		return (EADDRINUSE);
4588	}
4589	SCTP_INP_RUNLOCK(inp);
4590	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
4591		/* We must do a bind. */
4592		SOCK_UNLOCK(so);
4593		if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) {
4594			/* bind error, probably perm */
4595			return (error);
4596		}
4597		SOCK_LOCK(so);
4598	}
4599	/* It appears for 7.0 and on, we must always call this. */
4600	solisten_proto(so, backlog);
4601	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
4602		/* remove the ACCEPTCONN flag for one-to-many sockets */
4603		so->so_options &= ~SO_ACCEPTCONN;
4604	}
4605	if (backlog == 0) {
4606		/* turning off listen */
4607		so->so_options &= ~SO_ACCEPTCONN;
4608	}
4609	SOCK_UNLOCK(so);
4610	return (error);
4611}
4612
4613static int sctp_defered_wakeup_cnt = 0;
4614
4615int
4616sctp_accept(struct socket *so, struct sockaddr **addr)
4617{
4618	struct sctp_tcb *stcb;
4619	struct sctp_inpcb *inp;
4620	union sctp_sockstore store;
4621
4622#ifdef INET6
4623	int error;
4624
4625#endif
4626	inp = (struct sctp_inpcb *)so->so_pcb;
4627
4628	if (inp == 0) {
4629		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4630		return (ECONNRESET);
4631	}
4632	SCTP_INP_RLOCK(inp);
4633	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
4634		SCTP_INP_RUNLOCK(inp);
4635		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
4636		return (EOPNOTSUPP);
4637	}
4638	if (so->so_state & SS_ISDISCONNECTED) {
4639		SCTP_INP_RUNLOCK(inp);
4640		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED);
4641		return (ECONNABORTED);
4642	}
4643	stcb = LIST_FIRST(&inp->sctp_asoc_list);
4644	if (stcb == NULL) {
4645		SCTP_INP_RUNLOCK(inp);
4646		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4647		return (ECONNRESET);
4648	}
4649	SCTP_TCB_LOCK(stcb);
4650	SCTP_INP_RUNLOCK(inp);
4651	store = stcb->asoc.primary_destination->ro._l_addr;
4652	stcb->asoc.state &= ~SCTP_STATE_IN_ACCEPT_QUEUE;
4653	SCTP_TCB_UNLOCK(stcb);
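	/* Build a sockaddr copy of the peer's primary destination for the caller. */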
4654	switch (store.sa.sa_family) {
4655	case AF_INET:
4656		{
4657			struct sockaddr_in *sin;
4658
4659			SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4660			if (sin == NULL)
4661				return (ENOMEM);
4662			sin->sin_family = AF_INET;
4663			sin->sin_len = sizeof(*sin);
4664			sin->sin_port = ((struct sockaddr_in *)&store)->sin_port;
4665			sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr;
4666			*addr = (struct sockaddr *)sin;
4667			break;
4668		}
4669#ifdef INET6
4670	case AF_INET6:
4671		{
4672			struct sockaddr_in6 *sin6;
4673
4674			SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
4675			if (sin6 == NULL)
4676				return (ENOMEM);
4677			sin6->sin6_family = AF_INET6;
4678			sin6->sin6_len = sizeof(*sin6);
4679			sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port;
4680
4681			sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr;
4682			if ((error = sa6_recoverscope(sin6)) != 0) {
4683				SCTP_FREE_SONAME(sin6);
4684				return (error);
4685			}
4686			*addr = (struct sockaddr *)sin6;
4687			break;
4688		}
4689#endif
4690	default:
4691		/* TSNH */
4692		break;
4693	}
4694	/* Wake any delayed sleep action */
4695	if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
4696		SCTP_INP_WLOCK(inp);
4697		inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
4698		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
4699			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
4700			SCTP_INP_WUNLOCK(inp);
4701			SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
4702			if (sowriteable(inp->sctp_socket)) {
4703				sowwakeup_locked(inp->sctp_socket);
4704			} else {
4705				SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
4706			}
4707			SCTP_INP_WLOCK(inp);
4708		}
4709		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
4710			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
4711			SCTP_INP_WUNLOCK(inp);
4712			SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
4713			if (soreadable(inp->sctp_socket)) {
4714				sctp_defered_wakeup_cnt++;
4715				sorwakeup_locked(inp->sctp_socket);
4716			} else {
4717				SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
4718			}
4719			SCTP_INP_WLOCK(inp);
4720		}
4721		SCTP_INP_WUNLOCK(inp);
4722	}
4723	if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
4724		SCTP_TCB_LOCK(stcb);
4725		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
4726	}
4727	return (0);
4728}
4729
4730int
4731sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
4732{
4733	struct sockaddr_in *sin;
4734	uint32_t vrf_id;
4735	struct sctp_inpcb *inp;
4736	struct sctp_ifa *sctp_ifa;
4737
4738	/*
4739	 * Do the malloc first in case it blocks.
4740	 */
4741	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4742	if (sin == NULL)
4743		return (ENOMEM);
4744	sin->sin_family = AF_INET;
4745	sin->sin_len = sizeof(*sin);
4746	inp = (struct sctp_inpcb *)so->so_pcb;
4747	if (!inp) {
4748		SCTP_FREE_SONAME(sin);
4749		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4750		return ECONNRESET;
4751	}
4752	SCTP_INP_RLOCK(inp);
4753	sin->sin_port = inp->sctp_lport;
4754	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4755		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
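			/*
			 * Connected, bound-all socket: report the source address
			 * we would select toward the first IPv4 peer destination.
			 */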
4756			struct sctp_tcb *stcb;
4757			struct sockaddr_in *sin_a;
4758			struct sctp_nets *net;
4759			int fnd;
4760
4761			stcb = LIST_FIRST(&inp->sctp_asoc_list);
4762			if (stcb == NULL) {
4763				goto notConn;
4764			}
4765			fnd = 0;
4766			sin_a = NULL;
4767			SCTP_TCB_LOCK(stcb);
4768			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4769				sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4770				if (sin_a == NULL)
4771					/* this will make coverity happy */
4772					continue;
4773
4774				if (sin_a->sin_family == AF_INET) {
4775					fnd = 1;
4776					break;
4777				}
4778			}
4779			if ((!fnd) || (sin_a == NULL)) {
4780				/* punt */
4781				SCTP_TCB_UNLOCK(stcb);
4782				goto notConn;
4783			}
4784			vrf_id = inp->def_vrf_id;
4785			sctp_ifa = sctp_source_address_selection(inp,
4786			    stcb,
4787			    (sctp_route_t *) & net->ro,
4788			    net, 0, vrf_id);
4789			if (sctp_ifa) {
4790				sin->sin_addr = sctp_ifa->address.sin.sin_addr;
4791				sctp_free_ifa(sctp_ifa);
4792			}
4793			SCTP_TCB_UNLOCK(stcb);
4794		} else {
4795			/* For the bound all case you get back 0 */
4796	notConn:
4797			sin->sin_addr.s_addr = 0;
4798		}
4799
4800	} else {
4801		/* Take the first IPv4 address in the list */
4802		struct sctp_laddr *laddr;
4803		int fnd = 0;
4804
4805		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4806			if (laddr->ifa->address.sa.sa_family == AF_INET) {
4807				struct sockaddr_in *sin_a;
4808
4809				sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa;
4810				sin->sin_addr = sin_a->sin_addr;
4811				fnd = 1;
4812				break;
4813			}
4814		}
4815		if (!fnd) {
4816			SCTP_FREE_SONAME(sin);
4817			SCTP_INP_RUNLOCK(inp);
4818			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
4819			return ENOENT;
4820		}
4821	}
4822	SCTP_INP_RUNLOCK(inp);
4823	(*addr) = (struct sockaddr *)sin;
4824	return (0);
4825}
4826
4827int
4828sctp_peeraddr(struct socket *so, struct sockaddr **addr)
4829{
4830	struct sockaddr_in *sin = (struct sockaddr_in *)*addr;
4831	int fnd;
4832	struct sockaddr_in *sin_a;
4833	struct sctp_inpcb *inp;
4834	struct sctp_tcb *stcb;
4835	struct sctp_nets *net;
4836
4837	/* Do the malloc first in case it blocks. */
4838	inp = (struct sctp_inpcb *)so->so_pcb;
4839	if ((inp == NULL) ||
4840	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
4841		/* UDP type and listeners will drop out here */
4842		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
4843		return (ENOTCONN);
4844	}
4845	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4846	if (sin == NULL)
4847		return (ENOMEM);
4848	sin->sin_family = AF_INET;
4849	sin->sin_len = sizeof(*sin);
4850
4851	/* We must recapture in case we blocked */
4852	inp = (struct sctp_inpcb *)so->so_pcb;
4853	if (!inp) {
4854		SCTP_FREE_SONAME(sin);
4855		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4856		return ECONNRESET;
4857	}
4858	SCTP_INP_RLOCK(inp);
4859	stcb = LIST_FIRST(&inp->sctp_asoc_list);
4860	if (stcb) {
4861		SCTP_TCB_LOCK(stcb);
4862	}
4863	SCTP_INP_RUNLOCK(inp);
4864	if (stcb == NULL) {
4865		SCTP_FREE_SONAME(sin);
4866		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4867		return ECONNRESET;
4868	}
4869	fnd = 0;
4870	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4871		sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4872		if (sin_a->sin_family == AF_INET) {
4873			fnd = 1;
4874			sin->sin_port = stcb->rport;
4875			sin->sin_addr = sin_a->sin_addr;
4876			break;
4877		}
4878	}
4879	SCTP_TCB_UNLOCK(stcb);
4880	if (!fnd) {
4881		/* No IPv4 address */
4882		SCTP_FREE_SONAME(sin);
4883		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
4884		return ENOENT;
4885	}
4886	(*addr) = (struct sockaddr *)sin;
4887	return (0);
4888}
4889
4890struct pr_usrreqs sctp_usrreqs = {
4891	.pru_abort = sctp_abort,
4892	.pru_accept = sctp_accept,
4893	.pru_attach = sctp_attach,
4894	.pru_bind = sctp_bind,
4895	.pru_connect = sctp_connect,
4896	.pru_control = in_control,
4897	.pru_close = sctp_close,
4898	.pru_detach = sctp_close,
4899	.pru_sopoll = sopoll_generic,
4900	.pru_flush = sctp_flush,
4901	.pru_disconnect = sctp_disconnect,
4902	.pru_listen = sctp_listen,
4903	.pru_peeraddr = sctp_peeraddr,
4904	.pru_send = sctp_sendm,
4905	.pru_shutdown = sctp_shutdown,
4906	.pru_sockaddr = sctp_ingetaddr,
4907	.pru_sosend = sctp_sosend,
4908	.pru_soreceive = sctp_soreceive
4909};
4910