sctp_usrreq.c revision 169655
1/*-
2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 *   this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *   the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 *    contributors may be used to endorse or promote products derived
16 *    from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $	 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_usrreq.c 169655 2007-05-17 12:16:24Z rrs $");
35#include <netinet/sctp_os.h>
36#include <sys/proc.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctp_header.h>
39#include <netinet/sctp_var.h>
40#if defined(INET6)
41#include <netinet6/sctp6_var.h>
42#endif
43#include <netinet/sctp_sysctl.h>
44#include <netinet/sctp_output.h>
45#include <netinet/sctp_uio.h>
46#include <netinet/sctp_asconf.h>
47#include <netinet/sctputil.h>
48#include <netinet/sctp_indata.h>
49#include <netinet/sctp_timer.h>
50#include <netinet/sctp_auth.h>
51
52
53
54void
55sctp_init(void)
56{
57	/* Init the SCTP pcb in sctp_pcb.c */
58	u_long sb_max_adj;
59
60	sctp_pcb_init();
61
62	if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
63		sctp_max_chunks_on_queue = (nmbclusters / 8);
64	/*
65	 * Allow a user to take no more than 1/2 the number of clusters, or
66	 * SB_MAX, whichever is smaller, for the send window.
67	 */
68	sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
69	sctp_sendspace = min((min(SB_MAX, sb_max_adj)),
70	    ((nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
71	/*
72	 * Now for the recv window: should we take the same amount, or
73	 * should I use 1/2 of SB_MAX instead in the SB_MAX min above?
74	 * For now I will just copy.
75	 */
76	sctp_recvspace = sctp_sendspace;
77
78
79}
80
81
82
83/*
84 * cleanup of the sctppcbinfo structure.
85 * Assumes that the sctppcbinfo lock is held.
86 */
87void
88sctp_pcbinfo_cleanup(void)
89{
90	/* free the hash tables */
91	if (sctppcbinfo.sctp_asochash != NULL)
92		SCTP_HASH_FREE(sctppcbinfo.sctp_asochash, sctppcbinfo.hashasocmark);
93	if (sctppcbinfo.sctp_ephash != NULL)
94		SCTP_HASH_FREE(sctppcbinfo.sctp_ephash, sctppcbinfo.hashmark);
95	if (sctppcbinfo.sctp_tcpephash != NULL)
96		SCTP_HASH_FREE(sctppcbinfo.sctp_tcpephash, sctppcbinfo.hashtcpmark);
97	if (sctppcbinfo.sctp_restarthash != NULL)
98		SCTP_HASH_FREE(sctppcbinfo.sctp_restarthash, sctppcbinfo.hashrestartmark);
99}
100
101
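/*
 * Lower the association's smallest known MTU to nxtsz and walk the send
 * and sent queues: any chunk larger than the new MTU is marked as OK to
 * fragment, and chunks already sent are additionally marked for
 * retransmission and removed from the flight size.
 */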
102static void
103sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
104    struct sctp_tcb *stcb,
105    struct sctp_nets *net,
106    uint16_t nxtsz)
107{
108	struct sctp_tmit_chunk *chk;
109
110	/* Adjust that too */
111	stcb->asoc.smallest_mtu = nxtsz;
112	/* now off to subtract IP_DF flag if needed */
113#ifdef SCTP_PRINT_FOR_B_AND_M
114	SCTP_PRINTF("sctp_pathmtu_adjust called inp:%p stcb:%p net:%p nxtsz:%d\n",
115	    inp, stcb, net, nxtsz);
116#endif
117	TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
118		if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
119			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
120		}
121	}
122	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
123		if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
124			/*
125			 * For this guy we also mark for immediate resend
126			 * since we sent too big a chunk.
127			 */
128			chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
129			if (chk->sent != SCTP_DATAGRAM_RESEND) {
130				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
131			}
132			chk->sent = SCTP_DATAGRAM_RESEND;
133			chk->rec.data.doing_fast_retransmit = 0;
134#ifdef SCTP_FLIGHT_LOGGING
135			sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
136			    chk->whoTo->flight_size,
137			    chk->book_size,
138			    (uintptr_t) chk->whoTo,
139			    chk->rec.data.TSN_seq);
140#endif
141			/* Clear any time so NO RTT is being done */
142			chk->do_rtt = 0;
143			sctp_flight_size_decrease(chk);
144			sctp_total_flight_decrease(stcb, chk);
145		}
146	}
147}
148
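/*
 * Handle an ICMP "fragmentation needed" error for an established
 * association: verify the verification tag, pick up the next-hop MTU
 * from the ICMP header (or guess one if the router did not supply it),
 * lower the destination's path MTU and, if needed, re-mark queued
 * chunks via sctp_pathmtu_adjustment().
 */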
149static void
150sctp_notify_mbuf(struct sctp_inpcb *inp,
151    struct sctp_tcb *stcb,
152    struct sctp_nets *net,
153    struct ip *ip,
154    struct sctphdr *sh)
155{
156	struct icmp *icmph;
157	int totsz, tmr_stopped = 0;
158	uint16_t nxtsz;
159
160	/* protection */
161	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
162	    (ip == NULL) || (sh == NULL)) {
163		if (stcb != NULL) {
164			SCTP_TCB_UNLOCK(stcb);
165		}
166		return;
167	}
168	/* First job is to verify the vtag matches what I would send */
169	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
170		SCTP_TCB_UNLOCK(stcb);
171		return;
172	}
173	icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
174	    sizeof(struct ip)));
175	if (icmph->icmp_type != ICMP_UNREACH) {
176		/* We only care about unreachable */
177		SCTP_TCB_UNLOCK(stcb);
178		return;
179	}
180	if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
181		/* not an unreachable message due to fragmentation */
182		SCTP_TCB_UNLOCK(stcb);
183		return;
184	}
185	totsz = ip->ip_len;
186
187	nxtsz = ntohs(icmph->icmp_seq);
188	if (nxtsz == 0) {
189		/*
190		 * old style router that does not tell us what the next MTU
191		 * size is. Rats, we will have to guess (in an educated fashion,
192		 * of course)
193		 */
194		nxtsz = find_next_best_mtu(totsz);
195	}
196	/* Stop any PMTU timer */
197	if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
198		tmr_stopped = 1;
199		sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
200		    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
201	}
202	/* Adjust destination size limit */
203	if (net->mtu > nxtsz) {
204		net->mtu = nxtsz;
205	}
206	/* now what about the ep? */
207	if (stcb->asoc.smallest_mtu > nxtsz) {
208#ifdef SCTP_PRINT_FOR_B_AND_M
209		SCTP_PRINTF("notify_mbuf (ICMP) calls sctp_pathmtu_adjust mtu:%d\n",
210		    nxtsz);
211#endif
212		sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
213	}
214	if (tmr_stopped)
215		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
216
217	SCTP_TCB_UNLOCK(stcb);
218}
219
220
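/*
 * Handle the remaining ICMP errors.  Unreachable/host-down errors mark
 * the destination unreachable, refused/no-protocol errors are treated
 * as an implicit OOTB abort, and anything else is simply passed up to
 * the socket as so_error.
 */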
221void
222sctp_notify(struct sctp_inpcb *inp,
223    int error,
224    struct sctphdr *sh,
225    struct sockaddr *to,
226    struct sctp_tcb *stcb,
227    struct sctp_nets *net)
228{
229	/* protection */
230	if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
231	    (sh == NULL) || (to == NULL)) {
232		return;
233	}
234	/* First job is to verify the vtag matches what I would send */
235	if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
236		return;
237	}
238	/* FIX ME FIX ME PROTOPT i.e. no SCTP should ALWAYS be an ABORT */
239
240	if ((error == EHOSTUNREACH) ||	/* Host is not reachable */
241	    (error == EHOSTDOWN) ||	/* Host is down */
242	    (error == ECONNREFUSED) ||	/* Host refused the connection, (not
243					 * an abort?) */
244	    (error == ENOPROTOOPT)	/* SCTP is not present on host */
245	    ) {
246		/*
247		 * Hmm, reachability problems we must examine closely. If it is
248		 * not reachable, we may have lost a network. Or if there is
249		 * NO protocol at the other end named SCTP, well, we consider
250		 * it an OOTB abort.
251		 */
252		if ((error == EHOSTUNREACH) || (error == EHOSTDOWN)) {
253			if (net->dest_state & SCTP_ADDR_REACHABLE) {
254				/* Ok that destination is NOT reachable */
255				SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
256				    net->error_count,
257				    net->failure_threshold,
258				    net);
259
260				net->dest_state &= ~SCTP_ADDR_REACHABLE;
261				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
262				net->error_count = net->failure_threshold + 1;
263				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
264				    stcb, SCTP_FAILED_THRESHOLD,
265				    (void *)net);
266			}
267			if (stcb) {
268				SCTP_TCB_UNLOCK(stcb);
269			}
270		} else {
271			/*
272			 * Here the peer is either playing tricks on us,
273			 * including an address that belongs to someone who
274			 * does not support SCTP, OR it was a userland
275			 * implementation that shut down and now is dead. In
276			 * either case treat it like an OOTB abort with no
277			 * TCB.
278			 */
279			sctp_abort_notification(stcb, SCTP_PEER_FAULTY);
280			sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
281			/* no need to unlock here, since the TCB is gone */
282		}
283	} else {
284		/* Send all others to the app */
285		if (stcb) {
286			SCTP_TCB_UNLOCK(stcb);
287		}
288		if (inp->sctp_socket) {
289#ifdef SCTP_LOCK_LOGGING
290			sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCK);
291#endif
292			SOCK_LOCK(inp->sctp_socket);
293			inp->sctp_socket->so_error = error;
294			sctp_sowwakeup(inp, inp->sctp_socket);
295			SOCK_UNLOCK(inp->sctp_socket);
296		}
297	}
298}
299
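/*
 * Protocol control input: called for ICMP errors against SCTP/IPv4
 * packets.  Looks up the association from the embedded SCTP header and
 * dispatches to sctp_notify() or, for PRC_MSGSIZE, sctp_notify_mbuf().
 */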
300void
301sctp_ctlinput(cmd, sa, vip)
302	int cmd;
303	struct sockaddr *sa;
304	void *vip;
305{
306	struct ip *ip = vip;
307	struct sctphdr *sh;
308	uint32_t vrf_id;
309
310	/* FIX, for non-bsd is this right? */
311	vrf_id = SCTP_DEFAULT_VRFID;
312	if (sa->sa_family != AF_INET ||
313	    ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
314		return;
315	}
316	if (PRC_IS_REDIRECT(cmd)) {
317		ip = 0;
318	} else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
319		return;
320	}
321	if (ip) {
322		struct sctp_inpcb *inp = NULL;
323		struct sctp_tcb *stcb = NULL;
324		struct sctp_nets *net = NULL;
325		struct sockaddr_in to, from;
326
327		sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
328		bzero(&to, sizeof(to));
329		bzero(&from, sizeof(from));
330		from.sin_family = to.sin_family = AF_INET;
331		from.sin_len = to.sin_len = sizeof(to);
332		from.sin_port = sh->src_port;
333		from.sin_addr = ip->ip_src;
334		to.sin_port = sh->dest_port;
335		to.sin_addr = ip->ip_dst;
336
337		/*
338		 * 'to' holds the dest of the packet that failed to be sent.
339		 * 'from' holds our local endpoint address. Thus we reverse
340		 * the to and the from in the lookup.
341		 */
342		stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
343		    (struct sockaddr *)&to,
344		    &inp, &net, 1, vrf_id);
345		if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
346			if (cmd != PRC_MSGSIZE) {
347				int cm;
348
349				if (cmd == PRC_HOSTDEAD) {
350					cm = EHOSTUNREACH;
351				} else {
352					cm = inetctlerrmap[cmd];
353				}
354				sctp_notify(inp, cm, sh,
355				    (struct sockaddr *)&to, stcb,
356				    net);
357			} else {
358				/* handle possible ICMP size messages */
359				sctp_notify_mbuf(inp, stcb, net, ip, sh);
360			}
361		} else {
362			if ((stcb == NULL) && (inp != NULL)) {
363				/* reduce ref-count */
364				SCTP_INP_WLOCK(inp);
365				SCTP_INP_DECR_REF(inp);
366				SCTP_INP_WUNLOCK(inp);
367			}
368		}
369	}
370	return;
371}
372
373static int
374sctp_getcred(SYSCTL_HANDLER_ARGS)
375{
376	struct xucred xuc;
377	struct sockaddr_in addrs[2];
378	struct sctp_inpcb *inp;
379	struct sctp_nets *net;
380	struct sctp_tcb *stcb;
381	int error;
382	uint32_t vrf_id;
383
384
385	/* FIX, for non-bsd is this right? */
386	vrf_id = SCTP_DEFAULT_VRFID;
387
388	/*
389	 * XXXRW: Other instances of getcred use SUSER_ALLOWJAIL, as socket
390	 * visibility is scoped using cr_canseesocket(), which it is not
391	 * here.
392	 */
393	error = priv_check_cred(req->td->td_ucred, PRIV_NETINET_GETCRED,
394	    SUSER_ALLOWJAIL);
395	if (error)
396		return (error);
397
398	error = SYSCTL_IN(req, addrs, sizeof(addrs));
399	if (error)
400		return (error);
401
402	stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
403	    sintosa(&addrs[1]),
404	    &inp, &net, 1, vrf_id);
405	if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
406		if ((inp != NULL) && (stcb == NULL)) {
407			/* reduce ref-count */
408			SCTP_INP_WLOCK(inp);
409			SCTP_INP_DECR_REF(inp);
410			goto cred_can_cont;
411		}
412		error = ENOENT;
413		goto out;
414	}
415	SCTP_TCB_UNLOCK(stcb);
416	/*
417	 * We use the write lock here only because the error leg needs
418	 * it. If we used RLOCK, then we would have to
419	 * wlock/decr/unlock/rlock, which in theory could create a hole.
420	 * Better to use the stronger wlock.
421	 */
422	SCTP_INP_WLOCK(inp);
423cred_can_cont:
424	error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
425	if (error) {
426		SCTP_INP_WUNLOCK(inp);
427		goto out;
428	}
429	cru2x(inp->sctp_socket->so_cred, &xuc);
430	SCTP_INP_WUNLOCK(inp);
431	error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
432out:
433	return (error);
434}
435
436SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
437    0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
438
439
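/*
 * Abort processing for the socket: atomically mark the PCB as gone and
 * free it with an ABORT, clearing the socket buffers and detaching
 * so_pcb.  The cmpset retry loop handles a race with another closer.
 */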
440static void
441sctp_abort(struct socket *so)
442{
443	struct sctp_inpcb *inp;
444	uint32_t flags;
445
446	inp = (struct sctp_inpcb *)so->so_pcb;
447	if (inp == 0)
448		return;
449
450sctp_must_try_again:
451	flags = inp->sctp_flags;
452#ifdef SCTP_LOG_CLOSING
453	sctp_log_closing(inp, NULL, 17);
454#endif
455	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
456	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
457#ifdef SCTP_LOG_CLOSING
458		sctp_log_closing(inp, NULL, 16);
459#endif
460		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
461		    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
462		SOCK_LOCK(so);
463		SCTP_SB_CLEAR(so->so_snd);
464		/*
465		 * same for the rcv ones, they are only here for the
466		 * accounting/select.
467		 */
468		SCTP_SB_CLEAR(so->so_rcv);
469
470		/* Now null out the reference, we are completely detached. */
471		so->so_pcb = NULL;
472		SOCK_UNLOCK(so);
473	} else {
474		flags = inp->sctp_flags;
475		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
476			goto sctp_must_try_again;
477		}
478	}
479	return;
480}
481
482static int
483sctp_attach(struct socket *so, int proto, struct thread *p)
484{
485	struct sctp_inpcb *inp;
486	struct inpcb *ip_inp;
487	int error;
488
489#ifdef IPSEC
490	uint32_t flags;
491
492#endif
493	inp = (struct sctp_inpcb *)so->so_pcb;
494	if (inp != 0) {
495		return EINVAL;
496	}
497	error = SCTP_SORESERVE(so, sctp_sendspace, sctp_recvspace);
498	if (error) {
499		return error;
500	}
501	error = sctp_inpcb_alloc(so);
502	if (error) {
503		return error;
504	}
505	inp = (struct sctp_inpcb *)so->so_pcb;
506	SCTP_INP_WLOCK(inp);
507
508	inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6;	/* I'm not v6! */
509	ip_inp = &inp->ip_inp.inp;
510	ip_inp->inp_vflag |= INP_IPV4;
511	ip_inp->inp_ip_ttl = ip_defttl;
512
513#ifdef IPSEC
514	error = ipsec_init_pcbpolicy(so, &ip_inp->inp_sp);
515#ifdef SCTP_LOG_CLOSING
516	sctp_log_closing(inp, NULL, 17);
517#endif
518	if (error != 0) {
519		flags = inp->sctp_flags;
520		if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
521		    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
522#ifdef SCTP_LOG_CLOSING
523			sctp_log_closing(inp, NULL, 15);
524#endif
525			SCTP_INP_WUNLOCK(inp);
526			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
527			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
528		} else {
529			SCTP_INP_WUNLOCK(inp);
530		}
531		return error;
532	}
533#endif				/* IPSEC */
534	SCTP_INP_WUNLOCK(inp);
535	return 0;
536}
537
538static int
539sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
540{
541	struct sctp_inpcb *inp;
542	int error;
543
544#ifdef INET6
545	if (addr && addr->sa_family != AF_INET)
546		/* must be a v4 address! */
547		return EINVAL;
548#endif				/* INET6 */
549
550	inp = (struct sctp_inpcb *)so->so_pcb;
551	if (inp == 0)
552		return EINVAL;
553
554	error = sctp_inpcb_bind(so, addr, p);
555	return error;
556}
557
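/*
 * Close the socket: mark the PCB gone and free it, aborting the
 * association if SO_LINGER with a zero timeout is set or unread data
 * remains, otherwise attempting a graceful shutdown.
 */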
558static void
559sctp_close(struct socket *so)
560{
561	struct sctp_inpcb *inp;
562	uint32_t flags;
563
564	inp = (struct sctp_inpcb *)so->so_pcb;
565	if (inp == 0)
566		return;
567
568	/*
569	 * Inform all the lower layer associations that we are done.
570	 */
571sctp_must_try_again:
572	flags = inp->sctp_flags;
573#ifdef SCTP_LOG_CLOSING
574	sctp_log_closing(inp, NULL, 17);
575#endif
576	if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
577	    (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
578		if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
579		    (so->so_rcv.sb_cc > 0)) {
580#ifdef SCTP_LOG_CLOSING
581			sctp_log_closing(inp, NULL, 13);
582#endif
583			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
584			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
585		} else {
586#ifdef SCTP_LOG_CLOSING
587			sctp_log_closing(inp, NULL, 14);
588#endif
589			sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
590			    SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
591		}
592		/*
593		 * The socket is now detached, no matter what the state of
594		 * the SCTP association.
595		 */
596		SOCK_LOCK(so);
597		SCTP_SB_CLEAR(so->so_snd);
598		/*
599		 * same for the rcv ones, they are only here for the
600		 * accounting/select.
601		 */
602		SCTP_SB_CLEAR(so->so_rcv);
603
604		/* Now null out the reference, we are completely detached. */
605		so->so_pcb = NULL;
606		SOCK_UNLOCK(so);
607	} else {
608		flags = inp->sctp_flags;
609		if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
610			goto sctp_must_try_again;
611		}
612	}
613	return;
614}
615
616
617int
618sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
619    struct mbuf *control, struct thread *p);
620
621
622int
623sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
624    struct mbuf *control, struct thread *p)
625{
626	struct sctp_inpcb *inp;
627	int error;
628
629	inp = (struct sctp_inpcb *)so->so_pcb;
630	if (inp == 0) {
631		if (control) {
632			sctp_m_freem(control);
633			control = NULL;
634		}
635		sctp_m_freem(m);
636		return EINVAL;
637	}
638	/* Got to have a to-address if we are NOT a connected socket */
639	if ((addr == NULL) &&
640	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
641	    (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
642	    ) {
643		goto connected_type;
644	} else if (addr == NULL) {
645		error = EDESTADDRREQ;
646		sctp_m_freem(m);
647		if (control) {
648			sctp_m_freem(control);
649			control = NULL;
650		}
651		return (error);
652	}
653#ifdef INET6
654	if (addr->sa_family != AF_INET) {
655		/* must be a v4 address! */
656		sctp_m_freem(m);
657		if (control) {
658			sctp_m_freem(control);
659			control = NULL;
660		}
661		error = EINVAL;
662		return (error);
663	}
664#endif				/* INET6 */
665connected_type:
666	/* now what about control */
667	if (control) {
668		if (inp->control) {
669			SCTP_PRINTF("huh? control set?\n");
670			sctp_m_freem(inp->control);
671			inp->control = NULL;
672		}
673		inp->control = control;
674	}
675	/* Place the data */
676	if (inp->pkt) {
677		SCTP_BUF_NEXT(inp->pkt_last) = m;
678		inp->pkt_last = m;
679	} else {
680		inp->pkt_last = inp->pkt = m;
681	}
682	if (
683	/* FreeBSD uses a flag passed */
684	    ((flags & PRUS_MORETOCOME) == 0)
685	    ) {
686		/*
687		 * note with the current version this code will only be used
688		 * by OpenBSD-- NetBSD, FreeBSD, and MacOS have methods for
689		 * re-defining sosend to use the sctp_sosend. One can
690		 * optionally switch back to this code (by changing back the
691		 * definitions) but this is not advisable. This code is used
692		 * by FreeBSD when sending a file with sendfile() though.
693		 */
694		int ret;
695
696		ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
697		inp->pkt = NULL;
698		inp->control = NULL;
699		return (ret);
700	} else {
701		return (0);
702	}
703}
704
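/*
 * Disconnect a one-to-one style socket: abort immediately if SO_LINGER
 * with a zero timeout is set or unread data remains, otherwise send a
 * SHUTDOWN (or mark SHUTDOWN-PENDING if data is still queued).  Not
 * supported for the one-to-many (UDP) model.
 */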
705static int
706sctp_disconnect(struct socket *so)
707{
708	struct sctp_inpcb *inp;
709
710	inp = (struct sctp_inpcb *)so->so_pcb;
711	if (inp == NULL) {
712		return (ENOTCONN);
713	}
714	SCTP_INP_RLOCK(inp);
715	if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
716		if (SCTP_LIST_EMPTY(&inp->sctp_asoc_list)) {
717			/* No connection */
718			SCTP_INP_RUNLOCK(inp);
719			return (0);
720		} else {
721			struct sctp_association *asoc;
722			struct sctp_tcb *stcb;
723
724			stcb = LIST_FIRST(&inp->sctp_asoc_list);
725			if (stcb == NULL) {
726				SCTP_INP_RUNLOCK(inp);
727				return (EINVAL);
728			}
729			SCTP_TCB_LOCK(stcb);
730			asoc = &stcb->asoc;
731			if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
732				/* We are about to be freed, out of here */
733				SCTP_TCB_UNLOCK(stcb);
734				SCTP_INP_RUNLOCK(inp);
735				return (0);
736			}
737			if (((so->so_options & SO_LINGER) &&
738			    (so->so_linger == 0)) ||
739			    (so->so_rcv.sb_cc > 0)) {
740				if (SCTP_GET_STATE(asoc) !=
741				    SCTP_STATE_COOKIE_WAIT) {
742					/* Left with Data unread */
743					struct mbuf *err;
744
745					err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
746					if (err) {
747						/*
748						 * Fill in the user
749						 * initiated abort
750						 */
751						struct sctp_paramhdr *ph;
752
753						ph = mtod(err, struct sctp_paramhdr *);
754						SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr);
755						ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
756						ph->param_length = htons(SCTP_BUF_LEN(err));
757					}
758					sctp_send_abort_tcb(stcb, err);
759					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
760				}
761				SCTP_INP_RUNLOCK(inp);
762				if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
763				    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
764					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
765				}
766				sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
767				/* No unlock tcb assoc is gone */
768				return (0);
769			}
770			if (TAILQ_EMPTY(&asoc->send_queue) &&
771			    TAILQ_EMPTY(&asoc->sent_queue) &&
772			    (asoc->stream_queue_cnt == 0)) {
773				/* there is nothing queued to send, so done */
774				if (asoc->locked_on_sending) {
775					goto abort_anyway;
776				}
777				if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
778				    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
779					/* only send SHUTDOWN 1st time thru */
780					sctp_stop_timers_for_shutdown(stcb);
781					sctp_send_shutdown(stcb,
782					    stcb->asoc.primary_destination);
783					sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3);
784					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
785					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
786						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
787					}
788					asoc->state = SCTP_STATE_SHUTDOWN_SENT;
789					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
790					    stcb->sctp_ep, stcb,
791					    asoc->primary_destination);
792					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
793					    stcb->sctp_ep, stcb,
794					    asoc->primary_destination);
795				}
796			} else {
797				/*
798				 * we still got (or just got) data to send,
799				 * so set SHUTDOWN_PENDING
800				 */
801				/*
802				 * XXX sockets draft says that SCTP_EOF
803				 * should be sent with no data. currently,
804				 * we will allow user data to be sent first
805				 * and move to SHUTDOWN-PENDING
806				 */
807				asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
808				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
809				    asoc->primary_destination);
810				if (asoc->locked_on_sending) {
811					/* Locked to send out the data */
812					struct sctp_stream_queue_pending *sp;
813
814					sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
815					if (sp == NULL) {
816						SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
817						    asoc->locked_on_sending->stream_no);
818					} else {
819						if ((sp->length == 0) && (sp->msg_is_complete == 0))
820							asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
821					}
822				}
823				if (TAILQ_EMPTY(&asoc->send_queue) &&
824				    TAILQ_EMPTY(&asoc->sent_queue) &&
825				    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
826					struct mbuf *op_err;
827
828			abort_anyway:
829					op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
830					    0, M_DONTWAIT, 1, MT_DATA);
831					if (op_err) {
832						/*
833						 * Fill in the user
834						 * initiated abort
835						 */
836						struct sctp_paramhdr *ph;
837						uint32_t *ippp;
838
839						SCTP_BUF_LEN(op_err) =
840						    (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
841						ph = mtod(op_err,
842						    struct sctp_paramhdr *);
843						ph->param_type = htons(
844						    SCTP_CAUSE_USER_INITIATED_ABT);
845						ph->param_length = htons(SCTP_BUF_LEN(op_err));
846						ippp = (uint32_t *) (ph + 1);
847						*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4);
848					}
849					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
850					sctp_send_abort_tcb(stcb, op_err);
851					SCTP_STAT_INCR_COUNTER32(sctps_aborted);
852					if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
853					    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
854						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
855					}
856					SCTP_INP_RUNLOCK(inp);
857					sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
858					return (0);
859				}
860			}
861			SCTP_TCB_UNLOCK(stcb);
862			SCTP_INP_RUNLOCK(inp);
863			return (0);
864		}
865		/* not reached */
866	} else {
867		/* UDP model does not support this */
868		SCTP_INP_RUNLOCK(inp);
869		return EOPNOTSUPP;
870	}
871}
872
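/*
 * shutdown(2) handler.  Only meaningful for one-to-one style sockets:
 * sends a SHUTDOWN when the queues are empty, otherwise marks the
 * association SHUTDOWN-PENDING (aborting if only a partial message
 * remains queued).
 */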
873int
874sctp_shutdown(struct socket *so)
875{
876	struct sctp_inpcb *inp;
877
878	inp = (struct sctp_inpcb *)so->so_pcb;
879	if (inp == 0) {
880		return EINVAL;
881	}
882	SCTP_INP_RLOCK(inp);
883	/* For the UDP model this is an invalid call */
884	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
885		/* Restore the flags that the soshutdown took away. */
886		so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
887		/* This proc will wake up for read and do nothing (I hope) */
888		SCTP_INP_RUNLOCK(inp);
889		return (EOPNOTSUPP);
890	}
891	/*
892	 * Ok, if we reach here it is the TCP model and it is either a SHUT_WR
893	 * or SHUT_RDWR. This means we put the shutdown flag against it.
894	 */
895	{
896		struct sctp_tcb *stcb;
897		struct sctp_association *asoc;
898
899		socantsendmore(so);
900
901		stcb = LIST_FIRST(&inp->sctp_asoc_list);
902		if (stcb == NULL) {
903			/*
904			 * Ok we hit the case that the shutdown call was
905			 * made after an abort or something. Nothing to do
906			 * now.
907			 */
908			SCTP_INP_RUNLOCK(inp);
909			return (0);
910		}
911		SCTP_TCB_LOCK(stcb);
912		asoc = &stcb->asoc;
913		if (TAILQ_EMPTY(&asoc->send_queue) &&
914		    TAILQ_EMPTY(&asoc->sent_queue) &&
915		    (asoc->stream_queue_cnt == 0)) {
916			if (asoc->locked_on_sending) {
917				goto abort_anyway;
918			}
919			/* there is nothing queued to send, so I'm done... */
920			if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
921				/* only send SHUTDOWN the first time through */
922				sctp_stop_timers_for_shutdown(stcb);
923				sctp_send_shutdown(stcb,
924				    stcb->asoc.primary_destination);
925				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3);
926				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
927				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
928					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
929				}
930				asoc->state = SCTP_STATE_SHUTDOWN_SENT;
931				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
932				    stcb->sctp_ep, stcb,
933				    asoc->primary_destination);
934				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
935				    stcb->sctp_ep, stcb,
936				    asoc->primary_destination);
937			}
938		} else {
939			/*
940			 * we still got (or just got) data to send, so set
941			 * SHUTDOWN_PENDING
942			 */
943			asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
944			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
945			    asoc->primary_destination);
946
947			if (asoc->locked_on_sending) {
948				/* Locked to send out the data */
949				struct sctp_stream_queue_pending *sp;
950
951				sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
952				if (sp == NULL) {
953					SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
954					    asoc->locked_on_sending->stream_no);
955				} else {
956					if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
957						asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
958					}
959				}
960			}
961			if (TAILQ_EMPTY(&asoc->send_queue) &&
962			    TAILQ_EMPTY(&asoc->sent_queue) &&
963			    (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
964				struct mbuf *op_err;
965
966		abort_anyway:
967				op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
968				    0, M_DONTWAIT, 1, MT_DATA);
969				if (op_err) {
970					/* Fill in the user initiated abort */
971					struct sctp_paramhdr *ph;
972					uint32_t *ippp;
973
974					SCTP_BUF_LEN(op_err) =
975					    sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
976					ph = mtod(op_err,
977					    struct sctp_paramhdr *);
978					ph->param_type = htons(
979					    SCTP_CAUSE_USER_INITIATED_ABT);
980					ph->param_length = htons(SCTP_BUF_LEN(op_err));
981					ippp = (uint32_t *) (ph + 1);
982					*ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
983				}
984				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
985				sctp_abort_an_association(stcb->sctp_ep, stcb,
986				    SCTP_RESPONSE_TO_USER_REQ,
987				    op_err);
988				goto skip_unlock;
989			}
990		}
991		SCTP_TCB_UNLOCK(stcb);
992	}
993skip_unlock:
994	SCTP_INP_RUNLOCK(inp);
995	return 0;
996}
997
998/*
999 * copies a "user" presentable address and removes embedded scope, etc.
1000 * returns 0 on success, 1 on error
1001 */
1002static uint32_t
1003sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
1004{
1005	struct sockaddr_in6 lsa6;
1006
1007	sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
1008	    &lsa6);
1009	memcpy(ss, sa, sa->sa_len);
1010	return (0);
1011}
1012
1013
1014
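/*
 * Fill the caller-supplied sockaddr_storage buffer with the endpoint's
 * usable local addresses for the given VRF.  For a bound-all endpoint
 * the VRF's interface list is walked (honoring the scopes of the TCB,
 * if any); for a subset-bound endpoint the endpoint's own address list
 * is used.  Packing stops once 'limit' bytes have been written, and the
 * number of bytes actually packed is returned.
 */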
1015static size_t
1016sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
1017    struct sctp_tcb *stcb,
1018    size_t limit,
1019    struct sockaddr_storage *sas,
1020    uint32_t vrf_id)
1021{
1022	struct sctp_ifn *sctp_ifn;
1023	struct sctp_ifa *sctp_ifa;
1024	int loopback_scope, ipv4_local_scope, local_scope, site_scope;
1025	size_t actual;
1026	int ipv4_addr_legal, ipv6_addr_legal;
1027	struct sctp_vrf *vrf;
1028
1029	actual = 0;
1030	if (limit <= 0)
1031		return (actual);
1032
1033	if (stcb) {
1034		/* Turn on all the appropriate scope */
1035		loopback_scope = stcb->asoc.loopback_scope;
1036		ipv4_local_scope = stcb->asoc.ipv4_local_scope;
1037		local_scope = stcb->asoc.local_scope;
1038		site_scope = stcb->asoc.site_scope;
1039	} else {
1040		/* Turn on ALL scope, since we look at the EP */
1041		loopback_scope = ipv4_local_scope = local_scope =
1042		    site_scope = 1;
1043	}
1044	ipv4_addr_legal = ipv6_addr_legal = 0;
1045	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1046		ipv6_addr_legal = 1;
1047		if (SCTP_IPV6_V6ONLY(inp) == 0) {
1048			ipv4_addr_legal = 1;
1049		}
1050	} else {
1051		ipv4_addr_legal = 1;
1052	}
1053	vrf = sctp_find_vrf(vrf_id);
1054	if (vrf == NULL) {
1055		return (0);
1056	}
1057	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1058		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1059			if ((loopback_scope == 0) &&
1060			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
1061				/* Skip loopback if loopback_scope not set */
1062				continue;
1063			}
1064			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1065				if (stcb) {
1066					/*
1067					 * For the BOUND-ALL case, the list
1068					 * associated with a TCB is always
1069					 * considered a reverse list, i.e.
1070					 * it lists addresses that are NOT
1071					 * part of the association. If this
1072					 * is one of those we must skip it.
1073					 */
1074					if (sctp_is_addr_restricted(stcb,
1075					    sctp_ifa)) {
1076						continue;
1077					}
1078				}
1079				if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
1080				    (ipv4_addr_legal)) {
1081					struct sockaddr_in *sin;
1082
1083					sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
1084					if (sin->sin_addr.s_addr == 0) {
1085						/*
1086						 * we skip unspecifed
1087						 * we skip unspecified
1088						 */
1089						continue;
1090					}
1091					if ((ipv4_local_scope == 0) &&
1092					    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1093						continue;
1094					}
1095					if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) {
1096						in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
1097						((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1098						sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
1099						actual += sizeof(struct sockaddr_in6);
1100					} else {
1101						memcpy(sas, sin, sizeof(*sin));
1102						((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
1103						sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
1104						actual += sizeof(*sin);
1105					}
1106					if (actual >= limit) {
1107						return (actual);
1108					}
1109				} else if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
1110				    (ipv6_addr_legal)) {
1111					struct sockaddr_in6 *sin6;
1112
1113					sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
1114					if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1115						/*
1116						 * we skip unspecified
1117						 * addresses
1118						 */
1119						continue;
1120					}
1121					if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1122						if (local_scope == 0)
1123							continue;
1124						if (sin6->sin6_scope_id == 0) {
1125							if (sa6_recoverscope(sin6) != 0)
1126								/*
1127								 * bad link
1128								 * local
1129								 * address
1130								 */
1131								continue;
1132						}
1133					}
1134					if ((site_scope == 0) &&
1135					    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1136						continue;
1137					}
1138					memcpy(sas, sin6, sizeof(*sin6));
1139					((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1140					sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
1141					actual += sizeof(*sin6);
1142					if (actual >= limit) {
1143						return (actual);
1144					}
1145				}
1146			}
1147		}
1148	} else {
1149		struct sctp_laddr *laddr;
1150
1151		/* The list is a NEGATIVE list */
1152		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1153			if (stcb) {
1154				if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
1155					continue;
1156				}
1157			}
1158			if (sctp_fill_user_address(sas, &laddr->ifa->address.sa))
1159				continue;
1160
1161			((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1162			sas = (struct sockaddr_storage *)((caddr_t)sas +
1163			    laddr->ifa->address.sa.sa_len);
1164			actual += laddr->ifa->address.sa.sa_len;
1165			if (actual >= limit) {
1166				return (actual);
1167			}
1168		}
1169	}
1170	return (actual);
1171}
1172
1173static size_t
1174sctp_fill_up_addresses(struct sctp_inpcb *inp,
1175    struct sctp_tcb *stcb,
1176    size_t limit,
1177    struct sockaddr_storage *sas)
1178{
1179	size_t size = 0;
1180
1181	/* fill up addresses for the endpoint's default vrf */
1182	size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
1183	    inp->def_vrf_id);
1184	return (size);
1185}
1186
1187static int
1188sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
1189{
1190	int cnt = 0;
1191	struct sctp_vrf *vrf = NULL;
1192
1193	/*
1194	 * In both the sub-set bound and bound_all cases we return the MAXIMUM
1195	 * number of addresses that you COULD get. In reality the sub-set
1196	 * bound case may have an exclusion list for a given TCB, OR in the
1197	 * bound-all case a TCB may NOT include the loopback or other
1198	 * addresses as well.
1199	 */
1200	vrf = sctp_find_vrf(vrf_id);
1201	if (vrf == NULL) {
1202		return (0);
1203	}
1204	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1205		struct sctp_ifn *sctp_ifn;
1206		struct sctp_ifa *sctp_ifa;
1207
1208		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1209			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1210				/* Count them if they are the right type */
1211				if (sctp_ifa->address.sa.sa_family == AF_INET) {
1212					if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
1213						cnt += sizeof(struct sockaddr_in6);
1214					else
1215						cnt += sizeof(struct sockaddr_in);
1216
1217				} else if (sctp_ifa->address.sa.sa_family == AF_INET6)
1218					cnt += sizeof(struct sockaddr_in6);
1219			}
1220		}
1221	} else {
1222		struct sctp_laddr *laddr;
1223
1224		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1225			if (laddr->ifa->address.sa.sa_family == AF_INET) {
1226				if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
1227					cnt += sizeof(struct sockaddr_in6);
1228				else
1229					cnt += sizeof(struct sockaddr_in);
1230
1231			} else if (laddr->ifa->address.sa.sa_family == AF_INET6)
1232				cnt += sizeof(struct sockaddr_in6);
1233		}
1234	}
1235	return (cnt);
1236}
1237
1238static int
1239sctp_count_max_addresses(struct sctp_inpcb *inp)
1240{
1241	int cnt = 0;
1242
1243	/* count addresses for the endpoint's default VRF */
1244	cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
1245	return (cnt);
1246}
1247
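/*
 * Common implementation of the sctp_connectx() socket options: parse
 * the packed address list handed down in optval, make sure no
 * association already exists for those peers, create a new association,
 * add the remaining addresses to it, and either arm the INIT timer
 * (delayed connect) or send the INIT immediately.  The new association
 * id is copied back into optval for the caller.
 */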
1248static int
1249sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
1250    size_t optsize, void *p, int delay)
1251{
1252	int error = 0;
1253	int creat_lock_on = 0;
1254	struct sctp_tcb *stcb = NULL;
1255	struct sockaddr *sa;
1256	int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr;
1257	int added = 0;
1258	uint32_t vrf_id;
1259	sctp_assoc_t *a_id;
1260
1261	SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n");
1262
1263	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1264	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1265		/* We are already connected AND the TCP model */
1266		return (EADDRINUSE);
1267	}
1268	if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
1269		return (EINVAL);
1270	}
1271	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
1272		SCTP_INP_RLOCK(inp);
1273		stcb = LIST_FIRST(&inp->sctp_asoc_list);
1274		SCTP_INP_RUNLOCK(inp);
1275	}
1276	if (stcb) {
1277		return (EALREADY);
1278	}
1279	SCTP_INP_INCR_REF(inp);
1280	SCTP_ASOC_CREATE_LOCK(inp);
1281	creat_lock_on = 1;
1282	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1283	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1284		error = EFAULT;
1285		goto out_now;
1286	}
1287	totaddrp = (int *)optval;
1288	totaddr = *totaddrp;
1289	sa = (struct sockaddr *)(totaddrp + 1);
1290	stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, &num_v6, &error, (optsize - sizeof(int)));
1291	if (stcb != NULL) {
1292		/* Already have or am bringing up an association */
1293		SCTP_ASOC_CREATE_UNLOCK(inp);
1294		creat_lock_on = 0;
1295		SCTP_TCB_UNLOCK(stcb);
1296		error = EALREADY;
1297		goto out_now;
1298	}
1299#ifdef INET6
1300	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
1301	    (num_v6 > 0)) {
1302		error = EINVAL;
1303		goto out_now;
1304	}
1305	if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1306	    (num_v4 > 0)) {
1307		struct in6pcb *inp6;
1308
1309		inp6 = (struct in6pcb *)inp;
1310		if (SCTP_IPV6_V6ONLY(inp6)) {
1311			/*
1312			 * if IPV6_V6ONLY flag, ignore connections destined
1313			 * to a v4 addr or v4-mapped addr
1314			 */
1315			error = EINVAL;
1316			goto out_now;
1317		}
1318	}
1319#endif				/* INET6 */
1320	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
1321	    SCTP_PCB_FLAGS_UNBOUND) {
1322		/* Bind an ephemeral port */
1323		error = sctp_inpcb_bind(so, NULL, p);
1324		if (error) {
1325			goto out_now;
1326		}
1327	}
1328	/* FIX ME: do we want to pass in a vrf on the connect call? */
1329	vrf_id = inp->def_vrf_id;
1330
1331	/* We are GOOD to go */
1332	stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0, vrf_id);
1333	if (stcb == NULL) {
1334		/* Gak! no memory */
1335		goto out_now;
1336	}
1337	stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
1338	/* move to second address */
1339	if (sa->sa_family == AF_INET)
1340		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
1341	else
1342		sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
1343
1344	added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error);
1345	/* Fill in the return id */
1346	a_id = (sctp_assoc_t *) optval;
1347	*a_id = sctp_get_associd(stcb);
1348
1349	/* initialize authentication parameters for the assoc */
1350	sctp_initialize_auth_params(inp, stcb);
1351
1352	if (delay) {
1353		/* doing delayed connection */
1354		stcb->asoc.delayed_connection = 1;
1355		sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
1356	} else {
1357		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1358		sctp_send_initiate(inp, stcb);
1359	}
1360	SCTP_TCB_UNLOCK(stcb);
1361	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
1362		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1363		/* Set the connected flag so we can queue data */
1364		soisconnecting(so);
1365	}
1366out_now:
1367	if (creat_lock_on) {
1368		SCTP_ASOC_CREATE_UNLOCK(inp);
1369	}
1370	SCTP_INP_DECR_REF(inp);
1371	return error;
1372}
1373
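/*
 * SCTP_FIND_STCB() resolves an assoc_id to a locked stcb: for
 * one-to-one style (TCP model) sockets the first (and only) TCB on the
 * endpoint is used; otherwise the id is looked up, setting ENOENT and
 * breaking out of the enclosing switch if it is not found.
 * SCTP_CHECK_AND_CAST() verifies that the option buffer is at least
 * sizeof(type) bytes before casting it, setting EINVAL otherwise.
 */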
1374#define SCTP_FIND_STCB(inp, stcb, assoc_id) { \
1375	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\
1376	    (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \
1377		SCTP_INP_RLOCK(inp); \
1378		stcb = LIST_FIRST(&inp->sctp_asoc_list); \
1379		if (stcb) { \
1380			SCTP_TCB_LOCK(stcb); \
1381                } \
1382		SCTP_INP_RUNLOCK(inp); \
1383	} else if (assoc_id != 0) { \
1384		stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
1385		if (stcb == NULL) { \
1386			error = ENOENT; \
1387			break; \
1388		} \
1389	} else { \
1390		stcb = NULL; \
1391        } \
1392  }
1393
1394
1395#define SCTP_CHECK_AND_CAST(destp, srcp, type, size)  {\
1396	if (size < sizeof(type)) { \
1397		error = EINVAL; \
1398		break; \
1399	} else { \
1400		destp = (type *)srcp; \
1401	} \
1402      }
1403
1404static int
1405sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
1406    void *p)
1407{
1408	struct sctp_inpcb *inp;
1409	int error, val = 0;
1410	struct sctp_tcb *stcb = NULL;
1411
1412	if (optval == NULL) {
1413		return (EINVAL);
1414	}
1415	inp = (struct sctp_inpcb *)so->so_pcb;
1416	if (inp == 0)
1417		return EINVAL;
1418	error = 0;
1419
1420	switch (optname) {
1421	case SCTP_NODELAY:
1422	case SCTP_AUTOCLOSE:
1423	case SCTP_EXPLICIT_EOR:
1424	case SCTP_AUTO_ASCONF:
1425	case SCTP_DISABLE_FRAGMENTS:
1426	case SCTP_I_WANT_MAPPED_V4_ADDR:
1427	case SCTP_USE_EXT_RCVINFO:
1428		SCTP_INP_RLOCK(inp);
1429		switch (optname) {
1430		case SCTP_DISABLE_FRAGMENTS:
1431			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
1432			break;
1433		case SCTP_I_WANT_MAPPED_V4_ADDR:
1434			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
1435			break;
1436		case SCTP_AUTO_ASCONF:
1437			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
1438			break;
1439		case SCTP_EXPLICIT_EOR:
1440			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
1441			break;
1442		case SCTP_NODELAY:
1443			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
1444			break;
1445		case SCTP_USE_EXT_RCVINFO:
1446			val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
1447			break;
1448		case SCTP_AUTOCLOSE:
1449			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
1450				val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time);
1451			else
1452				val = 0;
1453			break;
1454
1455		default:
1456			error = ENOPROTOOPT;
1457		}		/* end switch (sopt->sopt_name) */
1458		if (optname != SCTP_AUTOCLOSE) {
1459			/* make it an "on/off" value */
1460			val = (val != 0);
1461		}
1462		if (*optsize < sizeof(val)) {
1463			error = EINVAL;
1464		}
1465		SCTP_INP_RUNLOCK(inp);
1466		if (error == 0) {
1467			/* return the option value */
1468			*(int *)optval = val;
1469			*optsize = sizeof(val);
1470		}
1471		break;
1472
1473	case SCTP_PARTIAL_DELIVERY_POINT:
1474		{
1475			uint32_t *value;
1476
1477			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1478			*value = inp->partial_delivery_point;
1479			*optsize = sizeof(uint32_t);
1480		}
1481		break;
1482	case SCTP_FRAGMENT_INTERLEAVE:
1483		{
1484			uint32_t *value;
1485
1486			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1487			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) {
1488				if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) {
1489					*value = SCTP_FRAG_LEVEL_2;
1490				} else {
1491					*value = SCTP_FRAG_LEVEL_1;
1492				}
1493			} else {
1494				*value = SCTP_FRAG_LEVEL_0;
1495			}
1496			*optsize = sizeof(uint32_t);
1497		}
1498		break;
1499	case SCTP_CMT_ON_OFF:
1500		{
1501			struct sctp_assoc_value *av;
1502
1503			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1504			if (sctp_cmt_on_off) {
1505				SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1506				if (stcb) {
1507					av->assoc_value = stcb->asoc.sctp_cmt_on_off;
1508					SCTP_TCB_UNLOCK(stcb);
1509
1510				} else {
1511					error = ENOTCONN;
1512				}
1513			} else {
1514				error = ENOPROTOOPT;
1515			}
1516			*optsize = sizeof(*av);
1517		}
1518		break;
1519	case SCTP_GET_ADDR_LEN:
1520		{
1521			struct sctp_assoc_value *av;
1522
1523			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1524			error = EINVAL;
1525#ifdef INET
1526			if (av->assoc_value == AF_INET) {
1527				av->assoc_value = sizeof(struct sockaddr_in);
1528				error = 0;
1529			}
1530#endif
1531#ifdef INET6
1532			if (av->assoc_value == AF_INET6) {
1533				av->assoc_value = sizeof(struct sockaddr_in6);
1534				error = 0;
1535			}
1536#endif
1537			*optsize = sizeof(*av);
1538		}
1539		break;
1540	case SCTP_GET_ASSOC_NUMBER:
1541		{
1542			uint32_t *value, cnt;
1543
1544			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1545			cnt = 0;
1546			SCTP_INP_RLOCK(inp);
1547			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1548				cnt++;
1549			}
1550			SCTP_INP_RUNLOCK(inp);
1551			*value = cnt;
1552			*optsize = sizeof(uint32_t);
1553		}
1554		break;
1555
1556	case SCTP_GET_ASSOC_ID_LIST:
1557		{
1558			struct sctp_assoc_ids *ids;
1559			unsigned int at, limit;
1560
1561			SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
1562			at = 0;
1563			limit = *optsize / sizeof(sctp_assoc_t);
1564			SCTP_INP_RLOCK(inp);
1565			LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1566				if (at < limit) {
1567					ids->gaids_assoc_id[at++] = sctp_get_associd(stcb);
1568				} else {
1569					error = EINVAL;
1570					break;
1571				}
1572			}
1573			SCTP_INP_RUNLOCK(inp);
1574			*optsize = at * sizeof(sctp_assoc_t);
1575		}
1576		break;
1577	case SCTP_CONTEXT:
1578		{
1579			struct sctp_assoc_value *av;
1580
1581			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1582			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1583
1584			if (stcb) {
1585				av->assoc_value = stcb->asoc.context;
1586				SCTP_TCB_UNLOCK(stcb);
1587			} else {
1588				SCTP_INP_RLOCK(inp);
1589				av->assoc_value = inp->sctp_context;
1590				SCTP_INP_RUNLOCK(inp);
1591			}
1592			*optsize = sizeof(*av);
1593		}
1594		break;
1595	case SCTP_VRF_ID:
1596		{
1597			uint32_t *vrf_id;
1598
1599			SCTP_CHECK_AND_CAST(vrf_id, optval, uint32_t, *optsize);
1600			*vrf_id = inp->def_vrf_id;
1601			break;
1602		}
1603	case SCTP_GET_ASOC_VRF:
1604		{
1605			struct sctp_assoc_value *id;
1606
1607			SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize);
1608			SCTP_FIND_STCB(inp, stcb, id->assoc_id);
1609			if (stcb == NULL) {
1610				error = EINVAL;
1611				break;
1612			}
1613			id->assoc_value = stcb->asoc.vrf_id;
1614			break;
1615		}
1616	case SCTP_GET_VRF_IDS:
1617		{
1618			error = EOPNOTSUPP;
1619			break;
1620		}
1621	case SCTP_GET_NONCE_VALUES:
1622		{
1623			struct sctp_get_nonce_values *gnv;
1624
1625			SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize);
1626			SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id);
1627
1628			if (stcb) {
1629				gnv->gn_peers_tag = stcb->asoc.peer_vtag;
1630				gnv->gn_local_tag = stcb->asoc.my_vtag;
1631				SCTP_TCB_UNLOCK(stcb);
1632			} else {
1633				error = ENOTCONN;
1634			}
1635			*optsize = sizeof(*gnv);
1636		}
1637		break;
1638	case SCTP_DELAYED_ACK_TIME:
1639		{
1640			struct sctp_assoc_value *tm;
1641
1642			SCTP_CHECK_AND_CAST(tm, optval, struct sctp_assoc_value, *optsize);
1643			SCTP_FIND_STCB(inp, stcb, tm->assoc_id);
1644
1645			if (stcb) {
1646				tm->assoc_value = stcb->asoc.delayed_ack;
1647				SCTP_TCB_UNLOCK(stcb);
1648			} else {
1649				SCTP_INP_RLOCK(inp);
1650				tm->assoc_value = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1651				SCTP_INP_RUNLOCK(inp);
1652			}
1653			*optsize = sizeof(*tm);
1654		}
1655		break;
1656
1657	case SCTP_GET_SNDBUF_USE:
1658		{
1659			struct sctp_sockstat *ss;
1660
1661			SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize);
1662			SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id);
1663
1664			if (stcb) {
1665				ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size;
1666				ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue +
1667				    stcb->asoc.size_on_all_streams);
1668				SCTP_TCB_UNLOCK(stcb);
1669			} else {
1670				error = ENOTCONN;
1671			}
1672			*optsize = sizeof(struct sctp_sockstat);
1673		}
1674		break;
1675	case SCTP_MAXBURST:
1676		{
1677			uint8_t *value;
1678
1679			SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize);
1680
1681			SCTP_INP_RLOCK(inp);
1682			*value = inp->sctp_ep.max_burst;
1683			SCTP_INP_RUNLOCK(inp);
1684			*optsize = sizeof(uint8_t);
1685		}
1686		break;
1687	case SCTP_MAXSEG:
1688		{
1689			struct sctp_assoc_value *av;
1690			int ovh;
1691
1692			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1693			if (av->assoc_id) {
1694				SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1695			} else {
1696				stcb = NULL;
1697			}
1698
1699			if (stcb) {
1700				av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc);
1701				SCTP_TCB_UNLOCK(stcb);
1702			} else {
1703				SCTP_INP_RLOCK(inp);
1704				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1705					ovh = SCTP_MED_OVERHEAD;
1706				} else {
1707					ovh = SCTP_MED_V4_OVERHEAD;
1708				}
1709				av->assoc_value = inp->sctp_frag_point - ovh;
1710				SCTP_INP_RUNLOCK(inp);
1711			}
1712			*optsize = sizeof(struct sctp_assoc_value);
1713		}
1714		break;
1715	case SCTP_GET_STAT_LOG:
1716#ifdef SCTP_STAT_LOGGING
1717		error = sctp_fill_stat_log(optval, optsize);
1718#else
1719		error = EOPNOTSUPP;
1720#endif
1721		break;
1722	case SCTP_EVENTS:
1723		{
1724			struct sctp_event_subscribe *events;
1725
1726			SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize);
1727			memset(events, 0, sizeof(*events));
1728			SCTP_INP_RLOCK(inp);
1729			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT))
1730				events->sctp_data_io_event = 1;
1731
1732			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT))
1733				events->sctp_association_event = 1;
1734
1735			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT))
1736				events->sctp_address_event = 1;
1737
1738			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
1739				events->sctp_send_failure_event = 1;
1740
1741			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR))
1742				events->sctp_peer_error_event = 1;
1743
1744			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
1745				events->sctp_shutdown_event = 1;
1746
1747			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT))
1748				events->sctp_partial_delivery_event = 1;
1749
1750			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
1751				events->sctp_adaptation_layer_event = 1;
1752
1753			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
1754				events->sctp_authentication_event = 1;
1755
1756			if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
1757				events->sctp_stream_reset_events = 1;
1758			SCTP_INP_RUNLOCK(inp);
1759			*optsize = sizeof(struct sctp_event_subscribe);
1760		}
1761		break;
1762
1763	case SCTP_ADAPTATION_LAYER:
1764		{
1765			uint32_t *value;
1766
1767			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1768
1769			SCTP_INP_RLOCK(inp);
1770			*value = inp->sctp_ep.adaptation_layer_indicator;
1771			SCTP_INP_RUNLOCK(inp);
1772			*optsize = sizeof(uint32_t);
1773		}
1774		break;
1775	case SCTP_SET_INITIAL_DBG_SEQ:
1776		{
1777			uint32_t *value;
1778
1779			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1780			SCTP_INP_RLOCK(inp);
1781			*value = inp->sctp_ep.initial_sequence_debug;
1782			SCTP_INP_RUNLOCK(inp);
1783			*optsize = sizeof(uint32_t);
1784		}
1785		break;
1786	case SCTP_GET_LOCAL_ADDR_SIZE:
1787		{
1788			uint32_t *value;
1789
1790			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1791			SCTP_INP_RLOCK(inp);
1792			*value = sctp_count_max_addresses(inp);
1793			SCTP_INP_RUNLOCK(inp);
1794			*optsize = sizeof(uint32_t);
1795		}
1796		break;
1797	case SCTP_GET_REMOTE_ADDR_SIZE:
1798		{
1799			uint32_t *value;
1800			size_t size;
1801			struct sctp_nets *net;
1802
1803			SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1804			/* FIXME MT: change to sctp_assoc_value? */
1805			SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value);
1806
1807			if (stcb) {
1808				size = 0;
1809				/* Count the sizes */
1810				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1811					if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
1812					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
1813						size += sizeof(struct sockaddr_in6);
1814					} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
1815						size += sizeof(struct sockaddr_in);
1816					} else {
1817						/* huh */
1818						break;
1819					}
1820				}
1821				SCTP_TCB_UNLOCK(stcb);
1822				*value = (uint32_t) size;
1823			} else {
1824				error = ENOTCONN;
1825			}
1826			*optsize = sizeof(uint32_t);
1827		}
1828		break;
1829	case SCTP_GET_PEER_ADDRESSES:
1830		/*
1831		 * Get the address information; an array is passed in to
1832		 * fill up and we pack it.
1833		 */
1834		{
1835			size_t cpsz, left;
1836			struct sockaddr_storage *sas;
1837			struct sctp_nets *net;
1838			struct sctp_getaddresses *saddr;
1839
1840			SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
1841			SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
1842
1843			if (stcb) {
1844				left = (*optsize) - sizeof(struct sctp_getaddresses);
1845				*optsize = sizeof(struct sctp_getaddresses);
1846				sas = (struct sockaddr_storage *)&saddr->addr[0];
1847
1848				TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1849					if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
1850					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
1851						cpsz = sizeof(struct sockaddr_in6);
1852					} else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
1853						cpsz = sizeof(struct sockaddr_in);
1854					} else {
1855						/* huh */
1856						break;
1857					}
1858					if (left < cpsz) {
1859						/* not enough room. */
1860						break;
1861					}
1862					if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
1863					    (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
1864						/* Must map the address */
1865						in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
1866						    (struct sockaddr_in6 *)sas);
1867					} else {
1868						memcpy(sas, &net->ro._l_addr, cpsz);
1869					}
1870					((struct sockaddr_in *)sas)->sin_port = stcb->rport;
1871
1872					sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
1873					left -= cpsz;
1874					*optsize += cpsz;
1875				}
1876				SCTP_TCB_UNLOCK(stcb);
1877			} else {
1878				error = ENOENT;
1879			}
1880		}
1881		break;
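	/*
	 * SCTP_GET_LOCAL_ADDRESSES packs the endpoint's local addresses
	 * (restricted to the association when an assoc id is given) into the
	 * caller's buffer, limited by the supplied option length.
	 */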
1882	case SCTP_GET_LOCAL_ADDRESSES:
1883		{
1884			size_t limit, actual;
1885			struct sockaddr_storage *sas;
1886			struct sctp_getaddresses *saddr;
1887
1888			SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
1889			SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
1890
1891			sas = (struct sockaddr_storage *)&saddr->addr[0];
1892			limit = *optsize - sizeof(sctp_assoc_t);
1893			actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
1894			if (stcb) {
1895				SCTP_TCB_UNLOCK(stcb);
1896			}
1897			*optsize = sizeof(struct sockaddr_storage) + actual;
1898		}
1899		break;
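	/*
	 * SCTP_PEER_ADDR_PARAMS (get) returns heartbeat, path MTU and path
	 * retransmit settings, either for a specific destination, for the
	 * whole association, or the endpoint defaults when no assoc is found.
	 */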
1900	case SCTP_PEER_ADDR_PARAMS:
1901		{
1902			struct sctp_paddrparams *paddrp;
1903			struct sctp_nets *net;
1904
1905			SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
1906			SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
1907
1908			net = NULL;
1909			if (stcb) {
1910				net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
1911			} else {
1912				/*
1913				 * We increment the refcount here since
1914				 * sctp_findassociation_ep_addr() will do a
1915				 * decrement if it finds the stcb, as long
1916				 * as the locked tcb (last argument) is
1917				 * NULL.
1918				 */
1919				SCTP_INP_INCR_REF(inp);
1920				stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL);
1921				if (stcb == NULL) {
1922					SCTP_INP_DECR_REF(inp);
1923				}
1924			}
1925
1926			if (stcb) {
1927				/* Applies to the specific association */
1928				paddrp->spp_flags = 0;
1929				if (net) {
1930					paddrp->spp_pathmaxrxt = net->failure_threshold;
1931					paddrp->spp_pathmtu = net->mtu;
1932					/* get flags for HB */
1933					if (net->dest_state & SCTP_ADDR_NOHB)
1934						paddrp->spp_flags |= SPP_HB_DISABLE;
1935					else
1936						paddrp->spp_flags |= SPP_HB_ENABLE;
1937					/* get flags for PMTU */
1938					if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
1939						paddrp->spp_flags |= SPP_PMTUD_ENABLE;
1940					} else {
1941						paddrp->spp_flags |= SPP_PMTUD_DISABLE;
1942					}
1943#ifdef INET
1944					if (net->ro._l_addr.sin.sin_family == AF_INET) {
1945						paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc;
1946						paddrp->spp_flags |= SPP_IPV4_TOS;
1947					}
1948#endif
1949#ifdef INET6
1950					if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
1951						paddrp->spp_ipv6_flowlabel = net->tos_flowlabel;
1952						paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
1953					}
1954#endif
1955				} else {
1956					/*
1957					 * No destination so return default
1958					 * value
1959					 */
1960					paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
1961					paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc);
1962#ifdef INET
1963					paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc;
1964					paddrp->spp_flags |= SPP_IPV4_TOS;
1965#endif
1966#ifdef INET6
1967					paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel;
1968					paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
1969#endif
1970					/* default settings should be these */
1971					if (sctp_is_hb_timer_running(stcb)) {
1972						paddrp->spp_flags |= SPP_HB_ENABLE;
1973					}
1974				}
1975				paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
1976				paddrp->spp_assoc_id = sctp_get_associd(stcb);
1977				SCTP_TCB_UNLOCK(stcb);
1978			} else {
1979				/* Use endpoint defaults */
1980				SCTP_INP_RLOCK(inp);
1981				paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
1982				paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
1983				paddrp->spp_assoc_id = (sctp_assoc_t) 0;
1984				/* get inp's default */
1985#ifdef INET
1986				paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos;
1987				paddrp->spp_flags |= SPP_IPV4_TOS;
1988#endif
1989#ifdef INET6
1990				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1991					paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
1992					paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
1993				}
1994#endif
1995				/* can't return this */
1996				paddrp->spp_pathmaxrxt = 0;
1997				paddrp->spp_pathmtu = 0;
1998				/* default behavior, no stcb */
1999				paddrp->spp_flags = SPP_HB_ENABLE | SPP_PMTUD_ENABLE;
2000
2001				SCTP_INP_RUNLOCK(inp);
2002			}
2003			*optsize = sizeof(struct sctp_paddrparams);
2004		}
2005		break;
2006	case SCTP_GET_PEER_ADDR_INFO:
2007		{
2008			struct sctp_paddrinfo *paddri;
2009			struct sctp_nets *net;
2010
2011			SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize);
2012			SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id);
2013
2014			net = NULL;
2015			if (stcb) {
2016				net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address);
2017			} else {
2018				/*
2019				 * We increment the refcount here since
2020				 * sctp_findassociation_ep_addr() will do a
2021				 * decrement if it finds the stcb, as long
2022				 * as the locked tcb (last argument) is
2023				 * NULL.
2024				 */
2025				SCTP_INP_INCR_REF(inp);
2026				stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL);
2027				if (stcb == NULL) {
2028					SCTP_INP_DECR_REF(inp);
2029				}
2030			}
2031
2032			if ((stcb) && (net)) {
2033				paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB);
2034				paddri->spinfo_cwnd = net->cwnd;
2035				paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
2036				paddri->spinfo_rto = net->RTO;
2037				paddri->spinfo_assoc_id = sctp_get_associd(stcb);
2038				SCTP_TCB_UNLOCK(stcb);
2039			} else {
2040				if (stcb) {
2041					SCTP_TCB_UNLOCK(stcb);
2042				}
2043				error = ENOENT;
2044			}
2045			*optsize = sizeof(struct sctp_paddrinfo);
2046		}
2047		break;
2048	case SCTP_PCB_STATUS:
2049		{
2050			struct sctp_pcbinfo *spcb;
2051
2052			SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize);
2053			sctp_fill_pcbinfo(spcb);
2054			*optsize = sizeof(struct sctp_pcbinfo);
2055		}
2056		break;
2057
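	/*
	 * SCTP_STATUS fills in a snapshot of the association: state, peer
	 * rwnd, queued data counts, stream counts and the primary path's
	 * reachability, cwnd, SRTT, RTO and MTU.
	 */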
2058	case SCTP_STATUS:
2059		{
2060			struct sctp_nets *net;
2061			struct sctp_status *sstat;
2062
2063			SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize);
2064			SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id);
2065
2066			if (stcb == NULL) {
2067				error = EINVAL;
2068				break;
2069			}
2070			/*
2071			 * Passing the raw state is fine since
2072			 * sctp_constants.h is available to
2073			 * userland.
2074			 */
2075			sstat->sstat_state = stcb->asoc.state;
2076			sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
2077			sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
2078			/*
2079			 * We can't include chunks that have been passed to
2080			 * the socket layer. Only things in queue.
2081			 */
2082			sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue +
2083			    stcb->asoc.cnt_on_all_streams);
2084
2085
2086			sstat->sstat_instrms = stcb->asoc.streamincnt;
2087			sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
2088			sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
2089			memcpy(&sstat->sstat_primary.spinfo_address,
2090			    &stcb->asoc.primary_destination->ro._l_addr,
2091			    ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
2092			net = stcb->asoc.primary_destination;
2093			((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
2094			/*
2095			 * Again the user can get info from sctp_constants.h
2096			 * for what the state of the network is.
2097			 */
2098			sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
2099			sstat->sstat_primary.spinfo_cwnd = net->cwnd;
2100			sstat->sstat_primary.spinfo_srtt = net->lastsa;
2101			sstat->sstat_primary.spinfo_rto = net->RTO;
2102			sstat->sstat_primary.spinfo_mtu = net->mtu;
2103			sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
2104			SCTP_TCB_UNLOCK(stcb);
2105			*optsize = sizeof(*sstat);
2106		}
2107		break;
2108	case SCTP_RTOINFO:
2109		{
2110			struct sctp_rtoinfo *srto;
2111
2112			SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize);
2113			SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
2114
2115			if (stcb) {
2116				srto->srto_initial = stcb->asoc.initial_rto;
2117				srto->srto_max = stcb->asoc.maxrto;
2118				srto->srto_min = stcb->asoc.minrto;
2119				SCTP_TCB_UNLOCK(stcb);
2120			} else {
2121				SCTP_INP_RLOCK(inp);
2122				srto->srto_initial = inp->sctp_ep.initial_rto;
2123				srto->srto_max = inp->sctp_ep.sctp_maxrto;
2124				srto->srto_min = inp->sctp_ep.sctp_minrto;
2125				SCTP_INP_RUNLOCK(inp);
2126			}
2127			*optsize = sizeof(*srto);
2128		}
2129		break;
2130	case SCTP_ASSOCINFO:
2131		{
2132			struct sctp_assocparams *sasoc;
2133
2134			SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize);
2135			SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
2136
2137			if (stcb) {
2138				sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
2139				sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
2140				sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
2141				sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
2142				sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life);
2143				sasoc->sasoc_sack_delay = stcb->asoc.delayed_ack;
2144				sasoc->sasoc_sack_freq = stcb->asoc.sack_freq;
2145				SCTP_TCB_UNLOCK(stcb);
2146			} else {
2147				SCTP_INP_RLOCK(inp);
2148				sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
2149				sasoc->sasoc_number_peer_destinations = 0;
2150				sasoc->sasoc_peer_rwnd = 0;
2151				sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv);
2152				sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life);
2153				sasoc->sasoc_sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
2154				sasoc->sasoc_sack_freq = inp->sctp_ep.sctp_sack_freq;
2155				SCTP_INP_RUNLOCK(inp);
2156			}
2157			*optsize = sizeof(*sasoc);
2158		}
2159		break;
2160	case SCTP_DEFAULT_SEND_PARAM:
2161		{
2162			struct sctp_sndrcvinfo *s_info;
2163
2164			SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize);
2165			SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
2166
2167			if (stcb) {
2168				*s_info = stcb->asoc.def_send;
2169				SCTP_TCB_UNLOCK(stcb);
2170			} else {
2171				SCTP_INP_RLOCK(inp);
2172				*s_info = inp->def_send;
2173				SCTP_INP_RUNLOCK(inp);
2174			}
2175			*optsize = sizeof(*s_info);
2176		}
2177		break;
2178	case SCTP_INITMSG:
2179		{
2180			struct sctp_initmsg *sinit;
2181
2182			SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize);
2183			SCTP_INP_RLOCK(inp);
2184			sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
2185			sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
2186			sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
2187			sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
2188			SCTP_INP_RUNLOCK(inp);
2189			*optsize = sizeof(*sinit);
2190		}
2191		break;
2192	case SCTP_PRIMARY_ADDR:
2193		/* we allow a "get" operation on this */
2194		{
2195			struct sctp_setprim *ssp;
2196
2197			SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize);
2198			SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id);
2199
2200			if (stcb) {
2201				/* simply copy out the sockaddr_storage... */
2202				memcpy(&ssp->ssp_addr, &stcb->asoc.primary_destination->ro._l_addr,
2203				    ((struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr)->sa_len);
2204				SCTP_TCB_UNLOCK(stcb);
2205			} else {
2206				error = EINVAL;
2207			}
2208			*optsize = sizeof(*ssp);
2209		}
2210		break;
2211
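	/*
	 * SCTP_HMAC_IDENT (get) returns the list of HMAC algorithm ids the
	 * endpoint will offer, provided the caller's buffer is large enough.
	 */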
2212	case SCTP_HMAC_IDENT:
2213		{
2214			struct sctp_hmacalgo *shmac;
2215			sctp_hmaclist_t *hmaclist;
2216			uint32_t size;
2217			int i;
2218
2219			SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize);
2220
2221			SCTP_INP_RLOCK(inp);
2222			hmaclist = inp->sctp_ep.local_hmacs;
2223			if (hmaclist == NULL) {
2224				/* no HMACs to return */
2225				*optsize = sizeof(*shmac);
2226				SCTP_INP_RUNLOCK(inp);
2227				break;
2228			}
2229			/* is there room for all of the hmac ids? */
2230			size = sizeof(*shmac) + (hmaclist->num_algo *
2231			    sizeof(shmac->shmac_idents[0]));
2232			if ((size_t)(*optsize) < size) {
2233				error = EINVAL;
2234				SCTP_INP_RUNLOCK(inp);
2235				break;
2236			}
2237			/* copy in the list */
2238			for (i = 0; i < hmaclist->num_algo; i++)
2239				shmac->shmac_idents[i] = hmaclist->hmac[i];
2240			SCTP_INP_RUNLOCK(inp);
2241			*optsize = size;
2242			break;
2243		}
2244	case SCTP_AUTH_ACTIVE_KEY:
2245		{
2246			struct sctp_authkeyid *scact;
2247
2248			SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize);
2249			SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
2250
2251			if (stcb) {
2252				/* get the active key on the assoc */
2253				scact->scact_keynumber = stcb->asoc.authinfo.assoc_keyid;
2254				SCTP_TCB_UNLOCK(stcb);
2255			} else {
2256				/* get the endpoint active key */
2257				SCTP_INP_RLOCK(inp);
2258				scact->scact_keynumber = inp->sctp_ep.default_keyid;
2259				SCTP_INP_RUNLOCK(inp);
2260			}
2261			*optsize = sizeof(*scact);
2262			break;
2263		}
2264	case SCTP_LOCAL_AUTH_CHUNKS:
2265		{
2266			struct sctp_authchunks *sac;
2267			sctp_auth_chklist_t *chklist = NULL;
2268			size_t size = 0;
2269
2270			SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2271			SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2272
2273			if (stcb) {
2274				/* get off the assoc */
2275				chklist = stcb->asoc.local_auth_chunks;
2276				/* is there enough space? */
2277				size = sctp_auth_get_chklist_size(chklist);
2278				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2279					error = EINVAL;
2280				} else {
2281					/* copy in the chunks */
2282					(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2283				}
2284				SCTP_TCB_UNLOCK(stcb);
2285			} else {
2286				/* get off the endpoint */
2287				SCTP_INP_RLOCK(inp);
2288				chklist = inp->sctp_ep.local_auth_chunks;
2289				/* is there enough space? */
2290				size = sctp_auth_get_chklist_size(chklist);
2291				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2292					error = EINVAL;
2293				} else {
2294					/* copy in the chunks */
2295					(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2296				}
2297				SCTP_INP_RUNLOCK(inp);
2298			}
2299			*optsize = sizeof(struct sctp_authchunks) + size;
2300			break;
2301		}
2302	case SCTP_PEER_AUTH_CHUNKS:
2303		{
2304			struct sctp_authchunks *sac;
2305			sctp_auth_chklist_t *chklist = NULL;
2306			size_t size = 0;
2307
2308			SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2309			SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2310
2311			if (stcb) {
2312				/* get off the assoc */
2313				chklist = stcb->asoc.peer_auth_chunks;
2314				/* is there enough space? */
2315				size = sctp_auth_get_chklist_size(chklist);
2316				if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2317					error = EINVAL;
2318				} else {
2319					/* copy in the chunks */
2320					(void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2321				}
2322				SCTP_TCB_UNLOCK(stcb);
2323			} else {
2324				error = ENOENT;
2325			}
2326			*optsize = sizeof(struct sctp_authchunks) + size;
2327			break;
2328		}
2329
2330
2331	default:
2332		error = ENOPROTOOPT;
2333		*optsize = 0;
2334		break;
2335	}			/* end switch (sopt->sopt_name) */
2336	return (error);
2337}
2338
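/*
 * Handle a SOPT_SET level IPPROTO_SCTP socket option. The option value has
 * already been copied into kernel space by sctp_ctloutput(); optsize is the
 * caller-supplied length. Returns an errno value, 0 on success.
 */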
2339static int
2340sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
2341    void *p)
2342{
2343	int error, set_opt;
2344	uint32_t *mopt;
2345	struct sctp_tcb *stcb = NULL;
2346	struct sctp_inpcb *inp;
2347	uint32_t vrf_id;
2348
2349	if (optval == NULL) {
2350		SCTP_PRINTF("optval is NULL\n");
2351		return (EINVAL);
2352	}
2353	inp = (struct sctp_inpcb *)so->so_pcb;
2354	if (inp == 0) {
2355		SCTP_PRINTF("inp is NULL?\n");
2356		return EINVAL;
2357	}
2358	vrf_id = inp->def_vrf_id;
2359
2360	error = 0;
2361	switch (optname) {
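	/*
	 * The first group of options below maps directly to an endpoint
	 * feature flag: a non-zero uint32_t turns the feature on, zero turns
	 * it off. SCTP_AUTOCLOSE additionally records the auto-close time.
	 */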
2362	case SCTP_NODELAY:
2363	case SCTP_AUTOCLOSE:
2364	case SCTP_AUTO_ASCONF:
2365	case SCTP_EXPLICIT_EOR:
2366	case SCTP_DISABLE_FRAGMENTS:
2367	case SCTP_USE_EXT_RCVINFO:
2368	case SCTP_I_WANT_MAPPED_V4_ADDR:
2369		/* copy in the option value */
2370		SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
2371		set_opt = 0;
2372		if (error)
2373			break;
2374		switch (optname) {
2375		case SCTP_DISABLE_FRAGMENTS:
2376			set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
2377			break;
2378		case SCTP_AUTO_ASCONF:
2379			set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
2380			break;
2381		case SCTP_EXPLICIT_EOR:
2382			set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
2383			break;
2384		case SCTP_USE_EXT_RCVINFO:
2385			set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
2386			break;
2387		case SCTP_I_WANT_MAPPED_V4_ADDR:
2388			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2389				set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
2390			} else {
2391				return (EINVAL);
2392			}
2393			break;
2394		case SCTP_NODELAY:
2395			set_opt = SCTP_PCB_FLAGS_NODELAY;
2396			break;
2397		case SCTP_AUTOCLOSE:
2398			set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
2399			/*
2400			 * The value is in seconds, stored as ticks. Note this does
2401			 * not affect existing associations, only new ones.
2402			 */
2403			inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt);
2404			break;
2405		}
2406		SCTP_INP_WLOCK(inp);
2407		if (*mopt != 0) {
2408			sctp_feature_on(inp, set_opt);
2409		} else {
2410			sctp_feature_off(inp, set_opt);
2411		}
2412		SCTP_INP_WUNLOCK(inp);
2413		break;
2414	case SCTP_PARTIAL_DELIVERY_POINT:
2415		{
2416			uint32_t *value;
2417
2418			SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
2419			if (*value > SCTP_SB_LIMIT_RCV(so)) {
2420				error = EINVAL;
2421				break;
2422			}
2423			inp->partial_delivery_point = *value;
2424		}
2425		break;
2426	case SCTP_FRAGMENT_INTERLEAVE:
2427		/* not yet until we re-write sctp_recvmsg() */
2428		{
2429			uint32_t *level;
2430
2431			SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize);
2432			if (*level == SCTP_FRAG_LEVEL_2) {
2433				sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2434				sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2435			} else if (*level == SCTP_FRAG_LEVEL_1) {
2436				sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2437				sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2438			} else if (*level == SCTP_FRAG_LEVEL_0) {
2439				sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2440				sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2442			} else {
2443				error = EINVAL;
2444			}
2445		}
2446		break;
2447	case SCTP_CMT_ON_OFF:
2448		{
2449			struct sctp_assoc_value *av;
2450
2451			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2452			if (sctp_cmt_on_off) {
2453				SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2454				if (stcb) {
2455					stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value;
2456					SCTP_TCB_UNLOCK(stcb);
2457				} else {
2458					error = ENOTCONN;
2459				}
2460			} else {
2461				error = ENOPROTOOPT;
2462			}
2463		}
2464		break;
2465	case SCTP_CLR_STAT_LOG:
2466#ifdef SCTP_STAT_LOGGING
2467		sctp_clr_stat_log();
2468#else
2469		error = EOPNOTSUPP;
2470#endif
2471		break;
2472	case SCTP_CONTEXT:
2473		{
2474			struct sctp_assoc_value *av;
2475
2476			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2477			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2478
2479			if (stcb) {
2480				stcb->asoc.context = av->assoc_value;
2481				SCTP_TCB_UNLOCK(stcb);
2482			} else {
2483				SCTP_INP_WLOCK(inp);
2484				inp->sctp_context = av->assoc_value;
2485				SCTP_INP_WUNLOCK(inp);
2486			}
2487		}
2488		break;
2489	case SCTP_VRF_ID:
2490		{
2491			uint32_t *vrf_id;
2492
2493			SCTP_CHECK_AND_CAST(vrf_id, optval, uint32_t, optsize);
2494			if (*vrf_id > SCTP_MAX_VRF_ID) {
2495				error = EINVAL;
2496				break;
2497			}
2498			inp->def_vrf_id = *vrf_id;
2499			break;
2500		}
2501	case SCTP_DEL_VRF_ID:
2502		{
2503			error = EOPNOTSUPP;
2504			break;
2505		}
2506	case SCTP_ADD_VRF_ID:
2507		{
2508			error = EOPNOTSUPP;
2509			break;
2510		}
2511
2512	case SCTP_DELAYED_ACK_TIME:
2513		{
2514			struct sctp_assoc_value *tm;
2515
2516			SCTP_CHECK_AND_CAST(tm, optval, struct sctp_assoc_value, optsize);
2517			SCTP_FIND_STCB(inp, stcb, tm->assoc_id);
2518
2519			if (stcb) {
2520				stcb->asoc.delayed_ack = tm->assoc_value;
2521				SCTP_TCB_UNLOCK(stcb);
2522			} else {
2523				SCTP_INP_WLOCK(inp);
2524				inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(tm->assoc_value);
2525				SCTP_INP_WUNLOCK(inp);
2526			}
2527			break;
2528		}
2529	case SCTP_AUTH_CHUNK:
2530		{
2531			struct sctp_authchunk *sauth;
2532
2533			SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize);
2534
2535			SCTP_INP_WLOCK(inp);
2536			if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks))
2537				error = EINVAL;
2538			SCTP_INP_WUNLOCK(inp);
2539			break;
2540		}
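	/*
	 * SCTP_AUTH_KEY installs (or replaces) a shared key, either on a
	 * specific association or as an endpoint default; cached keys for
	 * that key id are cleared first.
	 */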
2541	case SCTP_AUTH_KEY:
2542		{
2543			struct sctp_authkey *sca;
2544			struct sctp_keyhead *shared_keys;
2545			sctp_sharedkey_t *shared_key;
2546			sctp_key_t *key = NULL;
2547			size_t size;
2548
2549			SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize);
2550			SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id);
2551			size = optsize - sizeof(*sca);
2552
2553			if (stcb) {
2554				/* set it on the assoc */
2555				shared_keys = &stcb->asoc.shared_keys;
2556				/* clear the cached keys for this key id */
2557				sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
2558				/*
2559				 * create the new shared key and
2560				 * insert/replace it
2561				 */
2562				if (size > 0) {
2563					key = sctp_set_key(sca->sca_key, (uint32_t) size);
2564					if (key == NULL) {
2565						error = ENOMEM;
2566						SCTP_TCB_UNLOCK(stcb);
2567						break;
2568					}
2569				}
2570				shared_key = sctp_alloc_sharedkey();
2571				if (shared_key == NULL) {
2572					sctp_free_key(key);
2573					error = ENOMEM;
2574					SCTP_TCB_UNLOCK(stcb);
2575					break;
2576				}
2577				shared_key->key = key;
2578				shared_key->keyid = sca->sca_keynumber;
2579				sctp_insert_sharedkey(shared_keys, shared_key);
2580				SCTP_TCB_UNLOCK(stcb);
2581			} else {
2582				/* set it on the endpoint */
2583				SCTP_INP_WLOCK(inp);
2584				shared_keys = &inp->sctp_ep.shared_keys;
2585				/*
2586				 * clear the cached keys on all assocs for
2587				 * this key id
2588				 */
2589				sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber);
2590				/*
2591				 * create the new shared key and
2592				 * insert/replace it
2593				 */
2594				if (size > 0) {
2595					key = sctp_set_key(sca->sca_key, (uint32_t) size);
2596					if (key == NULL) {
2597						error = ENOMEM;
2598						SCTP_INP_WUNLOCK(inp);
2599						break;
2600					}
2601				}
2602				shared_key = sctp_alloc_sharedkey();
2603				if (shared_key == NULL) {
2604					sctp_free_key(key);
2605					error = ENOMEM;
2606					SCTP_INP_WUNLOCK(inp);
2607					break;
2608				}
2609				shared_key->key = key;
2610				shared_key->keyid = sca->sca_keynumber;
2611				sctp_insert_sharedkey(shared_keys, shared_key);
2612				SCTP_INP_WUNLOCK(inp);
2613			}
2614			break;
2615		}
2616	case SCTP_HMAC_IDENT:
2617		{
2618			struct sctp_hmacalgo *shmac;
2619			sctp_hmaclist_t *hmaclist;
2620			uint32_t hmacid;
2621			size_t size, i;
2622
2623			SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize);
2624			size = (optsize - sizeof(*shmac)) / sizeof(shmac->shmac_idents[0]);
2625			hmaclist = sctp_alloc_hmaclist(size);
2626			if (hmaclist == NULL) {
2627				error = ENOMEM;
2628				break;
2629			}
2630			for (i = 0; i < size; i++) {
2631				hmacid = shmac->shmac_idents[i];
2632				if (sctp_auth_add_hmacid(hmaclist, (uint16_t) hmacid)) {
2633					/* an invalid HMAC id was found */
2634					error = EINVAL;
2635					sctp_free_hmaclist(hmaclist);
2636					goto sctp_set_hmac_done;
2637				}
2638			}
2639			/* set it on the endpoint */
2640			SCTP_INP_WLOCK(inp);
2641			if (inp->sctp_ep.local_hmacs)
2642				sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2643			inp->sctp_ep.local_hmacs = hmaclist;
2644			SCTP_INP_WUNLOCK(inp);
2645	sctp_set_hmac_done:
2646			break;
2647		}
2648	case SCTP_AUTH_ACTIVE_KEY:
2649		{
2650			struct sctp_authkeyid *scact;
2651
2652			SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize);
2653			SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
2654
2655			/* set the active key on the right place */
2656			if (stcb) {
2657				/* set the active key on the assoc */
2658				if (sctp_auth_setactivekey(stcb, scact->scact_keynumber))
2659					error = EINVAL;
2660				SCTP_TCB_UNLOCK(stcb);
2661			} else {
2662				/* set the active key on the endpoint */
2663				SCTP_INP_WLOCK(inp);
2664				if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber))
2665					error = EINVAL;
2666				SCTP_INP_WUNLOCK(inp);
2667			}
2668			break;
2669		}
2670	case SCTP_AUTH_DELETE_KEY:
2671		{
2672			struct sctp_authkeyid *scdel;
2673
2674			SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize);
2675			SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id);
2676
2677			/* delete the key from the right place */
2678			if (stcb) {
2679				if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber))
2680					error = EINVAL;
2681				SCTP_TCB_UNLOCK(stcb);
2682			} else {
2683				SCTP_INP_WLOCK(inp);
2684				if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber))
2685					error = EINVAL;
2686				SCTP_INP_WUNLOCK(inp);
2687			}
2688			break;
2689		}
2690
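	/*
	 * SCTP_RESET_STREAMS sends a stream reset request for the listed
	 * streams (inbound, outbound, both, or a TSN reset), provided the
	 * peer supports the extension and no other reset is outstanding.
	 */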
2691	case SCTP_RESET_STREAMS:
2692		{
2693			struct sctp_stream_reset *strrst;
2694			uint8_t send_in = 0, send_tsn = 0, send_out = 0;
2695			int i;
2696
2697			SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize);
2698			SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id);
2699
2700			if (stcb == NULL) {
2701				error = ENOENT;
2702				break;
2703			}
2704			if (stcb->asoc.peer_supports_strreset == 0) {
2705				/*
2706				 * Peer does not support it, we return
2707				 * protocol not supported since this is true
2708				 * for this feature and this peer, not the
2709				 * socket request in general.
2710				 */
2711				error = EPROTONOSUPPORT;
2712				SCTP_TCB_UNLOCK(stcb);
2713				break;
2714			}
2715			if (stcb->asoc.stream_reset_outstanding) {
2716				error = EALREADY;
2717				SCTP_TCB_UNLOCK(stcb);
2718				break;
2719			}
2720			if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
2721				send_in = 1;
2722			} else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
2723				send_out = 1;
2724			} else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
2725				send_in = 1;
2726				send_out = 1;
2727			} else if (strrst->strrst_flags == SCTP_RESET_TSN) {
2728				send_tsn = 1;
2729			} else {
2730				error = EINVAL;
2731				SCTP_TCB_UNLOCK(stcb);
2732				break;
2733			}
2734			for (i = 0; i < strrst->strrst_num_streams; i++) {
2735				if ((send_in) &&
2737				    (strrst->strrst_list[i] > stcb->asoc.streamincnt)) {
2738					error = EINVAL;
2739					goto get_out;
2740				}
2741				if ((send_out) &&
2742				    (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) {
2743					error = EINVAL;
2744					goto get_out;
2745				}
2746			}
2747			if (error) {
2748		get_out:
2749				SCTP_TCB_UNLOCK(stcb);
2750				break;
2751			}
2752			error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
2753			    strrst->strrst_list,
2754			    send_out, (stcb->asoc.str_reset_seq_in - 3),
2755			    send_in, send_tsn);
2756
2757			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ);
2758			SCTP_TCB_UNLOCK(stcb);
2759		}
2760		break;
2761
2762	case SCTP_CONNECT_X:
2763		if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
2764			error = EINVAL;
2765			break;
2766		}
2767		error = sctp_do_connect_x(so, inp, optval, optsize, p, 0);
2768		break;
2769
2770	case SCTP_CONNECT_X_DELAYED:
2771		if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
2772			error = EINVAL;
2773			break;
2774		}
2775		error = sctp_do_connect_x(so, inp, optval, optsize, p, 1);
2776		break;
2777
2778	case SCTP_CONNECT_X_COMPLETE:
2779		{
2780			struct sockaddr *sa;
2781			struct sctp_nets *net;
2782
2783			/* FIXME MT: check correct? */
2784			SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize);
2785
2786			/* find tcb */
2787			if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2788				SCTP_INP_RLOCK(inp);
2789				stcb = LIST_FIRST(&inp->sctp_asoc_list);
2790				if (stcb) {
2791					SCTP_TCB_LOCK(stcb);
2792					net = sctp_findnet(stcb, sa);
2793				}
2794				SCTP_INP_RUNLOCK(inp);
2795			} else {
2796				/*
2797				 * We increment the refcount here since
2798				 * sctp_findassociation_ep_addr() will do a
2799				 * decrement if it finds the stcb, as long
2800				 * as the locked tcb (last argument) is
2801				 * NULL.
2802				 */
2803				SCTP_INP_INCR_REF(inp);
2804				stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
2805				if (stcb == NULL) {
2806					SCTP_INP_DECR_REF(inp);
2807				}
2808			}
2809
2810			if (stcb == NULL) {
2811				error = ENOENT;
2812				break;
2813			}
2814			if (stcb->asoc.delayed_connection == 1) {
2815				stcb->asoc.delayed_connection = 0;
2816				(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2817				sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb,
2818				    stcb->asoc.primary_destination,
2819				    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9);
2820				sctp_send_initiate(inp, stcb);
2821			} else {
2822				/*
2823				 * already expired or did not use delayed
2824				 * connectx
2825				 */
2826				error = EALREADY;
2827			}
2828			SCTP_TCB_UNLOCK(stcb);
2829		}
2830		break;
2831	case SCTP_MAXBURST:
2832		{
2833			uint8_t *burst;
2834
2835			SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize);
2836
2837			SCTP_INP_WLOCK(inp);
2838			if (*burst) {
2839				inp->sctp_ep.max_burst = *burst;
2840			}
2841			SCTP_INP_WUNLOCK(inp);
2842		}
2843		break;
2844	case SCTP_MAXSEG:
2845		{
2846			struct sctp_assoc_value *av;
2847			int ovh;
2848
2849			SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2850			SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2851
2852			if (stcb) {
2853				error = EINVAL;
2854				SCTP_TCB_UNLOCK(stcb);
2855			} else {
2856				SCTP_INP_WLOCK(inp);
2857				if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2858					ovh = SCTP_MED_OVERHEAD;
2859				} else {
2860					ovh = SCTP_MED_V4_OVERHEAD;
2861				}
2862				/*
2863				 * FIXME MT: I think this is not in tune
2864				 * with the API ID
2865				 */
2866				if (av->assoc_value) {
2867					inp->sctp_frag_point = (av->assoc_value + ovh);
2868				} else {
2869					error = EINVAL;
2870				}
2871				SCTP_INP_WUNLOCK(inp);
2872			}
2873		}
2874		break;
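	/*
	 * SCTP_EVENTS turns the per-event notification flags on or off from
	 * a struct sctp_event_subscribe. A caller might, e.g., set
	 * events.sctp_data_io_event = 1 and use
	 * setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &events, sizeof(events))
	 * to receive sctp_sndrcvinfo with each message.
	 */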
2875	case SCTP_EVENTS:
2876		{
2877			struct sctp_event_subscribe *events;
2878
2879			SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize);
2880
2881			SCTP_INP_WLOCK(inp);
2882			if (events->sctp_data_io_event) {
2883				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
2884			} else {
2885				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
2886			}
2887
2888			if (events->sctp_association_event) {
2889				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
2890			} else {
2891				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
2892			}
2893
2894			if (events->sctp_address_event) {
2895				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
2896			} else {
2897				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
2898			}
2899
2900			if (events->sctp_send_failure_event) {
2901				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
2902			} else {
2903				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
2904			}
2905
2906			if (events->sctp_peer_error_event) {
2907				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR);
2908			} else {
2909				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR);
2910			}
2911
2912			if (events->sctp_shutdown_event) {
2913				sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
2914			} else {
2915				sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
2916			}
2917
2918			if (events->sctp_partial_delivery_event) {
2919				sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
2920			} else {
2921				sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
2922			}
2923
2924			if (events->sctp_adaptation_layer_event) {
2925				sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
2926			} else {
2927				sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
2928			}
2929
2930			if (events->sctp_authentication_event) {
2931				sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT);
2932			} else {
2933				sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
2934			}
2935
2936			if (events->sctp_stream_reset_events) {
2937				sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
2938			} else {
2939				sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
2940			}
2941			SCTP_INP_WUNLOCK(inp);
2942		}
2943		break;
2944
2945	case SCTP_ADAPTATION_LAYER:
2946		{
2947			struct sctp_setadaptation *adap_bits;
2948
2949			SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize);
2950			SCTP_INP_WLOCK(inp);
2951			inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind;
2952			SCTP_INP_WUNLOCK(inp);
2953		}
2954		break;
2955#ifdef SCTP_DEBUG
2956	case SCTP_SET_INITIAL_DBG_SEQ:
2957		{
2958			uint32_t *vvv;
2959
2960			SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize);
2961			SCTP_INP_WLOCK(inp);
2962			inp->sctp_ep.initial_sequence_debug = *vvv;
2963			SCTP_INP_WUNLOCK(inp);
2964		}
2965		break;
2966#endif
2967	case SCTP_DEFAULT_SEND_PARAM:
2968		{
2969			struct sctp_sndrcvinfo *s_info;
2970
2971			SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize);
2972			SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
2973
2974			if (stcb) {
2975				if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) {
2976					stcb->asoc.def_send = *s_info;
2977				} else {
2978					error = EINVAL;
2979				}
2980				SCTP_TCB_UNLOCK(stcb);
2981			} else {
2982				SCTP_INP_WLOCK(inp);
2983				inp->def_send = *s_info;
2984				SCTP_INP_WUNLOCK(inp);
2985			}
2986		}
2987		break;
2988	case SCTP_PEER_ADDR_PARAMS:
2989		/* Applies to the specific association */
2990		{
2991			struct sctp_paddrparams *paddrp;
2992			struct sctp_nets *net;
2993
2994			SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize);
2995			SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
2996			net = NULL;
2997			if (stcb) {
2998				net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2999			} else {
3000				/*
3001				 * We increment the refcount here since
3002				 * sctp_findassociation_ep_addr() will do a
3003				 * decrement if it finds the stcb, as long
3004				 * as the locked tcb (last argument) is
3005				 * NULL.
3006				 */
3007				SCTP_INP_INCR_REF(inp);
3008				stcb = sctp_findassociation_ep_addr(&inp,
3009				    (struct sockaddr *)&paddrp->spp_address,
3010				    &net, NULL, NULL);
3011				if (stcb == NULL) {
3012					SCTP_INP_DECR_REF(inp);
3013				}
3014			}
3015
3016
3017			if (stcb) {
3018				/************************TCB SPECIFIC SET ******************/
3019				/*
3020				 * do we change the timer for HB, we run
3021				 * only one?
3022				 */
3023				if (paddrp->spp_hbinterval)
3024					stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
3025				else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3026					stcb->asoc.heart_beat_delay = 0;
3027
3028				/* network sets ? */
3029				if (net) {
3030					/************************NET SPECIFIC SET ******************/
3031					if (paddrp->spp_flags & SPP_HB_DEMAND) {
3032						/* on demand HB */
3033						(void)sctp_send_hb(stcb, 1, net);
3034					}
3035					if (paddrp->spp_flags & SPP_HB_DISABLE) {
3036						net->dest_state |= SCTP_ADDR_NOHB;
3037					}
3038					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3039						net->dest_state &= ~SCTP_ADDR_NOHB;
3040					}
3041					if (paddrp->spp_flags & SPP_PMTUD_DISABLE) {
3042						if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3043							sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3044							    SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3045						}
3046						if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3047							net->mtu = paddrp->spp_pathmtu;
3048							if (net->mtu < stcb->asoc.smallest_mtu) {
3049#ifdef SCTP_PRINT_FOR_B_AND_M
3050								SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n",
3051								    net->mtu);
3052#endif
3053								sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3054							}
3055						}
3056					}
3057					if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3058						if (!SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3059							sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3060						}
3061					}
3062					if (paddrp->spp_pathmaxrxt)
3063						net->failure_threshold = paddrp->spp_pathmaxrxt;
3064#ifdef INET
3065					if (paddrp->spp_flags & SPP_IPV4_TOS) {
3066						if (net->ro._l_addr.sin.sin_family == AF_INET) {
3067							net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc;
3068						}
3069					}
3070#endif
3071#ifdef INET6
3072					if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
3073						if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
3074							net->tos_flowlabel = paddrp->spp_ipv6_flowlabel;
3075						}
3076					}
3077#endif
3078				} else {
3079					/************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/
3080					if (paddrp->spp_pathmaxrxt)
3081						stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
3082
3083					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3084						/* Turn back on the timer */
3085						stcb->asoc.hb_is_disabled = 0;
3086						sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3087					}
3088					if (paddrp->spp_flags & SPP_HB_DISABLE) {
3089						int cnt_of_unconf = 0;
3090						struct sctp_nets *lnet;
3091
3092						stcb->asoc.hb_is_disabled = 1;
3093						TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
3094							if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
3095								cnt_of_unconf++;
3096							}
3097						}
3098						/*
3099						 * stop the timer ONLY if we
3100						 * have no unconfirmed
3101						 * addresses
3102						 */
3103						if (cnt_of_unconf == 0) {
3104							sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11);
3105						}
3106					}
3107					if (paddrp->spp_flags & SPP_HB_ENABLE) {
3108						/* start up the timer. */
3109						sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3110					}
3111#ifdef INET
3112					if (paddrp->spp_flags & SPP_IPV4_TOS)
3113						stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc;
3114#endif
3115#ifdef INET6
3116					if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL)
3117						stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel;
3118#endif
3119
3120				}
3121				SCTP_TCB_UNLOCK(stcb);
3122			} else {
3123				/************************NO TCB, SET TO default stuff ******************/
3124				SCTP_INP_WLOCK(inp);
3125				/*
3126				 * The default TOS/flow label is set via the
3127				 * IP-level options on the socket.
3128				 */
3129				if (paddrp->spp_pathmaxrxt) {
3130					inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
3131				}
3132				if (paddrp->spp_flags & SPP_HB_ENABLE) {
3133					inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval);
3134					sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3135				} else if (paddrp->spp_flags & SPP_HB_DISABLE) {
3136					sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3137				}
3138				SCTP_INP_WUNLOCK(inp);
3139			}
3140		}
3141		break;
3142	case SCTP_RTOINFO:
3143		{
3144			struct sctp_rtoinfo *srto;
3145			uint32_t new_init, new_min, new_max;
3146
3147			SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize);
3148			SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
3149
3150			if (stcb) {
3151				if (srto->srto_initial)
3152					new_init = srto->srto_initial;
3153				else
3154					new_init = stcb->asoc.initial_rto;
3155				if (srto->srto_max)
3156					new_max = srto->srto_max;
3157				else
3158					new_max = stcb->asoc.maxrto;
3159				if (srto->srto_min)
3160					new_min = srto->srto_min;
3161				else
3162					new_min = stcb->asoc.minrto;
3163				if ((new_min <= new_init) && (new_init <= new_max)) {
3164					stcb->asoc.initial_rto = new_init;
3165					stcb->asoc.maxrto = new_max;
3166					stcb->asoc.minrto = new_min;
3167				} else {
3168					error = EDOM;
3169				}
3170				SCTP_TCB_UNLOCK(stcb);
3171			} else {
3172				SCTP_INP_WLOCK(inp);
3173				if (srto->srto_initial)
3174					new_init = srto->srto_initial;
3175				else
3176					new_init = inp->sctp_ep.initial_rto;
3177				if (srto->srto_max)
3178					new_max = srto->srto_max;
3179				else
3180					new_max = inp->sctp_ep.sctp_maxrto;
3181				if (srto->srto_min)
3182					new_min = srto->srto_min;
3183				else
3184					new_min = inp->sctp_ep.sctp_minrto;
3185				if ((new_min <= new_init) && (new_init <= new_max)) {
3186					inp->sctp_ep.initial_rto = new_init;
3187					inp->sctp_ep.sctp_maxrto = new_max;
3188					inp->sctp_ep.sctp_minrto = new_min;
3189				} else {
3190					error = EDOM;
3191				}
3192				SCTP_INP_WUNLOCK(inp);
3193			}
3194		}
3195		break;
3196	case SCTP_ASSOCINFO:
3197		{
3198			struct sctp_assocparams *sasoc;
3199
3200			SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize);
3201			SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
3202
3203			if (stcb) {
3204				if (sasoc->sasoc_asocmaxrxt)
3205					stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
3206				sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
3207				sasoc->sasoc_peer_rwnd = 0;
3208				sasoc->sasoc_local_rwnd = 0;
3209				if (sasoc->sasoc_cookie_life)
3210					stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
3211				stcb->asoc.delayed_ack = sasoc->sasoc_sack_delay;
3212				if (sasoc->sasoc_sack_freq) {
3213					stcb->asoc.sack_freq = sasoc->sasoc_sack_freq;
3214				}
3215				SCTP_TCB_UNLOCK(stcb);
3216			} else {
3217				SCTP_INP_WLOCK(inp);
3218				if (sasoc->sasoc_asocmaxrxt)
3219					inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
3220				sasoc->sasoc_number_peer_destinations = 0;
3221				sasoc->sasoc_peer_rwnd = 0;
3222				sasoc->sasoc_local_rwnd = 0;
3223				if (sasoc->sasoc_cookie_life)
3224					inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
3225				inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sasoc->sasoc_sack_delay);
3226				if (sasoc->sasoc_sack_freq) {
3227					inp->sctp_ep.sctp_sack_freq = sasoc->sasoc_sack_freq;
3228				}
3229				SCTP_INP_WUNLOCK(inp);
3230			}
3231		}
3232		break;
3233	case SCTP_INITMSG:
3234		{
3235			struct sctp_initmsg *sinit;
3236
3237			SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize);
3238			SCTP_INP_WLOCK(inp);
3239			if (sinit->sinit_num_ostreams)
3240				inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
3241
3242			if (sinit->sinit_max_instreams)
3243				inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
3244
3245			if (sinit->sinit_max_attempts)
3246				inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
3247
3248			if (sinit->sinit_max_init_timeo)
3249				inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
3250			SCTP_INP_WUNLOCK(inp);
3251		}
3252		break;
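	/*
	 * SCTP_PRIMARY_ADDR selects which confirmed destination address of
	 * the association is used as the primary path for sending.
	 */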
3253	case SCTP_PRIMARY_ADDR:
3254		{
3255			struct sctp_setprim *spa;
3256			struct sctp_nets *net, *lnet;
3257
3258			SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize);
3259			SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id);
3260
3261			net = NULL;
3262			if (stcb) {
3263				net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
3264			} else {
3265				/*
3266				 * We increment the refcount here since
3267				 * sctp_findassociation_ep_addr() will do a
3268				 * decrement if it finds the stcb, as long
3269				 * as the locked tcb (last argument) is
3270				 * NULL.
3271				 */
3272				SCTP_INP_INCR_REF(inp);
3273				stcb = sctp_findassociation_ep_addr(&inp,
3274				    (struct sockaddr *)&spa->ssp_addr,
3275				    &net, NULL, NULL);
3276				if (stcb == NULL) {
3277					SCTP_INP_DECR_REF(inp);
3278				}
3279			}
3280
3281			if ((stcb) && (net)) {
3282				if ((net != stcb->asoc.primary_destination) &&
3283				    (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
3284					/* Ok we need to set it */
3285					lnet = stcb->asoc.primary_destination;
3286					if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) {
3287						if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3288							net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
3289						}
3290						net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
3291					}
3292				}
3293			} else {
3294				error = EINVAL;
3295			}
3296			if (stcb) {
3297				SCTP_TCB_UNLOCK(stcb);
3298			}
3299		}
3300		break;
3301	case SCTP_SET_DYNAMIC_PRIMARY:
3302		{
3303			union sctp_sockstore *ss;
3304
3305			error = priv_check_cred(curthread->td_ucred,
3306			    PRIV_NETINET_RESERVEDPORT,
3307			    SUSER_ALLOWJAIL);
3308			if (error)
3309				break;
3310
3311			SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize);
3312			/* SUPER USER CHECK? */
3313			error = sctp_dynamic_set_primary(&ss->sa, vrf_id);
3314		}
3315		break;
3316	case SCTP_SET_PEER_PRIMARY_ADDR:
3317		{
3318			struct sctp_setpeerprim *sspp;
3319
3320			SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize);
3321			SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id);
3322			if (stcb != NULL) {
3323				if (sctp_set_primary_ip_address_sa(stcb, (struct sockaddr *)&sspp->sspp_addr) != 0) {
3324					error = EINVAL;
3325				}
3326				SCTP_TCB_UNLOCK(stcb);
3327			} else {
3328				error = EINVAL;
3329			}
3330
3331		}
3332		break;
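	/*
	 * SCTP_BINDX_ADD_ADDR adds one address to a subset-bound endpoint
	 * (it is rejected on a bound-all socket). Mapped v4 addresses are
	 * converted to plain v4 before the bind or address management call.
	 */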
3333	case SCTP_BINDX_ADD_ADDR:
3334		{
3335			struct sctp_getaddresses *addrs;
3336			struct sockaddr *addr_touse;
3337			struct sockaddr_in sin;
3338
3339			SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
3340
3341			/* see if we're bound all already! */
3342			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3343				error = EINVAL;
3344				break;
3345			}
3346			/* Is the VRF one we have */
3347			addr_touse = addrs->addr;
3348#if defined(INET6)
3349			if (addrs->addr->sa_family == AF_INET6) {
3350				struct sockaddr_in6 *sin6;
3351
3352				sin6 = (struct sockaddr_in6 *)addr_touse;
3353				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3354					in6_sin6_2_sin(&sin, sin6);
3355					addr_touse = (struct sockaddr *)&sin;
3356				}
3357			}
3358#endif
3359			if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3360				if (p == NULL) {
3361					/* Can't get proc for Net/Open BSD */
3362					error = EINVAL;
3363					break;
3364				}
3365				error = sctp_inpcb_bind(so, addr_touse, p);
3366				break;
3367			}
3368			/*
3369			 * No locks required here since bind and mgmt_ep_sa
3370			 * all do their own locking. If we do something for
3371			 * the FIX: below we may need to lock in that case.
3372			 */
3373			if (addrs->sget_assoc_id == 0) {
3374				/* add the address */
3375				struct sctp_inpcb *lep;
3376
3377				((struct sockaddr_in *)addr_touse)->sin_port = inp->sctp_lport;
3378				lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
3379				if (lep != NULL) {
3380					/*
3381					 * We must decrement the refcount
3382					 * since we have the ep already and
3383					 * are binding. No remove going on
3384					 * here.
3385					 */
3386					SCTP_INP_DECR_REF(inp);
3387				}
3388				if (lep == inp) {
3389					/* already bound to it.. ok */
3390					break;
3391				} else if (lep == NULL) {
3392					((struct sockaddr_in *)addr_touse)->sin_port = 0;
3393					error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
3394					    SCTP_ADD_IP_ADDRESS, vrf_id);
3395				} else {
3396					error = EADDRNOTAVAIL;
3397				}
3398				if (error)
3399					break;
3400
3401			} else {
3402				/*
3403				 * FIX: decide whether we allow assoc based
3404				 * bindx
3405				 */
3406			}
3407		}
3408		break;
3409	case SCTP_BINDX_REM_ADDR:
3410		{
3411			struct sctp_getaddresses *addrs;
3412			struct sockaddr *addr_touse;
3413			struct sockaddr_in sin;
3414
3415			SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
3416			/* see if we're bound all already! */
3417			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3418				error = EINVAL;
3419				break;
3420			}
3421			addr_touse = addrs->addr;
3422#if defined(INET6)
3423			if (addrs->addr->sa_family == AF_INET6) {
3424				struct sockaddr_in6 *sin6;
3425
3426				sin6 = (struct sockaddr_in6 *)addr_touse;
3427				if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3428					in6_sin6_2_sin(&sin, sin6);
3429					addr_touse = (struct sockaddr *)&sin;
3430				}
3431			}
3432#endif
3433			/*
3434			 * No lock required mgmt_ep_sa does its own locking.
3435			 * If the FIX: below is ever changed we may need to
3436			 * lock before calling association level binding.
3437			 */
3438			if (addrs->sget_assoc_id == 0) {
3439				/* delete the address */
3440				error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
3441				    SCTP_DEL_IP_ADDRESS,
3442				    vrf_id);
3443			} else {
3444				/*
3445				 * FIX: decide whether we allow assoc based
3446				 * bindx
3447				 */
3448			}
3449		}
3450		break;
3451	default:
3452		error = ENOPROTOOPT;
3453		break;
3454	}			/* end switch (opt) */
3455	return (error);
3456}
3457
3458
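/*
 * Socket option entry point for IPPROTO_SCTP. Non-SCTP levels are passed on
 * to ip_ctloutput()/ip6_ctloutput(); otherwise the value is copied in, handed
 * to sctp_setopt() or sctp_getopt(), and copied back out for gets. A call
 * such as getsockopt(fd, IPPROTO_SCTP, SCTP_STATUS, &st, &len), for example,
 * ends up here.
 */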
3459int
3460sctp_ctloutput(struct socket *so, struct sockopt *sopt)
3461{
3462	void *optval = NULL;
3463	size_t optsize = 0;
3464	struct sctp_inpcb *inp;
3465	void *p;
3466	int error = 0;
3467
3468	inp = (struct sctp_inpcb *)so->so_pcb;
3469	if (inp == 0) {
3470		/* No PCB: return the same error TCP would. */
3471		return (ECONNRESET);
3472	}
3473	if (sopt->sopt_level != IPPROTO_SCTP) {
3474		/* wrong proto level... send back up to IP */
3475#ifdef INET6
3476		if (INP_CHECK_SOCKAF(so, AF_INET6))
3477			error = ip6_ctloutput(so, sopt);
3478		else
3479#endif				/* INET6 */
3480			error = ip_ctloutput(so, sopt);
3481		return (error);
3482	}
3483	optsize = sopt->sopt_valsize;
3484	if (optsize) {
3485		SCTP_MALLOC(optval, void *, optsize, "SCTPSockOpt");
3486		if (optval == NULL) {
3487			return (ENOBUFS);
3488		}
3489		error = sooptcopyin(sopt, optval, optsize, optsize);
3490		if (error) {
3491			SCTP_FREE(optval);
3492			goto out;
3493		}
3494	}
3495	p = (void *)sopt->sopt_td;
3496	if (sopt->sopt_dir == SOPT_SET) {
3497		error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p);
3498	} else if (sopt->sopt_dir == SOPT_GET) {
3499		error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p);
3500	} else {
3501		error = EINVAL;
3502	}
3503	if ((error == 0) && (optval != NULL)) {
3504		error = sooptcopyout(sopt, optval, optsize);
3505		SCTP_FREE(optval);
3506	} else if (optval != NULL) {
3507		SCTP_FREE(optval);
3508	}
3509out:
3510	return (error);
3511}
3512
3513
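/*
 * connect(2) handler: binds an ephemeral port if needed, refuses a second
 * association on a one-to-one style socket, allocates the association and
 * sends the INIT.
 */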
3514static int
3515sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
3516{
3517	int error = 0;
3518	int create_lock_on = 0;
3519	uint32_t vrf_id;
3520	struct sctp_inpcb *inp;
3521	struct sctp_tcb *stcb = NULL;
3522
3523	inp = (struct sctp_inpcb *)so->so_pcb;
3524	if (inp == 0) {
3525		/* No PCB: return the same error TCP would. */
3526		return (ECONNRESET);
3527	}
3528	SCTP_ASOC_CREATE_LOCK(inp);
3529	create_lock_on = 1;
3530
3531	SCTP_INP_INCR_REF(inp);
3532	if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3533	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3534		/* Should I really unlock ? */
3535		error = EFAULT;
3536		goto out_now;
3537	}
3538#ifdef INET6
3539	if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
3540	    (addr->sa_family == AF_INET6)) {
3541		error = EINVAL;
3542		goto out_now;
3543	}
3544#endif				/* INET6 */
3545	if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
3546	    SCTP_PCB_FLAGS_UNBOUND) {
3547		/* Bind an ephemeral port */
3548		error = sctp_inpcb_bind(so, NULL, p);
3549		if (error) {
3550			goto out_now;
3551		}
3552	}
3553	/* Now do we connect? */
3554	if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
3555		error = EINVAL;
3556		goto out_now;
3557	}
3558	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3559	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
3560		/* We are already connected AND the TCP model */
3561		error = EADDRINUSE;
3562		goto out_now;
3563	}
3564	if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3565		SCTP_INP_RLOCK(inp);
3566		stcb = LIST_FIRST(&inp->sctp_asoc_list);
3567		SCTP_INP_RUNLOCK(inp);
3568	} else {
3569		/*
3570		 * We increment the refcount here since sctp_findassociation_ep_addr()
3571		 * will do a decrement if it finds the stcb, as long as the
3572		 * locked tcb (last argument) is NULL.
3573		 */
3574		SCTP_INP_INCR_REF(inp);
3575		stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
3576		if (stcb == NULL) {
3577			SCTP_INP_DECR_REF(inp);
3578		} else {
3579			SCTP_TCB_LOCK(stcb);
3580		}
3581	}
3582	if (stcb != NULL) {
3583		/* We already have, or are bringing up, an association */
3584		error = EALREADY;
3585		goto out_now;
3586	}
3587	vrf_id = inp->def_vrf_id;
3588	/* We are GOOD to go */
3589	stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id);
3590	if (stcb == NULL) {
3591		/* Gak! no memory */
3592		goto out_now;
3593	}
3594	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
3595		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
3596		/* Set the connected flag so we can queue data */
3597		soisconnecting(so);
3598	}
3599	stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
3600	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
3601
3602	/* initialize authentication parameters for the assoc */
3603	sctp_initialize_auth_params(inp, stcb);
3604
3605	sctp_send_initiate(inp, stcb);
3606	SCTP_TCB_UNLOCK(stcb);
3607out_now:
3608	if (create_lock_on) {
3609		SCTP_ASOC_CREATE_UNLOCK(inp);
3610	}
3611	SCTP_INP_DECR_REF(inp);
3612	return error;
3613}
3614
3615int
3616sctp_listen(struct socket *so, int backlog, struct thread *p)
3617{
3618	/*
3619	 * Note this module depends on the protocol processing being called
3620	 * AFTER any socket level flags and backlog are applied to the
3621	 * socket. The traditional way that the socket flags are applied is
3622	 * AFTER protocol processing. We have made a change to the
3623	 * sys/kern/uipc_socket.c module to reverse this but this MUST be in
3624	 * place if the socket API for SCTP is to work properly.
3625	 */
3626
3627	int error = 0;
3628	struct sctp_inpcb *inp;
3629
3630	inp = (struct sctp_inpcb *)so->so_pcb;
3631	if (inp == 0) {
3632		/* No PCB: return the same error TCP would. */
3633		return (ECONNRESET);
3634	}
3635	SCTP_INP_RLOCK(inp);
3636#ifdef SCTP_LOCK_LOGGING
3637	sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
3638#endif
3639	SOCK_LOCK(so);
3640	error = solisten_proto_check(so);
3641	if (error) {
3642		SOCK_UNLOCK(so);
3643		SCTP_INP_RUNLOCK(inp);
3644		return (error);
3645	}
3646	if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3647	    (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
3648		/* We are already connected AND the TCP model */
3649		SCTP_INP_RUNLOCK(inp);
3650		SOCK_UNLOCK(so);
3651		return (EADDRINUSE);
3652	}
3653	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3654		/* We must do a bind. */
3655		SOCK_UNLOCK(so);
3656		SCTP_INP_RUNLOCK(inp);
3657		if ((error = sctp_inpcb_bind(so, NULL, p))) {
3658			/* bind error, probably perm */
3659			return (error);
3660		}
3661		SOCK_LOCK(so);
3662	} else {
3663		SCTP_INP_RUNLOCK(inp);
3664	}
3665	/* It appears for 7.0 and on, we must always call this. */
3666	solisten_proto(so, backlog);
3667	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
3668		/* remove the ACCEPTCONN flag for one-to-many sockets */
3669		so->so_options &= ~SO_ACCEPTCONN;
3670	}
3671	if (backlog == 0) {
3672		/* turning off listen */
3673		so->so_options &= ~SO_ACCEPTCONN;
3674	}
3675	SOCK_UNLOCK(so);
3676	return (error);
3677}
3678
3679static int sctp_defered_wakeup_cnt = 0;
3680
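/*
 * accept(2) handler for one-to-one style sockets: returns the peer's primary
 * destination address for the first association and performs any deferred
 * socket wakeups.
 */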
3681int
3682sctp_accept(struct socket *so, struct sockaddr **addr)
3683{
3684	struct sctp_tcb *stcb;
3685	struct sctp_inpcb *inp;
3686	union sctp_sockstore store;
3687
3688	int error;
3689
3690	inp = (struct sctp_inpcb *)so->so_pcb;
3691
3692	if (inp == 0) {
3693		return (ECONNRESET);
3694	}
3695	SCTP_INP_RLOCK(inp);
3696	if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
3697		SCTP_INP_RUNLOCK(inp);
3698		return (ENOTSUP);
3699	}
3700	if (so->so_state & SS_ISDISCONNECTED) {
3701		SCTP_INP_RUNLOCK(inp);
3702		return (ECONNABORTED);
3703	}
3704	stcb = LIST_FIRST(&inp->sctp_asoc_list);
3705	if (stcb == NULL) {
3706		SCTP_INP_RUNLOCK(inp);
3707		return (ECONNRESET);
3708	}
3709	SCTP_TCB_LOCK(stcb);
3710	SCTP_INP_RUNLOCK(inp);
3711	store = stcb->asoc.primary_destination->ro._l_addr;
3712	SCTP_TCB_UNLOCK(stcb);
3713	if (store.sa.sa_family == AF_INET) {
3714		struct sockaddr_in *sin;
3715
3716		SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
3717		sin->sin_family = AF_INET;
3718		sin->sin_len = sizeof(*sin);
3719		sin->sin_port = ((struct sockaddr_in *)&store)->sin_port;
3720		sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr;
3721		*addr = (struct sockaddr *)sin;
3722	} else {
3723		struct sockaddr_in6 *sin6;
3724
3725		SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
3726		sin6->sin6_family = AF_INET6;
3727		sin6->sin6_len = sizeof(*sin6);
3728		sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port;
3729
3730		sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr;
3731		if ((error = sa6_recoverscope(sin6)) != 0) {
3732			SCTP_FREE_SONAME(sin6);
3733			return (error);
3734		}
3735		*addr = (struct sockaddr *)sin6;
3736	}
3737	/* Wake any delayed sleep action */
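	/*
	 * If SCTP_PCB_FLAGS_DONT_WAKE was set, socket wakeups were being
	 * suppressed and recorded as SCTP_PCB_FLAGS_WAKEINPUT /
	 * SCTP_PCB_FLAGS_WAKEOUTPUT instead.  Now that the accept has
	 * completed, clear DONT_WAKE and deliver any postponed read or
	 * write wakeup so that sleeping readers/writers (and select/poll)
	 * are not left waiting.
	 */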
3738	if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
3739		SCTP_INP_WLOCK(inp);
3740		inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
3741		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
3742			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
3743			SCTP_INP_WUNLOCK(inp);
3744			SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
3745			if (sowriteable(inp->sctp_socket)) {
3746				sowwakeup_locked(inp->sctp_socket);
3747			} else {
3748				SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
3749			}
3750			SCTP_INP_WLOCK(inp);
3751		}
3752		if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
3753			inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
3754			SCTP_INP_WUNLOCK(inp);
3755			SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
3756			if (soreadable(inp->sctp_socket)) {
3757				sctp_defered_wakeup_cnt++;
3758				sorwakeup_locked(inp->sctp_socket);
3759			} else {
3760				SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
3761			}
3762			SCTP_INP_WLOCK(inp);
3763		}
3764		SCTP_INP_WUNLOCK(inp);
3765	}
3766	return (0);
3767}
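/*
 * Illustrative sketch (not part of this file): accept() on a listening
 * one-to-one style SCTP socket.  The address filled in is the
 * association's primary destination address, as selected above, with
 * the scope recovered for link-local IPv6; one-to-many sockets are
 * rejected by sctp_accept() with ENOTSUP.
 *
 *	struct sockaddr_storage ss;
 *	socklen_t len = sizeof(ss);
 *	int nfd = accept(lfd, (struct sockaddr *)&ss, &len);
 */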
3768
3769int
3770sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
3771{
3772	struct sockaddr_in *sin;
3773	uint32_t vrf_id;
3774	struct sctp_inpcb *inp;
3775	struct sctp_ifa *sctp_ifa;
3776
3777	/*
3778	 * Do the malloc first in case it blocks.
3779	 */
3780	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
3781	sin->sin_family = AF_INET;
3782	sin->sin_len = sizeof(*sin);
3783	inp = (struct sctp_inpcb *)so->so_pcb;
3784	if (inp == NULL) {
3785		SCTP_FREE_SONAME(sin);
3786		return (ECONNRESET);
3787	}
3788	SCTP_INP_RLOCK(inp);
3789	sin->sin_port = inp->sctp_lport;
3790	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3791		if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3792			struct sctp_tcb *stcb;
3793			struct sockaddr_in *sin_a;
3794			struct sctp_nets *net;
3795			int fnd;
3796
3797			stcb = LIST_FIRST(&inp->sctp_asoc_list);
3798			if (stcb == NULL) {
3799				goto notConn;
3800			}
3801			fnd = 0;
3802			sin_a = NULL;
3803			SCTP_TCB_LOCK(stcb);
3804			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3805				sin_a = (struct sockaddr_in *)&net->ro._l_addr;
3806				if (sin_a == NULL)
3807					/* this will make coverity happy */
3808					continue;
3809
3810				if (sin_a->sin_family == AF_INET) {
3811					fnd = 1;
3812					break;
3813				}
3814			}
3815			if ((!fnd) || (sin_a == NULL)) {
3816				/* punt */
3817				SCTP_TCB_UNLOCK(stcb);
3818				goto notConn;
3819			}
3820			vrf_id = inp->def_vrf_id;
3821			sctp_ifa = sctp_source_address_selection(inp,
3822			    stcb,
3823	    (sctp_route_t *)&net->ro,
3824			    net, 0, vrf_id);
3825			if (sctp_ifa) {
3826				sin->sin_addr = sctp_ifa->address.sin.sin_addr;
3827				sctp_free_ifa(sctp_ifa);
3828			}
3829			SCTP_TCB_UNLOCK(stcb);
3830		} else {
3831		/* For the unconnected bound-all case you get back 0 (INADDR_ANY) */
3832	notConn:
3833			sin->sin_addr.s_addr = 0;
3834		}
3835
3836	} else {
3837		/* Take the first IPv4 address in the list */
3838		struct sctp_laddr *laddr;
3839		int fnd = 0;
3840
3841		LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3842			if (laddr->ifa->address.sa.sa_family == AF_INET) {
3843				struct sockaddr_in *sin_a;
3844
3845				sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa;
3846				sin->sin_addr = sin_a->sin_addr;
3847				fnd = 1;
3848				break;
3849			}
3850		}
3851		if (!fnd) {
3852			SCTP_FREE_SONAME(sin);
3853			SCTP_INP_RUNLOCK(inp);
3854			return (ENOENT);
3855		}
3856	}
3857	SCTP_INP_RUNLOCK(inp);
3858	(*addr) = (struct sockaddr *)sin;
3859	return (0);
3860}
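/*
 * Illustrative sketch (not part of this file): getsockname() behaviour
 * implied by sctp_ingetaddr() above.  The port is always the
 * endpoint's bound port; for a wildcard-bound ("bound all") endpoint
 * the address is 0.0.0.0 unless the socket is connected, in which
 * case the source address that would be chosen towards the peer's
 * primary destination is reported.
 *
 *	struct sockaddr_in sin;
 *	socklen_t len = sizeof(sin);
 *	getsockname(fd, (struct sockaddr *)&sin, &len);
 */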
3861
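/*
 * pru_peeraddr handler.  Only meaningful for a connected one-to-one
 * (TCP model) socket: the first IPv4 address found among the peer's
 * destination addresses is returned, or ENOENT if the peer has no
 * IPv4 address.  Unconnected and one-to-many sockets get ENOTCONN.
 */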
3862int
3863sctp_peeraddr(struct socket *so, struct sockaddr **addr)
3864{
3865	struct sockaddr_in *sin;
3866	int fnd;
3867	struct sockaddr_in *sin_a;
3868	struct sctp_inpcb *inp;
3869	struct sctp_tcb *stcb;
3870	struct sctp_nets *net;
3871
3872	/* Check the endpoint first; the malloc below may block. */
3873	inp = (struct sctp_inpcb *)so->so_pcb;
3874	if ((inp == NULL) ||
3875	    ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
3876		/* UDP type and listeners will drop out here */
3877		return (ENOTCONN);
3878	}
3879	SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
3880	sin->sin_family = AF_INET;
3881	sin->sin_len = sizeof(*sin);
3882
3883	/* We must recapture the inp in case we blocked */
3884	inp = (struct sctp_inpcb *)so->so_pcb;
3885	if (inp == NULL) {
3886		SCTP_FREE_SONAME(sin);
3887		return (ECONNRESET);
3888	}
3889	SCTP_INP_RLOCK(inp);
3890	stcb = LIST_FIRST(&inp->sctp_asoc_list);
3891	if (stcb) {
3892		SCTP_TCB_LOCK(stcb);
3893	}
3894	SCTP_INP_RUNLOCK(inp);
3895	if (stcb == NULL) {
3896		SCTP_FREE_SONAME(sin);
3897		return (ECONNRESET);
3898	}
3899	fnd = 0;
3900	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3901		sin_a = (struct sockaddr_in *)&net->ro._l_addr;
3902		if (sin_a->sin_family == AF_INET) {
3903			fnd = 1;
3904			sin->sin_port = stcb->rport;
3905			sin->sin_addr = sin_a->sin_addr;
3906			break;
3907		}
3908	}
3909	SCTP_TCB_UNLOCK(stcb);
3910	if (!fnd) {
3911		/* No IPv4 address */
3912		SCTP_FREE_SONAME(sin);
3913		return (ENOENT);
3914	}
3915	(*addr) = (struct sockaddr *)sin;
3916	return (0);
3917}
3918
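/*
 * Socket-layer dispatch table: maps the generic pru_* entry points
 * onto their SCTP-specific implementations.
 */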
3919struct pr_usrreqs sctp_usrreqs = {
3920	.pru_abort = sctp_abort,
3921	.pru_accept = sctp_accept,
3922	.pru_attach = sctp_attach,
3923	.pru_bind = sctp_bind,
3924	.pru_connect = sctp_connect,
3925	.pru_control = in_control,
3926	.pru_close = sctp_close,
3927	.pru_detach = sctp_close,
3928	.pru_sopoll = sopoll_generic,
3929	.pru_disconnect = sctp_disconnect,
3930	.pru_listen = sctp_listen,
3931	.pru_peeraddr = sctp_peeraddr,
3932	.pru_send = sctp_sendm,
3933	.pru_shutdown = sctp_shutdown,
3934	.pru_sockaddr = sctp_ingetaddr,
3935	.pru_sosend = sctp_sosend,
3936	.pru_soreceive = sctp_soreceive
3937};
3938