1/*-
2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_usrreq.c 170091 2007-05-29 09:29:03Z rrs $");
35#include <netinet/sctp_os.h>
36#include <sys/proc.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctp_header.h>
39#include <netinet/sctp_var.h>
40#if defined(INET6)
41#include <netinet6/sctp6_var.h>
42#endif
43#include <netinet/sctp_sysctl.h>
44#include <netinet/sctp_output.h>
45#include <netinet/sctp_uio.h>
46#include <netinet/sctp_asconf.h>
47#include <netinet/sctputil.h>
48#include <netinet/sctp_indata.h>
49#include <netinet/sctp_timer.h>
50#include <netinet/sctp_auth.h>
51#include <netinet/sctp_bsd_addr.h>
52
53
54
55
56void
57sctp_init(void)
58{
59 /* Init the SCTP pcb in sctp_pcb.c */
60 u_long sb_max_adj;
61
62 sctp_pcb_init();
63
64 if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
65 sctp_max_chunks_on_queue = (nmbclusters / 8);
66 /*
67 * Allow a user to take no more than 1/2 the number of clusters or
 68	 * the SB_MAX, whichever is smaller, for the send window.
69 */
70 sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
71 sctp_sendspace = min((min(SB_MAX, sb_max_adj)),
72 (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
73 /*
 74	 * Now for the receive window: should we take the same amount, or
 75	 * should we use 1/2 of SB_MAX in the min above instead? For now,
 76	 * just copy the send window value.
77 */
78 sctp_recvspace = sctp_sendspace;
79
80
81}
82
83
84
85/*
86 * cleanup of the sctppcbinfo structure.
87 * Assumes that the sctppcbinfo lock is held.
88 */
89void
90sctp_pcbinfo_cleanup(void)
91{
92 /* free the hash tables */
93 if (sctppcbinfo.sctp_asochash != NULL)
94 SCTP_HASH_FREE(sctppcbinfo.sctp_asochash, sctppcbinfo.hashasocmark);
95 if (sctppcbinfo.sctp_ephash != NULL)
96 SCTP_HASH_FREE(sctppcbinfo.sctp_ephash, sctppcbinfo.hashmark);
97 if (sctppcbinfo.sctp_tcpephash != NULL)
98 SCTP_HASH_FREE(sctppcbinfo.sctp_tcpephash, sctppcbinfo.hashtcpmark);
99 if (sctppcbinfo.sctp_restarthash != NULL)
100 SCTP_HASH_FREE(sctppcbinfo.sctp_restarthash, sctppcbinfo.hashrestartmark);
101}
102
103
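/*
 * Lower the association's smallest known path MTU and mark any queued
 * chunks larger than the new MTU as OK to fragment; chunks already on
 * the sent queue are additionally flagged for retransmission.
 */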
104static void
105sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
106 struct sctp_tcb *stcb,
107 struct sctp_nets *net,
108 uint16_t nxtsz)
109{
110 struct sctp_tmit_chunk *chk;
111
112 /* Adjust that too */
113 stcb->asoc.smallest_mtu = nxtsz;
114 /* now off to subtract IP_DF flag if needed */
115#ifdef SCTP_PRINT_FOR_B_AND_M
116 SCTP_PRINTF("sctp_pathmtu_adjust called inp:%p stcb:%p net:%p nxtsz:%d\n",
117 inp, stcb, net, nxtsz);
118#endif
119 TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
120 if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
121 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
122 }
123 }
124 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
125 if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
126 /*
127				 * For this chunk we also mark it for immediate
128				 * resend, since we sent too big a chunk.
129 */
130 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
131 if (chk->sent != SCTP_DATAGRAM_RESEND) {
132 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
133 }
134 chk->sent = SCTP_DATAGRAM_RESEND;
135 chk->rec.data.doing_fast_retransmit = 0;
136#ifdef SCTP_FLIGHT_LOGGING
137 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
138 chk->whoTo->flight_size,
139 chk->book_size,
140 (uintptr_t) chk->whoTo,
141 chk->rec.data.TSN_seq);
142#endif
143 /* Clear any time so NO RTT is being done */
144 chk->do_rtt = 0;
145 sctp_flight_size_decrease(chk);
146 sctp_total_flight_decrease(stcb, chk);
147 }
148 }
149}
150
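/*
 * Handle an ICMP "fragmentation needed" message: validate it against the
 * association, shrink the destination's path MTU, and adjust queued chunks
 * via sctp_pathmtu_adjustment().
 */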
151static void
152sctp_notify_mbuf(struct sctp_inpcb *inp,
153 struct sctp_tcb *stcb,
154 struct sctp_nets *net,
155 struct ip *ip,
156 struct sctphdr *sh)
157{
158 struct icmp *icmph;
159 int totsz, tmr_stopped = 0;
160 uint16_t nxtsz;
161
162 /* protection */
163 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
164 (ip == NULL) || (sh == NULL)) {
165 if (stcb != NULL) {
166 SCTP_TCB_UNLOCK(stcb);
167 }
168 return;
169 }
170 /* First job is to verify the vtag matches what I would send */
171 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
172 SCTP_TCB_UNLOCK(stcb);
173 return;
174 }
175 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
176 sizeof(struct ip)));
177 if (icmph->icmp_type != ICMP_UNREACH) {
178 /* We only care about unreachable */
179 SCTP_TCB_UNLOCK(stcb);
180 return;
181 }
182 if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
183		/* not an unreachable message due to fragmentation */
184 SCTP_TCB_UNLOCK(stcb);
185 return;
186 }
187 totsz = ip->ip_len;
188
189 nxtsz = ntohs(icmph->icmp_seq);
190 if (nxtsz == 0) {
191 /*
192		 * old-style router that does not tell us what the next MTU
193		 * size is. We will have to guess (in an educated fashion,
194		 * of course).
195 */
196 nxtsz = find_next_best_mtu(totsz);
197 }
198 /* Stop any PMTU timer */
199 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
200 tmr_stopped = 1;
201 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
202 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
203 }
204 /* Adjust destination size limit */
205 if (net->mtu > nxtsz) {
206 net->mtu = nxtsz;
207 }
208 /* now what about the ep? */
209 if (stcb->asoc.smallest_mtu > nxtsz) {
210#ifdef SCTP_PRINT_FOR_B_AND_M
211 SCTP_PRINTF("notify_mbuf (ICMP) calls sctp_pathmtu_adjust mtu:%d\n",
212 nxtsz);
213#endif
214 sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
215 }
216 if (tmr_stopped)
217 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
218
219 SCTP_TCB_UNLOCK(stcb);
220}
221
222
223void
224sctp_notify(struct sctp_inpcb *inp,
225 int error,
226 struct sctphdr *sh,
227 struct sockaddr *to,
228 struct sctp_tcb *stcb,
229 struct sctp_nets *net)
230{
231 /* protection */
232 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
233 (sh == NULL) || (to == NULL)) {
234 return;
235 }
236 /* First job is to verify the vtag matches what I would send */
237 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
238 return;
239 }
240 /* FIX ME FIX ME PROTOPT i.e. no SCTP should ALWAYS be an ABORT */
241
242 if ((error == EHOSTUNREACH) || /* Host is not reachable */
243 (error == EHOSTDOWN) || /* Host is down */
244 (error == ECONNREFUSED) || /* Host refused the connection, (not
245 * an abort?) */
246 (error == ENOPROTOOPT) /* SCTP is not present on host */
247 ) {
248 /*
249		 * Reachability problems must be examined closely. If the peer
250		 * is not reachable, we may have lost a network. Or, if there is
251		 * NO protocol named SCTP at the other end, we consider
252		 * it an OOTB abort.
253 */
254 if ((error == EHOSTUNREACH) || (error == EHOSTDOWN)) {
255 if (net->dest_state & SCTP_ADDR_REACHABLE) {
256 /* Ok that destination is NOT reachable */
257 SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
258 net->error_count,
259 net->failure_threshold,
260 net);
261
262 net->dest_state &= ~SCTP_ADDR_REACHABLE;
263 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
264 net->error_count = net->failure_threshold + 1;
265 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
266 stcb, SCTP_FAILED_THRESHOLD,
267 (void *)net);
268 }
269 if (stcb) {
270 SCTP_TCB_UNLOCK(stcb);
271 }
272 } else {
273 /*
274			 * Here the peer is either playing tricks on us,
275			 * e.g. including an address that belongs to someone
276			 * who does not support SCTP, OR it was a userland
277			 * implementation that shut down and is now dead. In
278			 * either case treat it like an OOTB abort with no
279			 * TCB.
280 */
281 sctp_abort_notification(stcb, SCTP_PEER_FAULTY);
282 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
283 /* no need to unlock here, since the TCB is gone */
284 }
285 } else {
286 /* Send all others to the app */
287 if (stcb) {
288 SCTP_TCB_UNLOCK(stcb);
289 }
290 if (inp->sctp_socket) {
291#ifdef SCTP_LOCK_LOGGING
292 sctp_log_lock(inp, stcb, SCTP_LOG_LOCK_SOCK);
293#endif
294 SOCK_LOCK(inp->sctp_socket);
295 inp->sctp_socket->so_error = error;
296 sctp_sowwakeup(inp, inp->sctp_socket);
297 SOCK_UNLOCK(inp->sctp_socket);
298 }
299 }
300}
301
302void
303sctp_ctlinput(cmd, sa, vip)
304 int cmd;
305 struct sockaddr *sa;
306 void *vip;
307{
308 struct ip *ip = vip;
309 struct sctphdr *sh;
310 uint32_t vrf_id;
311
312 /* FIX, for non-bsd is this right? */
313 vrf_id = SCTP_DEFAULT_VRFID;
314 if (sa->sa_family != AF_INET ||
315 ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
316 return;
317 }
318 if (PRC_IS_REDIRECT(cmd)) {
319 ip = 0;
320 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
321 return;
322 }
323 if (ip) {
324 struct sctp_inpcb *inp = NULL;
325 struct sctp_tcb *stcb = NULL;
326 struct sctp_nets *net = NULL;
327 struct sockaddr_in to, from;
328
329 sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
330 bzero(&to, sizeof(to));
331 bzero(&from, sizeof(from));
332 from.sin_family = to.sin_family = AF_INET;
333 from.sin_len = to.sin_len = sizeof(to);
334 from.sin_port = sh->src_port;
335 from.sin_addr = ip->ip_src;
336 to.sin_port = sh->dest_port;
337 to.sin_addr = ip->ip_dst;
338
339 /*
340 * 'to' holds the dest of the packet that failed to be sent.
341 * 'from' holds our local endpoint address. Thus we reverse
342 * the to and the from in the lookup.
343 */
344 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
345 (struct sockaddr *)&to,
346 &inp, &net, 1, vrf_id);
347 if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
348 if (cmd != PRC_MSGSIZE) {
349 int cm;
350
351 if (cmd == PRC_HOSTDEAD) {
352 cm = EHOSTUNREACH;
353 } else {
354 cm = inetctlerrmap[cmd];
355 }
356 sctp_notify(inp, cm, sh,
357 (struct sockaddr *)&to, stcb,
358 net);
359 } else {
360 /* handle possible ICMP size messages */
361 sctp_notify_mbuf(inp, stcb, net, ip, sh);
362 }
363 } else {
364 if ((stcb == NULL) && (inp != NULL)) {
365 /* reduce ref-count */
366 SCTP_INP_WLOCK(inp);
367 SCTP_INP_DECR_REF(inp);
368 SCTP_INP_WUNLOCK(inp);
369 }
370 }
371 }
372 return;
373}
374
375static int
376sctp_getcred(SYSCTL_HANDLER_ARGS)
377{
378 struct xucred xuc;
379 struct sockaddr_in addrs[2];
380 struct sctp_inpcb *inp;
381 struct sctp_nets *net;
382 struct sctp_tcb *stcb;
383 int error;
384 uint32_t vrf_id;
385
386
387 /* FIX, for non-bsd is this right? */
388 vrf_id = SCTP_DEFAULT_VRFID;
389
390 /*
391 * XXXRW: Other instances of getcred use SUSER_ALLOWJAIL, as socket
392 * visibility is scoped using cr_canseesocket(), which it is not
393 * here.
394 */
395 error = priv_check_cred(req->td->td_ucred, PRIV_NETINET_GETCRED,
396 SUSER_ALLOWJAIL);
397 if (error)
398 return (error);
399
400 error = SYSCTL_IN(req, addrs, sizeof(addrs));
401 if (error)
402 return (error);
403
404 stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
405 sintosa(&addrs[1]),
406 &inp, &net, 1, vrf_id);
407 if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
408 if ((inp != NULL) && (stcb == NULL)) {
409 /* reduce ref-count */
410 SCTP_INP_WLOCK(inp);
411 SCTP_INP_DECR_REF(inp);
412 goto cred_can_cont;
413 }
414 error = ENOENT;
415 goto out;
416 }
417 SCTP_TCB_UNLOCK(stcb);
418 /*
419	 * We use the write lock here only because the error leg needs
420	 * it. If we used RLOCK, we would have to
421	 * wlock/decr/unlock/rlock, which in theory could create a hole.
422	 * Better to use the stronger wlock.
423 */
424 SCTP_INP_WLOCK(inp);
425cred_can_cont:
426 error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
427 if (error) {
428 SCTP_INP_WUNLOCK(inp);
429 goto out;
430 }
431 cru2x(inp->sctp_socket->so_cred, &xuc);
432 SCTP_INP_WUNLOCK(inp);
433 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
434out:
435 return (error);
436}
437
438SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
439 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
440
441
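/*
 * Abort (hard close) the socket: mark the PCB gone, free it so any
 * associations are torn down with an ABORT, and detach it from the socket.
 */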
442static void
443sctp_abort(struct socket *so)
444{
445 struct sctp_inpcb *inp;
446 uint32_t flags;
447
448 inp = (struct sctp_inpcb *)so->so_pcb;
449 if (inp == 0)
450 return;
451
452sctp_must_try_again:
453 flags = inp->sctp_flags;
454#ifdef SCTP_LOG_CLOSING
455 sctp_log_closing(inp, NULL, 17);
456#endif
457 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
458 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
459#ifdef SCTP_LOG_CLOSING
460 sctp_log_closing(inp, NULL, 16);
461#endif
462 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
463 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
464 SOCK_LOCK(so);
465 SCTP_SB_CLEAR(so->so_snd);
466 /*
467 * same for the rcv ones, they are only here for the
468 * accounting/select.
469 */
470 SCTP_SB_CLEAR(so->so_rcv);
471
472 /* Now null out the reference, we are completely detached. */
473 so->so_pcb = NULL;
474 SOCK_UNLOCK(so);
475 } else {
476 flags = inp->sctp_flags;
477 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
478 goto sctp_must_try_again;
479 }
480 }
481 return;
482}
483
484static int
485sctp_attach(struct socket *so, int proto, struct thread *p)
486{
487 struct sctp_inpcb *inp;
488 struct inpcb *ip_inp;
489 int error;
490
491#ifdef IPSEC
492 uint32_t flags;
493
494#endif
495 inp = (struct sctp_inpcb *)so->so_pcb;
496 if (inp != 0) {
497 return EINVAL;
498 }
499 error = SCTP_SORESERVE(so, sctp_sendspace, sctp_recvspace);
500 if (error) {
501 return error;
502 }
503 error = sctp_inpcb_alloc(so);
504 if (error) {
505 return error;
506 }
507 inp = (struct sctp_inpcb *)so->so_pcb;
508 SCTP_INP_WLOCK(inp);
509
510 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */
511 ip_inp = &inp->ip_inp.inp;
512 ip_inp->inp_vflag |= INP_IPV4;
513 ip_inp->inp_ip_ttl = ip_defttl;
514
515#ifdef IPSEC
516 error = ipsec_init_pcbpolicy(so, &ip_inp->inp_sp);
517#ifdef SCTP_LOG_CLOSING
518 sctp_log_closing(inp, NULL, 17);
519#endif
520 if (error != 0) {
521 flags = inp->sctp_flags;
522 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
523 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
524#ifdef SCTP_LOG_CLOSING
525 sctp_log_closing(inp, NULL, 15);
526#endif
527 SCTP_INP_WUNLOCK(inp);
528 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
529 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
530 } else {
531 SCTP_INP_WUNLOCK(inp);
532 }
533 return error;
534 }
535#endif /* IPSEC */
536 SCTP_INP_WUNLOCK(inp);
537 return 0;
538}
539
540static int
541sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
542{
543 struct sctp_inpcb *inp;
544 int error;
545
546#ifdef INET6
547 if (addr && addr->sa_family != AF_INET)
548 /* must be a v4 address! */
549 return EINVAL;
550#endif /* INET6 */
551 if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) {
552 return EINVAL;
553 }
554 inp = (struct sctp_inpcb *)so->so_pcb;
555 if (inp == 0)
556 return EINVAL;
557
558 error = sctp_inpcb_bind(so, addr, p);
559 return error;
560}
561
562static void
563sctp_close(struct socket *so)
564{
565 struct sctp_inpcb *inp;
566 uint32_t flags;
567
568 inp = (struct sctp_inpcb *)so->so_pcb;
569 if (inp == 0)
570 return;
571
572 /*
573	 * Inform all the lower-layer associations that we are done.
574 */
575sctp_must_try_again:
576 flags = inp->sctp_flags;
577#ifdef SCTP_LOG_CLOSING
578 sctp_log_closing(inp, NULL, 17);
579#endif
580 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
581 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
582 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
583 (so->so_rcv.sb_cc > 0)) {
584#ifdef SCTP_LOG_CLOSING
585 sctp_log_closing(inp, NULL, 13);
586#endif
587 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
588 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
589 } else {
590#ifdef SCTP_LOG_CLOSING
591 sctp_log_closing(inp, NULL, 14);
592#endif
593 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
594 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
595 }
596 /*
597 * The socket is now detached, no matter what the state of
598 * the SCTP association.
599 */
600 SOCK_LOCK(so);
601 SCTP_SB_CLEAR(so->so_snd);
602 /*
603 * same for the rcv ones, they are only here for the
604 * accounting/select.
605 */
606 SCTP_SB_CLEAR(so->so_rcv);
607
608 /* Now null out the reference, we are completely detached. */
609 so->so_pcb = NULL;
610 SOCK_UNLOCK(so);
611 } else {
612 flags = inp->sctp_flags;
613 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
614 goto sctp_must_try_again;
615 }
616 }
617 return;
618}
619
620
621int
622sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
623 struct mbuf *control, struct thread *p);
624
625
626int
627sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
628 struct mbuf *control, struct thread *p)
629{
630 struct sctp_inpcb *inp;
631 int error;
632
633 inp = (struct sctp_inpcb *)so->so_pcb;
634 if (inp == 0) {
635 if (control) {
636 sctp_m_freem(control);
637 control = NULL;
638 }
639 sctp_m_freem(m);
640 return EINVAL;
641 }
642	/* Got to have a to-address if we are NOT a connected socket */
643 if ((addr == NULL) &&
644 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
645 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
646 ) {
647 goto connected_type;
648 } else if (addr == NULL) {
649 error = EDESTADDRREQ;
650 sctp_m_freem(m);
651 if (control) {
652 sctp_m_freem(control);
653 control = NULL;
654 }
655 return (error);
656 }
657#ifdef INET6
658 if (addr->sa_family != AF_INET) {
659 /* must be a v4 address! */
660 sctp_m_freem(m);
661 if (control) {
662 sctp_m_freem(control);
663 control = NULL;
664 }
665 error = EDESTADDRREQ;
666 return EINVAL;
667 }
668#endif /* INET6 */
669connected_type:
670 /* now what about control */
671 if (control) {
672 if (inp->control) {
673 SCTP_PRINTF("huh? control set?\n");
674 sctp_m_freem(inp->control);
675 inp->control = NULL;
676 }
677 inp->control = control;
678 }
679 /* Place the data */
680 if (inp->pkt) {
681 SCTP_BUF_NEXT(inp->pkt_last) = m;
682 inp->pkt_last = m;
683 } else {
684 inp->pkt_last = inp->pkt = m;
685 }
686 if (
687 /* FreeBSD uses a flag passed */
688 ((flags & PRUS_MORETOCOME) == 0)
689 ) {
690 /*
691		 * Note that with the current version this code will only be used
692		 * by OpenBSD; NetBSD, FreeBSD, and MacOS have methods for
693		 * redefining sosend to use sctp_sosend. One can optionally
694		 * switch back to this code (by changing back the definitions),
695		 * but this is not advisable. This code is, however, used by
696		 * FreeBSD when sending a file with sendfile().
697 */
698 int ret;
699
700 ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
701 inp->pkt = NULL;
702 inp->control = NULL;
703 return (ret);
704 } else {
705 return (0);
706 }
707}
708
709static int
710sctp_disconnect(struct socket *so)
711{
712 struct sctp_inpcb *inp;
713
714 inp = (struct sctp_inpcb *)so->so_pcb;
715 if (inp == NULL) {
716 return (ENOTCONN);
717 }
718 SCTP_INP_RLOCK(inp);
719 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
720 if (SCTP_LIST_EMPTY(&inp->sctp_asoc_list)) {
721 /* No connection */
722 SCTP_INP_RUNLOCK(inp);
723 return (0);
724 } else {
725 struct sctp_association *asoc;
726 struct sctp_tcb *stcb;
727
728 stcb = LIST_FIRST(&inp->sctp_asoc_list);
729 if (stcb == NULL) {
730 SCTP_INP_RUNLOCK(inp);
731 return (EINVAL);
732 }
733 SCTP_TCB_LOCK(stcb);
734 asoc = &stcb->asoc;
735 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
736 /* We are about to be freed, out of here */
737 SCTP_TCB_UNLOCK(stcb);
738 SCTP_INP_RUNLOCK(inp);
739 return (0);
740 }
741 if (((so->so_options & SO_LINGER) &&
742 (so->so_linger == 0)) ||
743 (so->so_rcv.sb_cc > 0)) {
744 if (SCTP_GET_STATE(asoc) !=
745 SCTP_STATE_COOKIE_WAIT) {
746 /* Left with Data unread */
747 struct mbuf *err;
748
749 err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
750 if (err) {
751 /*
752 * Fill in the user
753 * initiated abort
754 */
755 struct sctp_paramhdr *ph;
756
757 ph = mtod(err, struct sctp_paramhdr *);
758 SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr);
759 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
760 ph->param_length = htons(SCTP_BUF_LEN(err));
761 }
762 sctp_send_abort_tcb(stcb, err);
763 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
764 }
765 SCTP_INP_RUNLOCK(inp);
766 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
767 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
768 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
769 }
770 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
771 /* No unlock tcb assoc is gone */
772 return (0);
773 }
774 if (TAILQ_EMPTY(&asoc->send_queue) &&
775 TAILQ_EMPTY(&asoc->sent_queue) &&
776 (asoc->stream_queue_cnt == 0)) {
777 /* there is nothing queued to send, so done */
778 if (asoc->locked_on_sending) {
779 goto abort_anyway;
780 }
781 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
782 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
783 /* only send SHUTDOWN 1st time thru */
784 sctp_stop_timers_for_shutdown(stcb);
785 sctp_send_shutdown(stcb,
786 stcb->asoc.primary_destination);
787 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3);
788 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
789 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
790 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
791 }
792 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
793 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
794 stcb->sctp_ep, stcb,
795 asoc->primary_destination);
796 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
797 stcb->sctp_ep, stcb,
798 asoc->primary_destination);
799 }
800 } else {
801 /*
802				 * we still have (or just got) data to send,
803 * so set SHUTDOWN_PENDING
804 */
805 /*
806 * XXX sockets draft says that SCTP_EOF
807 * should be sent with no data. currently,
808 * we will allow user data to be sent first
809 * and move to SHUTDOWN-PENDING
810 */
811 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
812 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
813 asoc->primary_destination);
814 if (asoc->locked_on_sending) {
815 /* Locked to send out the data */
816 struct sctp_stream_queue_pending *sp;
817
818 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
819 if (sp == NULL) {
820 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
821 asoc->locked_on_sending->stream_no);
822 } else {
823 if ((sp->length == 0) && (sp->msg_is_complete == 0))
824 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
825 }
826 }
827 if (TAILQ_EMPTY(&asoc->send_queue) &&
828 TAILQ_EMPTY(&asoc->sent_queue) &&
829 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
830 struct mbuf *op_err;
831
832 abort_anyway:
833 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
834 0, M_DONTWAIT, 1, MT_DATA);
835 if (op_err) {
836 /*
837 * Fill in the user
838 * initiated abort
839 */
840 struct sctp_paramhdr *ph;
841 uint32_t *ippp;
842
843 SCTP_BUF_LEN(op_err) =
844 (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
845 ph = mtod(op_err,
846 struct sctp_paramhdr *);
847 ph->param_type = htons(
848 SCTP_CAUSE_USER_INITIATED_ABT);
849 ph->param_length = htons(SCTP_BUF_LEN(op_err));
850 ippp = (uint32_t *) (ph + 1);
851 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4);
852 }
853 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
854 sctp_send_abort_tcb(stcb, op_err);
855 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
856 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
857 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
858 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
859 }
860 SCTP_INP_RUNLOCK(inp);
861 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
862 return (0);
863 }
864 }
865 SCTP_TCB_UNLOCK(stcb);
866 SCTP_INP_RUNLOCK(inp);
867 return (0);
868 }
869 /* not reached */
870 } else {
871 /* UDP model does not support this */
872 SCTP_INP_RUNLOCK(inp);
873 return EOPNOTSUPP;
874 }
875}
876
877int
878sctp_shutdown(struct socket *so)
879{
880 struct sctp_inpcb *inp;
881
882 inp = (struct sctp_inpcb *)so->so_pcb;
883 if (inp == 0) {
884 return EINVAL;
885 }
886 SCTP_INP_RLOCK(inp);
887	/* For the UDP model this is an invalid call */
888 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
889 /* Restore the flags that the soshutdown took away. */
890 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
891		/* This proc will wake up for read and do nothing (I hope) */
892 SCTP_INP_RUNLOCK(inp);
893 return (EOPNOTSUPP);
894 }
895 /*
896	 * Ok, if we reach here it's the TCP model and it is either a SHUT_WR
897 * or SHUT_RDWR. This means we put the shutdown flag against it.
898 */
899 {
900 struct sctp_tcb *stcb;
901 struct sctp_association *asoc;
902
903 socantsendmore(so);
904
905 stcb = LIST_FIRST(&inp->sctp_asoc_list);
906 if (stcb == NULL) {
907 /*
908 * Ok we hit the case that the shutdown call was
909 * made after an abort or something. Nothing to do
910 * now.
911 */
912 SCTP_INP_RUNLOCK(inp);
913 return (0);
914 }
915 SCTP_TCB_LOCK(stcb);
916 asoc = &stcb->asoc;
917 if (TAILQ_EMPTY(&asoc->send_queue) &&
918 TAILQ_EMPTY(&asoc->sent_queue) &&
919 (asoc->stream_queue_cnt == 0)) {
920 if (asoc->locked_on_sending) {
921 goto abort_anyway;
922 }
923 /* there is nothing queued to send, so I'm done... */
924 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
925 /* only send SHUTDOWN the first time through */
926 sctp_stop_timers_for_shutdown(stcb);
927 sctp_send_shutdown(stcb,
928 stcb->asoc.primary_destination);
929 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3);
930 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
931 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
932 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
933 }
934 asoc->state = SCTP_STATE_SHUTDOWN_SENT;
935 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
936 stcb->sctp_ep, stcb,
937 asoc->primary_destination);
938 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
939 stcb->sctp_ep, stcb,
940 asoc->primary_destination);
941 }
942 } else {
943 /*
944			 * we still have (or just got) data to send, so set
945 * SHUTDOWN_PENDING
946 */
947 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
948 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
949 asoc->primary_destination);
950
951 if (asoc->locked_on_sending) {
952 /* Locked to send out the data */
953 struct sctp_stream_queue_pending *sp;
954
955 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
956 if (sp == NULL) {
957 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
958 asoc->locked_on_sending->stream_no);
959 } else {
960 if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
961 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
962 }
963 }
964 }
965 if (TAILQ_EMPTY(&asoc->send_queue) &&
966 TAILQ_EMPTY(&asoc->sent_queue) &&
967 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
968 struct mbuf *op_err;
969
970 abort_anyway:
971 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
972 0, M_DONTWAIT, 1, MT_DATA);
973 if (op_err) {
974 /* Fill in the user initiated abort */
975 struct sctp_paramhdr *ph;
976 uint32_t *ippp;
977
978 SCTP_BUF_LEN(op_err) =
979 sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
980 ph = mtod(op_err,
981 struct sctp_paramhdr *);
982 ph->param_type = htons(
983 SCTP_CAUSE_USER_INITIATED_ABT);
984 ph->param_length = htons(SCTP_BUF_LEN(op_err));
985 ippp = (uint32_t *) (ph + 1);
986 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
987 }
988 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
989 sctp_abort_an_association(stcb->sctp_ep, stcb,
990 SCTP_RESPONSE_TO_USER_REQ,
991 op_err);
992 goto skip_unlock;
993 }
994 }
995 SCTP_TCB_UNLOCK(stcb);
996 }
997skip_unlock:
998 SCTP_INP_RUNLOCK(inp);
999 return 0;
1000}
1001
1002/*
1003 * copies a "user" presentable address and removes embedded scope, etc.
1004 * returns 0 on success, 1 on error
1005 */
1006static uint32_t
1007sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
1008{
1009 struct sockaddr_in6 lsa6;
1010
1011 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
1012 &lsa6);
1013 memcpy(ss, sa, sa->sa_len);
1014 return (0);
1015}
1016
1017
1018
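/*
 * Copy the usable local addresses for the given VRF (from the bound-all
 * interface lists or the endpoint's bound-address list) into 'sas',
 * stopping once 'limit' bytes have been filled; returns the number of
 * bytes written.
 */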
1019static size_t
1020sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
1021 struct sctp_tcb *stcb,
1022 size_t limit,
1023 struct sockaddr_storage *sas,
1024 uint32_t vrf_id)
1025{
1026 struct sctp_ifn *sctp_ifn;
1027 struct sctp_ifa *sctp_ifa;
1028 int loopback_scope, ipv4_local_scope, local_scope, site_scope;
1029 size_t actual;
1030 int ipv4_addr_legal, ipv6_addr_legal;
1031 struct sctp_vrf *vrf;
1032
1033 actual = 0;
1034 if (limit <= 0)
1035 return (actual);
1036
1037 if (stcb) {
1038 /* Turn on all the appropriate scope */
1039 loopback_scope = stcb->asoc.loopback_scope;
1040 ipv4_local_scope = stcb->asoc.ipv4_local_scope;
1041 local_scope = stcb->asoc.local_scope;
1042 site_scope = stcb->asoc.site_scope;
1043 } else {
1044 /* Turn on ALL scope, since we look at the EP */
1045 loopback_scope = ipv4_local_scope = local_scope =
1046 site_scope = 1;
1047 }
1048 ipv4_addr_legal = ipv6_addr_legal = 0;
1049 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1050 ipv6_addr_legal = 1;
1051 if (SCTP_IPV6_V6ONLY(inp) == 0) {
1052 ipv4_addr_legal = 1;
1053 }
1054 } else {
1055 ipv4_addr_legal = 1;
1056 }
1057 vrf = sctp_find_vrf(vrf_id);
1058 if (vrf == NULL) {
1059 return (0);
1060 }
1061 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1062 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1063 if ((loopback_scope == 0) &&
1064 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
1065 /* Skip loopback if loopback_scope not set */
1066 continue;
1067 }
1068 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1069 if (stcb) {
1070 /*
1071 * For the BOUND-ALL case, the list
1072					 * associated with a TCB is always
1073					 * considered a reverse list, i.e.
1074 * it lists addresses that are NOT
1075 * part of the association. If this
1076 * is one of those we must skip it.
1077 */
1078 if (sctp_is_addr_restricted(stcb,
1079 sctp_ifa)) {
1080 continue;
1081 }
1082 }
1083 if ((sctp_ifa->address.sa.sa_family == AF_INET) &&
1084 (ipv4_addr_legal)) {
1085 struct sockaddr_in *sin;
1086
1087 sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
1088 if (sin->sin_addr.s_addr == 0) {
1089 /*
1090						 * we skip unspecified
1091 * addresses
1092 */
1093 continue;
1094 }
1095 if ((ipv4_local_scope == 0) &&
1096 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1097 continue;
1098 }
1099 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) {
1100 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
1101 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1102 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
1103						actual += sizeof(struct sockaddr_in6);
1104 } else {
1105 memcpy(sas, sin, sizeof(*sin));
1106 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
1107 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
1108 actual += sizeof(*sin);
1109 }
1110 if (actual >= limit) {
1111 return (actual);
1112 }
1113 } else if ((sctp_ifa->address.sa.sa_family == AF_INET6) &&
1114 (ipv6_addr_legal)) {
1115 struct sockaddr_in6 *sin6;
1116
1117 sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
1118 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1119 /*
1120						 * we skip unspecified
1121 * addresses
1122 */
1123 continue;
1124 }
1125 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1126 if (local_scope == 0)
1127 continue;
1128 if (sin6->sin6_scope_id == 0) {
1129 if (sa6_recoverscope(sin6) != 0)
1130 /*
1131 * bad link
1132 * local
1133 * address
1134 */
1135 continue;
1136 }
1137 }
1138 if ((site_scope == 0) &&
1139 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1140 continue;
1141 }
1142 memcpy(sas, sin6, sizeof(*sin6));
1143 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1144 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
1145 actual += sizeof(*sin6);
1146 if (actual >= limit) {
1147 return (actual);
1148 }
1149 }
1150 }
1151 }
1152 } else {
1153 struct sctp_laddr *laddr;
1154
1155 /* The list is a NEGATIVE list */
1156 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1157 if (stcb) {
1158 if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
1159 continue;
1160 }
1161 }
1162 if (sctp_fill_user_address(sas, &laddr->ifa->address.sa))
1163 continue;
1164
1165 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1166 sas = (struct sockaddr_storage *)((caddr_t)sas +
1167 laddr->ifa->address.sa.sa_len);
1168 actual += laddr->ifa->address.sa.sa_len;
1169 if (actual >= limit) {
1170 return (actual);
1171 }
1172 }
1173 }
1174 return (actual);
1175}
1176
1177static size_t
1178sctp_fill_up_addresses(struct sctp_inpcb *inp,
1179 struct sctp_tcb *stcb,
1180 size_t limit,
1181 struct sockaddr_storage *sas)
1182{
1183 size_t size = 0;
1184
1185 /* fill up addresses for the endpoint's default vrf */
1186 size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
1187 inp->def_vrf_id);
1188 return (size);
1189}
1190
1191static int
1192sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
1193{
1194 int cnt = 0;
1195 struct sctp_vrf *vrf = NULL;
1196
1197 /*
1198	 * In both the sub-set bound and bound_all cases we return the MAXIMUM
1199 * number of addresses that you COULD get. In reality the sub-set
1200 * bound may have an exclusion list for a given TCB OR in the
1201 * bound-all case a TCB may NOT include the loopback or other
1202 * addresses as well.
1203 */
1204 vrf = sctp_find_vrf(vrf_id);
1205 if (vrf == NULL) {
1206 return (0);
1207 }
1208 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1209 struct sctp_ifn *sctp_ifn;
1210 struct sctp_ifa *sctp_ifa;
1211
1212 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1213 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1214 /* Count them if they are the right type */
1215 if (sctp_ifa->address.sa.sa_family == AF_INET) {
1216 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
1217 cnt += sizeof(struct sockaddr_in6);
1218 else
1219 cnt += sizeof(struct sockaddr_in);
1220
1221 } else if (sctp_ifa->address.sa.sa_family == AF_INET6)
1222 cnt += sizeof(struct sockaddr_in6);
1223 }
1224 }
1225 } else {
1226 struct sctp_laddr *laddr;
1227
1228 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1229 if (laddr->ifa->address.sa.sa_family == AF_INET) {
1230 if (inp->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)
1231 cnt += sizeof(struct sockaddr_in6);
1232 else
1233 cnt += sizeof(struct sockaddr_in);
1234
1235 } else if (laddr->ifa->address.sa.sa_family == AF_INET6)
1236 cnt += sizeof(struct sockaddr_in6);
1237 }
1238 }
1239 return (cnt);
1240}
1241
1242static int
1243sctp_count_max_addresses(struct sctp_inpcb *inp)
1244{
1245 int cnt = 0;
1246
1247 /* count addresses for the endpoint's default VRF */
1248 cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
1249 return (cnt);
1250}
1251
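/*
 * Shared backend for the connectx()-style setsockopt requests: validate the
 * packed address list, create the association, and either send the INIT now
 * or, when 'delay' is set, arm the init timer for a delayed connection.
 */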
1252static int
1253sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
1254 size_t optsize, void *p, int delay)
1255{
1256 int error = 0;
1257 int creat_lock_on = 0;
1258 struct sctp_tcb *stcb = NULL;
1259 struct sockaddr *sa;
1260 int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr;
1261 int added = 0;
1262 uint32_t vrf_id;
1263 int bad_addresses = 0;
1264 sctp_assoc_t *a_id;
1265
1266 SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n");
1267
1268 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1269 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1270 /* We are already connected AND the TCP model */
1271 return (EADDRINUSE);
1272 }
1273 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
1274 return (EINVAL);
1275 }
1276 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
1277 SCTP_INP_RLOCK(inp);
1278 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1279 SCTP_INP_RUNLOCK(inp);
1280 }
1281 if (stcb) {
1282 return (EALREADY);
1283 }
1284 SCTP_INP_INCR_REF(inp);
1285 SCTP_ASOC_CREATE_LOCK(inp);
1286 creat_lock_on = 1;
1287 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1288 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1289 error = EFAULT;
1290 goto out_now;
1291 }
1292 totaddrp = (int *)optval;
1293 totaddr = *totaddrp;
1294 sa = (struct sockaddr *)(totaddrp + 1);
1295 stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, &num_v6, &error, (optsize - sizeof(int)), &bad_addresses);
1296 if ((stcb != NULL) || bad_addresses) {
1297		/* Already have or am bringing up an association */
1298 SCTP_ASOC_CREATE_UNLOCK(inp);
1299 creat_lock_on = 0;
1300 SCTP_TCB_UNLOCK(stcb);
1301 if (bad_addresses == 0)
1302 error = EALREADY;
1303 goto out_now;
1304 }
1305#ifdef INET6
1306 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
1307 (num_v6 > 0)) {
1308 error = EINVAL;
1309 goto out_now;
1310 }
1311 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1312 (num_v4 > 0)) {
1313 struct in6pcb *inp6;
1314
1315 inp6 = (struct in6pcb *)inp;
1316 if (SCTP_IPV6_V6ONLY(inp6)) {
1317 /*
1318 * if IPV6_V6ONLY flag, ignore connections destined
1319 * to a v4 addr or v4-mapped addr
1320 */
1321 error = EINVAL;
1322 goto out_now;
1323 }
1324 }
1325#endif /* INET6 */
1326 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
1327 SCTP_PCB_FLAGS_UNBOUND) {
1328		/* Bind an ephemeral port */
1329 error = sctp_inpcb_bind(so, NULL, p);
1330 if (error) {
1331 goto out_now;
1332 }
1333 }
1334 /* FIX ME: do we want to pass in a vrf on the connect call? */
1335 vrf_id = inp->def_vrf_id;
1336
1337 /* We are GOOD to go */
1338 stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0, vrf_id);
1339 if (stcb == NULL) {
1340 /* Gak! no memory */
1341 goto out_now;
1342 }
1343 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
1344 /* move to second address */
1345 if (sa->sa_family == AF_INET)
1346 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
1347 else
1348 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
1349
1350 error = 0;
1351 added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error);
1352 /* Fill in the return id */
1353 if (error) {
1354 sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_12);
1355 goto out_now;
1356 }
1357 a_id = (sctp_assoc_t *) optval;
1358 *a_id = sctp_get_associd(stcb);
1359
1360 /* initialize authentication parameters for the assoc */
1361 sctp_initialize_auth_params(inp, stcb);
1362
1363 if (delay) {
1364 /* doing delayed connection */
1365 stcb->asoc.delayed_connection = 1;
1366 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
1367 } else {
1368 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1369 sctp_send_initiate(inp, stcb);
1370 }
1371 SCTP_TCB_UNLOCK(stcb);
1372 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
1373 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1374 /* Set the connected flag so we can queue data */
1375 soisconnecting(so);
1376 }
1377out_now:
1378 if (creat_lock_on) {
1379 SCTP_ASOC_CREATE_UNLOCK(inp);
1380 }
1381 SCTP_INP_DECR_REF(inp);
1382 return error;
1383}
1384
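/*
 * Resolve 'stcb' for a socket option: 1-to-1 style endpoints use their
 * single association (returned locked), otherwise the association is looked
 * up by 'assoc_id'; 'stcb' is left NULL when no association applies.
 */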
1385#define SCTP_FIND_STCB(inp, stcb, assoc_id) { \
1386 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\
1387 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \
1388 SCTP_INP_RLOCK(inp); \
1389 stcb = LIST_FIRST(&inp->sctp_asoc_list); \
1390 if (stcb) { \
1391 SCTP_TCB_LOCK(stcb); \
1392 } \
1393 SCTP_INP_RUNLOCK(inp); \
1394 } else if (assoc_id != 0) { \
1395 stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
1396 if (stcb == NULL) { \
1397 error = ENOENT; \
1398 break; \
1399 } \
1400 } else { \
1401 stcb = NULL; \
1402 } \
1403 }
1404
1405
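/*
 * Check that the user-supplied option buffer is at least sizeof(type)
 * bytes before casting it; on failure set EINVAL and break out.
 */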
1406#define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\
1407 if (size < sizeof(type)) { \
1408 error = EINVAL; \
1409 break; \
1410 } else { \
1411 destp = (type *)srcp; \
1412 } \
1413 }
1414
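/*
 * getsockopt() worker: '*optsize' carries the buffer size on entry and the
 * number of bytes returned on exit.
 */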
1415static int
1416sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
1417 void *p)
1418{
1419 struct sctp_inpcb *inp;
1420 int error, val = 0;
1421 struct sctp_tcb *stcb = NULL;
1422
1423 if (optval == NULL) {
1424 return (EINVAL);
1425 }
1426 inp = (struct sctp_inpcb *)so->so_pcb;
1427 if (inp == 0)
1428 return EINVAL;
1429 error = 0;
1430
1431 switch (optname) {
1432 case SCTP_NODELAY:
1433 case SCTP_AUTOCLOSE:
1434 case SCTP_EXPLICIT_EOR:
1435 case SCTP_AUTO_ASCONF:
1436 case SCTP_DISABLE_FRAGMENTS:
1437 case SCTP_I_WANT_MAPPED_V4_ADDR:
1438 case SCTP_USE_EXT_RCVINFO:
1439 SCTP_INP_RLOCK(inp);
1440 switch (optname) {
1441 case SCTP_DISABLE_FRAGMENTS:
1442 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
1443 break;
1444 case SCTP_I_WANT_MAPPED_V4_ADDR:
1445 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
1446 break;
1447 case SCTP_AUTO_ASCONF:
1448 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
1449 break;
1450 case SCTP_EXPLICIT_EOR:
1451 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
1452 break;
1453 case SCTP_NODELAY:
1454 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
1455 break;
1456 case SCTP_USE_EXT_RCVINFO:
1457 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
1458 break;
1459 case SCTP_AUTOCLOSE:
1460 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
1461 val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time);
1462 else
1463 val = 0;
1464 break;
1465
1466 default:
1467 error = ENOPROTOOPT;
1468 } /* end switch (sopt->sopt_name) */
1469 if (optname != SCTP_AUTOCLOSE) {
1470 /* make it an "on/off" value */
1471 val = (val != 0);
1472 }
1473 if (*optsize < sizeof(val)) {
1474 error = EINVAL;
1475 }
1476 SCTP_INP_RUNLOCK(inp);
1477 if (error == 0) {
1478 /* return the option value */
1479 *(int *)optval = val;
1480 *optsize = sizeof(val);
1481 }
1482 break;
1483 case SCTP_GET_PACKET_LOG:
1484 {
1485#ifdef SCTP_PACKET_LOGGING
1486 uint8_t *target;
1487 int ret;
1488
1489 SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize);
1490 ret = sctp_copy_out_packet_log(target, (int)*optsize);
1491 *optsize = ret;
1492#else
1493 error = EOPNOTSUPP;
1494#endif
1495 break;
1496 }
1497 case SCTP_PARTIAL_DELIVERY_POINT:
1498 {
1499 uint32_t *value;
1500
1501 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1502 *value = inp->partial_delivery_point;
1503 *optsize = sizeof(uint32_t);
1504 }
1505 break;
1506 case SCTP_FRAGMENT_INTERLEAVE:
1507 {
1508 uint32_t *value;
1509
1510 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1511 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) {
1512 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) {
1513 *value = SCTP_FRAG_LEVEL_2;
1514 } else {
1515 *value = SCTP_FRAG_LEVEL_1;
1516 }
1517 } else {
1518 *value = SCTP_FRAG_LEVEL_0;
1519 }
1520 *optsize = sizeof(uint32_t);
1521 }
1522 break;
1523 case SCTP_CMT_ON_OFF:
1524 {
1525 struct sctp_assoc_value *av;
1526
1527 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1528 if (sctp_cmt_on_off) {
1529 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1530 if (stcb) {
1531 av->assoc_value = stcb->asoc.sctp_cmt_on_off;
1532 SCTP_TCB_UNLOCK(stcb);
1533
1534 } else {
1535 error = ENOTCONN;
1536 }
1537 } else {
1538 error = ENOPROTOOPT;
1539 }
1540 *optsize = sizeof(*av);
1541 }
1542 break;
1543 case SCTP_GET_ADDR_LEN:
1544 {
1545 struct sctp_assoc_value *av;
1546
1547 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1548 error = EINVAL;
1549#ifdef INET
1550 if (av->assoc_value == AF_INET) {
1551 av->assoc_value = sizeof(struct sockaddr_in);
1552 error = 0;
1553 }
1554#endif
1555#ifdef INET6
1556 if (av->assoc_value == AF_INET6) {
1557 av->assoc_value = sizeof(struct sockaddr_in6);
1558 error = 0;
1559 }
1560#endif
1561 *optsize = sizeof(*av);
1562 }
1563 break;
1564 case SCTP_GET_ASSOC_NUMBER:
1565 {
1566 uint32_t *value, cnt;
1567
1568 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1569 cnt = 0;
1570 SCTP_INP_RLOCK(inp);
1571 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1572 cnt++;
1573 }
1574 SCTP_INP_RUNLOCK(inp);
1575 *value = cnt;
1576 *optsize = sizeof(uint32_t);
1577 }
1578 break;
1579
1580 case SCTP_GET_ASSOC_ID_LIST:
1581 {
1582 struct sctp_assoc_ids *ids;
1583 unsigned int at, limit;
1584
1585 SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
1586 at = 0;
1587 limit = *optsize / sizeof(sctp_assoc_t);
1588 SCTP_INP_RLOCK(inp);
1589 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1590 if (at < limit) {
1591 ids->gaids_assoc_id[at++] = sctp_get_associd(stcb);
1592 } else {
1593 error = EINVAL;
1594 break;
1595 }
1596 }
1597 SCTP_INP_RUNLOCK(inp);
1598 *optsize = at * sizeof(sctp_assoc_t);
1599 }
1600 break;
1601 case SCTP_CONTEXT:
1602 {
1603 struct sctp_assoc_value *av;
1604
1605 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1606 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1607
1608 if (stcb) {
1609 av->assoc_value = stcb->asoc.context;
1610 SCTP_TCB_UNLOCK(stcb);
1611 } else {
1612 SCTP_INP_RLOCK(inp);
1613 av->assoc_value = inp->sctp_context;
1614 SCTP_INP_RUNLOCK(inp);
1615 }
1616 *optsize = sizeof(*av);
1617 }
1618 break;
1619 case SCTP_VRF_ID:
1620 {
1621 uint32_t *default_vrfid;
1622
1623 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize);
1624 *default_vrfid = inp->def_vrf_id;
1625 break;
1626 }
1627 case SCTP_GET_ASOC_VRF:
1628 {
1629 struct sctp_assoc_value *id;
1630
1631 SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize);
1632 SCTP_FIND_STCB(inp, stcb, id->assoc_id);
1633 if (stcb == NULL) {
1634 error = EINVAL;
1635 break;
1636 }
1637 id->assoc_value = stcb->asoc.vrf_id;
1638 break;
1639 }
1640 case SCTP_GET_VRF_IDS:
1641 {
1642 error = EOPNOTSUPP;
1643 break;
1644 }
1645 case SCTP_GET_NONCE_VALUES:
1646 {
1647 struct sctp_get_nonce_values *gnv;
1648
1649 SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize);
1650 SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id);
1651
1652 if (stcb) {
1653 gnv->gn_peers_tag = stcb->asoc.peer_vtag;
1654 gnv->gn_local_tag = stcb->asoc.my_vtag;
1655 SCTP_TCB_UNLOCK(stcb);
1656 } else {
1657 error = ENOTCONN;
1658 }
1659 *optsize = sizeof(*gnv);
1660 }
1661 break;
1662 case SCTP_DELAYED_SACK:
1663 {
1664 struct sctp_sack_info *sack;
1665
1666 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize);
1667 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
1668 if (stcb) {
1669 sack->sack_delay = stcb->asoc.delayed_ack;
1670 sack->sack_freq = stcb->asoc.sack_freq;
1671 SCTP_TCB_UNLOCK(stcb);
1672 } else {
1673 SCTP_INP_RLOCK(inp);
1674 sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1675 sack->sack_freq = inp->sctp_ep.sctp_sack_freq;
1676 SCTP_INP_RUNLOCK(inp);
1677 }
1678 *optsize = sizeof(*sack);
1679 }
1680 break;
1681
1682 case SCTP_GET_SNDBUF_USE:
1683 {
1684 struct sctp_sockstat *ss;
1685
1686 SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize);
1687 SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id);
1688
1689 if (stcb) {
1690 ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size;
1691 ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue +
1692 stcb->asoc.size_on_all_streams);
1693 SCTP_TCB_UNLOCK(stcb);
1694 } else {
1695 error = ENOTCONN;
1696 }
1697 *optsize = sizeof(struct sctp_sockstat);
1698 }
1699 break;
1700 case SCTP_MAX_BURST:
1701 {
1702 uint8_t *value;
1703
1704 SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize);
1705
1706 SCTP_INP_RLOCK(inp);
1707 *value = inp->sctp_ep.max_burst;
1708 SCTP_INP_RUNLOCK(inp);
1709 *optsize = sizeof(uint8_t);
1710 }
1711 break;
1712 case SCTP_MAXSEG:
1713 {
1714 struct sctp_assoc_value *av;
1715 int ovh;
1716
1717 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1718 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1719
1720 if (stcb) {
1721 av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc);
1722 SCTP_TCB_UNLOCK(stcb);
1723 } else {
1724 SCTP_INP_RLOCK(inp);
1725 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1726 ovh = SCTP_MED_OVERHEAD;
1727 } else {
1728 ovh = SCTP_MED_V4_OVERHEAD;
1729 }
1730 if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT)
1731 av->assoc_value = 0;
1732 else
1733 av->assoc_value = inp->sctp_frag_point - ovh;
1734 SCTP_INP_RUNLOCK(inp);
1735 }
1736 *optsize = sizeof(struct sctp_assoc_value);
1737 }
1738 break;
1739 case SCTP_GET_STAT_LOG:
1740#ifdef SCTP_STAT_LOGGING
1741 error = sctp_fill_stat_log(optval, optsize);
1742#else
1743 error = EOPNOTSUPP;
1744#endif
1745 break;
1746 case SCTP_EVENTS:
1747 {
1748 struct sctp_event_subscribe *events;
1749
1750 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize);
1751 memset(events, 0, sizeof(*events));
1752 SCTP_INP_RLOCK(inp);
1753 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT))
1754 events->sctp_data_io_event = 1;
1755
1756 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT))
1757 events->sctp_association_event = 1;
1758
1759 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT))
1760 events->sctp_address_event = 1;
1761
1762 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
1763 events->sctp_send_failure_event = 1;
1764
1765 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR))
1766 events->sctp_peer_error_event = 1;
1767
1768 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
1769 events->sctp_shutdown_event = 1;
1770
1771 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT))
1772 events->sctp_partial_delivery_event = 1;
1773
1774 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
1775 events->sctp_adaptation_layer_event = 1;
1776
1777 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
1778 events->sctp_authentication_event = 1;
1779
1780 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
1781 events->sctp_stream_reset_events = 1;
1782 SCTP_INP_RUNLOCK(inp);
1783 *optsize = sizeof(struct sctp_event_subscribe);
1784 }
1785 break;
1786
1787 case SCTP_ADAPTATION_LAYER:
1788 {
1789 uint32_t *value;
1790
1791 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1792
1793 SCTP_INP_RLOCK(inp);
1794 *value = inp->sctp_ep.adaptation_layer_indicator;
1795 SCTP_INP_RUNLOCK(inp);
1796 *optsize = sizeof(uint32_t);
1797 }
1798 break;
1799 case SCTP_SET_INITIAL_DBG_SEQ:
1800 {
1801 uint32_t *value;
1802
1803 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1804 SCTP_INP_RLOCK(inp);
1805 *value = inp->sctp_ep.initial_sequence_debug;
1806 SCTP_INP_RUNLOCK(inp);
1807 *optsize = sizeof(uint32_t);
1808 }
1809 break;
1810 case SCTP_GET_LOCAL_ADDR_SIZE:
1811 {
1812 uint32_t *value;
1813
1814 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1815 SCTP_INP_RLOCK(inp);
1816 *value = sctp_count_max_addresses(inp);
1817 SCTP_INP_RUNLOCK(inp);
1818 *optsize = sizeof(uint32_t);
1819 }
1820 break;
1821 case SCTP_GET_REMOTE_ADDR_SIZE:
1822 {
1823 uint32_t *value;
1824 size_t size;
1825 struct sctp_nets *net;
1826
1827 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1828 /* FIXME MT: change to sctp_assoc_value? */
1829 SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value);
1830
1831 if (stcb) {
1832 size = 0;
1833 /* Count the sizes */
1834 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1835 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
1836 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
1837 size += sizeof(struct sockaddr_in6);
1838 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
1839 size += sizeof(struct sockaddr_in);
1840 } else {
1841 /* huh */
1842 break;
1843 }
1844 }
1845 SCTP_TCB_UNLOCK(stcb);
1846 *value = (uint32_t) size;
1847 } else {
1848 error = ENOTCONN;
1849 }
1850 *optsize = sizeof(uint32_t);
1851 }
1852 break;
1853 case SCTP_GET_PEER_ADDRESSES:
1854 /*
1855		 * Get the address information; an array is passed in that
1856		 * we fill up and pack.
1857 */
1858 {
1859 size_t cpsz, left;
1860 struct sockaddr_storage *sas;
1861 struct sctp_nets *net;
1862 struct sctp_getaddresses *saddr;
1863
1864 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
1865 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
1866
1867 if (stcb) {
1868 left = (*optsize) - sizeof(struct sctp_getaddresses);
1869 *optsize = sizeof(struct sctp_getaddresses);
1870 sas = (struct sockaddr_storage *)&saddr->addr[0];
1871
1872 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1873 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) ||
1874 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
1875 cpsz = sizeof(struct sockaddr_in6);
1876 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
1877 cpsz = sizeof(struct sockaddr_in);
1878 } else {
1879 /* huh */
1880 break;
1881 }
1882 if (left < cpsz) {
1883 /* not enough room. */
1884 break;
1885 }
1886 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_NEEDS_MAPPED_V4) &&
1887 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
1888 /* Must map the address */
1889 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
1890 (struct sockaddr_in6 *)sas);
1891 } else {
1892 memcpy(sas, &net->ro._l_addr, cpsz);
1893 }
1894 ((struct sockaddr_in *)sas)->sin_port = stcb->rport;
1895
1896 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
1897 left -= cpsz;
1898 *optsize += cpsz;
1899 }
1900 SCTP_TCB_UNLOCK(stcb);
1901 } else {
1902 error = ENOENT;
1903 }
1904 }
1905 break;
1906 case SCTP_GET_LOCAL_ADDRESSES:
1907 {
1908 size_t limit, actual;
1909 struct sockaddr_storage *sas;
1910 struct sctp_getaddresses *saddr;
1911
1912 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
1913 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
1914
1915 sas = (struct sockaddr_storage *)&saddr->addr[0];
1916 limit = *optsize - sizeof(sctp_assoc_t);
1917 actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
1918 if (stcb) {
1919 SCTP_TCB_UNLOCK(stcb);
1920 }
1921 *optsize = sizeof(struct sockaddr_storage) + actual;
1922 }
1923 break;
1924 case SCTP_PEER_ADDR_PARAMS:
1925 {
1926 struct sctp_paddrparams *paddrp;
1927 struct sctp_nets *net;
1928
1929 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
1930 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
1931
1932 net = NULL;
1933 if (stcb) {
1934 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
1935 } else {
1936 /*
1937 * We increment here since
1938				 * sctp_findassociation_ep_addr() will do a
1939				 * decrement if it finds the stcb, as long as
1940				 * the locked tcb (last argument) is NOT a
1941				 * TCB, aka NULL.
1942 */
1943 SCTP_INP_INCR_REF(inp);
1944 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL);
1945 if (stcb == NULL) {
1946 SCTP_INP_DECR_REF(inp);
1947 }
1948 }
1949
1950 if (stcb) {
1951			/* Applies to the specific association */
1952 paddrp->spp_flags = 0;
1953 if (net) {
1954 int ovh;
1955
1956 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1957 ovh = SCTP_MED_OVERHEAD;
1958 } else {
1959 ovh = SCTP_MED_V4_OVERHEAD;
1960 }
1961
1962
1963 paddrp->spp_pathmaxrxt = net->failure_threshold;
1964 paddrp->spp_pathmtu = net->mtu - ovh;
1965 /* get flags for HB */
1966 if (net->dest_state & SCTP_ADDR_NOHB)
1967 paddrp->spp_flags |= SPP_HB_DISABLE;
1968 else
1969 paddrp->spp_flags |= SPP_HB_ENABLE;
1970 /* get flags for PMTU */
1971 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
1972 paddrp->spp_flags |= SPP_PMTUD_ENABLE;
1973 } else {
1974 paddrp->spp_flags |= SPP_PMTUD_DISABLE;
1975 }
1976#ifdef INET
1977 if (net->ro._l_addr.sin.sin_family == AF_INET) {
1978 paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc;
1979 paddrp->spp_flags |= SPP_IPV4_TOS;
1980 }
1981#endif
1982#ifdef INET6
1983 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
1984 paddrp->spp_ipv6_flowlabel = net->tos_flowlabel;
1985 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
1986 }
1987#endif
1988 } else {
1989 /*
1990 * No destination so return default
1991 * value
1992 */
1993 int cnt = 0;
1994
1995 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
1996 paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc);
1997#ifdef INET
1998 paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc;
1999 paddrp->spp_flags |= SPP_IPV4_TOS;
2000#endif
2001#ifdef INET6
2002 paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel;
2003 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2004#endif
2005 /* default settings should be these */
2006 if (stcb->asoc.hb_is_disabled == 0) {
2007 paddrp->spp_flags |= SPP_HB_ENABLE;
2008 } else {
2009 paddrp->spp_flags |= SPP_HB_DISABLE;
2010 }
2011 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2012 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
2013 cnt++;
2014 }
2015 }
2016 if (cnt) {
2017 paddrp->spp_flags |= SPP_PMTUD_ENABLE;
2018 }
2019 }
2020 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
2021 paddrp->spp_assoc_id = sctp_get_associd(stcb);
2022 SCTP_TCB_UNLOCK(stcb);
2023 } else {
2024 /* Use endpoint defaults */
2025 SCTP_INP_RLOCK(inp);
2026 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
2027 paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
2028 paddrp->spp_assoc_id = (sctp_assoc_t) 0;
2029 /* get inp's default */
2030#ifdef INET
2031 paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos;
2032 paddrp->spp_flags |= SPP_IPV4_TOS;
2033#endif
2034#ifdef INET6
2035 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2036 paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
2037 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2038 }
2039#endif
2040 /* can't return this */
2041 paddrp->spp_pathmtu = 0;
2042
2043 /* default behavior, no stcb */
2044 paddrp->spp_flags = SPP_PMTUD_ENABLE;
2045
2046 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
2047 paddrp->spp_flags |= SPP_HB_ENABLE;
2048 } else {
2049 paddrp->spp_flags |= SPP_HB_DISABLE;
2050 }
2051 SCTP_INP_RUNLOCK(inp);
2052 }
2053 *optsize = sizeof(struct sctp_paddrparams);
2054 }
2055 break;
2056 case SCTP_GET_PEER_ADDR_INFO:
2057 {
2058 struct sctp_paddrinfo *paddri;
2059 struct sctp_nets *net;
2060
2061 SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize);
2062 SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id);
2063
2064 net = NULL;
2065 if (stcb) {
2066 net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address);
2067 } else {
2068 /*
2069 * We increment here since
2070			 * sctp_findassociation_ep_addr() will do a
2071			 * decrement if it finds the stcb, as long as
2072			 * the locked tcb (last argument) is NOT a
2073			 * TCB, i.e. NULL.
2074 */
2075 SCTP_INP_INCR_REF(inp);
2076 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL);
2077 if (stcb == NULL) {
2078 SCTP_INP_DECR_REF(inp);
2079 }
2080 }
2081
2082 if ((stcb) && (net)) {
2083 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB);
2084 paddri->spinfo_cwnd = net->cwnd;
2085 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
2086 paddri->spinfo_rto = net->RTO;
2087 paddri->spinfo_assoc_id = sctp_get_associd(stcb);
2088 SCTP_TCB_UNLOCK(stcb);
2089 } else {
2090 if (stcb) {
2091 SCTP_TCB_UNLOCK(stcb);
2092 }
2093 error = ENOENT;
2094 }
2095 *optsize = sizeof(struct sctp_paddrinfo);
2096 }
2097 break;
2098 case SCTP_PCB_STATUS:
2099 {
2100 struct sctp_pcbinfo *spcb;
2101
2102 SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize);
2103 sctp_fill_pcbinfo(spcb);
2104 *optsize = sizeof(struct sctp_pcbinfo);
2105 }
2106 break;
2107
2108 case SCTP_STATUS:
2109 {
2110 struct sctp_nets *net;
2111 struct sctp_status *sstat;
2112
2113 SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize);
2114 SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id);
2115
2116 if (stcb == NULL) {
2117 error = EINVAL;
2118 break;
2119 }
2120 /*
2121 * I think passing the state is fine since
2122			 * sctp_constants.h will be available to
2123			 * userland.
2124 */
2125 sstat->sstat_state = stcb->asoc.state;
2126 sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
2127 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
2128 /*
2129 * We can't include chunks that have been passed to
2130 * the socket layer. Only things in queue.
2131 */
2132 sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue +
2133 stcb->asoc.cnt_on_all_streams);
2134
2135
2136 sstat->sstat_instrms = stcb->asoc.streamincnt;
2137 sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
2138 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
2139 memcpy(&sstat->sstat_primary.spinfo_address,
2140 &stcb->asoc.primary_destination->ro._l_addr,
2141 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
2142 net = stcb->asoc.primary_destination;
2143 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
2144 /*
2145 * Again the user can get info from sctp_constants.h
2146 * for what the state of the network is.
2147 */
2148 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
2149 sstat->sstat_primary.spinfo_cwnd = net->cwnd;
2150 sstat->sstat_primary.spinfo_srtt = net->lastsa;
2151 sstat->sstat_primary.spinfo_rto = net->RTO;
2152 sstat->sstat_primary.spinfo_mtu = net->mtu;
2153 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
2154 SCTP_TCB_UNLOCK(stcb);
2155 *optsize = sizeof(*sstat);
2156 }
2157 break;
2158 case SCTP_RTOINFO:
2159 {
2160 struct sctp_rtoinfo *srto;
2161
2162 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize);
2163 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
2164
2165 if (stcb) {
2166 srto->srto_initial = stcb->asoc.initial_rto;
2167 srto->srto_max = stcb->asoc.maxrto;
2168 srto->srto_min = stcb->asoc.minrto;
2169 SCTP_TCB_UNLOCK(stcb);
2170 } else {
2171 SCTP_INP_RLOCK(inp);
2172 srto->srto_initial = inp->sctp_ep.initial_rto;
2173 srto->srto_max = inp->sctp_ep.sctp_maxrto;
2174 srto->srto_min = inp->sctp_ep.sctp_minrto;
2175 SCTP_INP_RUNLOCK(inp);
2176 }
2177 *optsize = sizeof(*srto);
2178 }
2179 break;
2180 case SCTP_ASSOCINFO:
2181 {
2182 struct sctp_assocparams *sasoc;
2183
2184 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize);
2185 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
2186
2187 if (stcb) {
2188 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
2189 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
2190 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
2191 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
2192 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life);
2193 SCTP_TCB_UNLOCK(stcb);
2194 } else {
2195 SCTP_INP_RLOCK(inp);
2196 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
2197 sasoc->sasoc_number_peer_destinations = 0;
2198 sasoc->sasoc_peer_rwnd = 0;
2199 sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv);
2200 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life);
2201 SCTP_INP_RUNLOCK(inp);
2202 }
2203 *optsize = sizeof(*sasoc);
2204 }
2205 break;
2206 case SCTP_DEFAULT_SEND_PARAM:
2207 {
2208 struct sctp_sndrcvinfo *s_info;
2209
2210 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize);
2211 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
2212
2213 if (stcb) {
2214 memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send));
2215 SCTP_TCB_UNLOCK(stcb);
2216 } else {
2217 SCTP_INP_RLOCK(inp);
2218 memcpy(s_info, &inp->def_send, sizeof(inp->def_send));
2219 SCTP_INP_RUNLOCK(inp);
2220 }
2221 *optsize = sizeof(*s_info);
2222 }
2223 break;
2224 case SCTP_INITMSG:
2225 {
2226 struct sctp_initmsg *sinit;
2227
2228 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize);
2229 SCTP_INP_RLOCK(inp);
2230 sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
2231 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
2232 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
2233 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
2234 SCTP_INP_RUNLOCK(inp);
2235 *optsize = sizeof(*sinit);
2236 }
2237 break;
2238 case SCTP_PRIMARY_ADDR:
2239 /* we allow a "get" operation on this */
2240 {
2241 struct sctp_setprim *ssp;
2242
2243 SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize);
2244 SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id);
2245
2246 if (stcb) {
2247 /* simply copy out the sockaddr_storage... */
2248 int len;
2249
2250 len = *optsize;
2251 if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len)
2252 len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len;
2253
2254 memcpy(&ssp->ssp_addr,
2255 &stcb->asoc.primary_destination->ro._l_addr,
2256 len);
2257 SCTP_TCB_UNLOCK(stcb);
2258 } else {
2259 error = EINVAL;
2260 }
2261 *optsize = sizeof(*ssp);
2262 }
2263 break;
2264
2265 case SCTP_HMAC_IDENT:
2266 {
2267 struct sctp_hmacalgo *shmac;
2268 sctp_hmaclist_t *hmaclist;
2269 uint32_t size;
2270 int i;
2271
2272 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize);
2273
2274 SCTP_INP_RLOCK(inp);
2275 hmaclist = inp->sctp_ep.local_hmacs;
2276 if (hmaclist == NULL) {
2277 /* no HMACs to return */
2278 *optsize = sizeof(*shmac);
2279 SCTP_INP_RUNLOCK(inp);
2280 break;
2281 }
2282 /* is there room for all of the hmac ids? */
2283 size = sizeof(*shmac) + (hmaclist->num_algo *
2284 sizeof(shmac->shmac_idents[0]));
2285 if ((size_t)(*optsize) < size) {
2286 error = EINVAL;
2287 SCTP_INP_RUNLOCK(inp);
2288 break;
2289 }
2290 /* copy in the list */
2291 for (i = 0; i < hmaclist->num_algo; i++)
2292 shmac->shmac_idents[i] = hmaclist->hmac[i];
2293 SCTP_INP_RUNLOCK(inp);
2294 *optsize = size;
2295 break;
2296 }
2297 case SCTP_AUTH_ACTIVE_KEY:
2298 {
2299 struct sctp_authkeyid *scact;
2300
2301 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize);
2302 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
2303
2304 if (stcb) {
2305 /* get the active key on the assoc */
2306 scact->scact_keynumber = stcb->asoc.authinfo.assoc_keyid;
2307 SCTP_TCB_UNLOCK(stcb);
2308 } else {
2309 /* get the endpoint active key */
2310 SCTP_INP_RLOCK(inp);
2311 scact->scact_keynumber = inp->sctp_ep.default_keyid;
2312 SCTP_INP_RUNLOCK(inp);
2313 }
2314 *optsize = sizeof(*scact);
2315 break;
2316 }
2317 case SCTP_LOCAL_AUTH_CHUNKS:
2318 {
2319 struct sctp_authchunks *sac;
2320 sctp_auth_chklist_t *chklist = NULL;
2321 size_t size = 0;
2322
2323 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2324 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2325
2326 if (stcb) {
2327 /* get off the assoc */
2328 chklist = stcb->asoc.local_auth_chunks;
2329 /* is there enough space? */
2330 size = sctp_auth_get_chklist_size(chklist);
2331 if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2332 error = EINVAL;
2333 } else {
2334 /* copy in the chunks */
2335 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2336 }
2337 SCTP_TCB_UNLOCK(stcb);
2338 } else {
2339 /* get off the endpoint */
2340 SCTP_INP_RLOCK(inp);
2341 chklist = inp->sctp_ep.local_auth_chunks;
2342 /* is there enough space? */
2343 size = sctp_auth_get_chklist_size(chklist);
2344 if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2345 error = EINVAL;
2346 } else {
2347 /* copy in the chunks */
2348 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2349 }
2350 SCTP_INP_RUNLOCK(inp);
2351 }
2352 *optsize = sizeof(struct sctp_authchunks) + size;
2353 break;
2354 }
2355 case SCTP_PEER_AUTH_CHUNKS:
2356 {
2357 struct sctp_authchunks *sac;
2358 sctp_auth_chklist_t *chklist = NULL;
2359 size_t size = 0;
2360
2361 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2362 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2363
2364 if (stcb) {
2365 /* get off the assoc */
2366 chklist = stcb->asoc.peer_auth_chunks;
2367 /* is there enough space? */
2368 size = sctp_auth_get_chklist_size(chklist);
2369 if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2370 error = EINVAL;
2371 } else {
2372 /* copy in the chunks */
2373 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2374 }
2375 SCTP_TCB_UNLOCK(stcb);
2376 } else {
2377 error = ENOENT;
2378 }
2379 *optsize = sizeof(struct sctp_authchunks) + size;
2380 break;
2381 }
2382
2383
2384 default:
2385 error = ENOPROTOOPT;
2386 *optsize = 0;
2387 break;
2388 } /* end switch (sopt->sopt_name) */
2389 return (error);
2390}
2391
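/*
 * sctp_setopt() handles the SOPT_SET side of the SCTP socket options.
 * The option value has already been copied into kernel space by
 * sctp_ctloutput(); this routine validates it and applies it either to
 * the endpoint (inp) or, when an association can be located, to that
 * association's TCB.  Returns 0 on success or an errno value.
 */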
2392static int
2393sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
2394 void *p)
2395{
2396 int error, set_opt;
2397 uint32_t *mopt;
2398 struct sctp_tcb *stcb = NULL;
2399 struct sctp_inpcb *inp;
2400 uint32_t vrf_id;
2401
2402 if (optval == NULL) {
2403 SCTP_PRINTF("optval is NULL\n");
2404 return (EINVAL);
2405 }
2406 inp = (struct sctp_inpcb *)so->so_pcb;
2407 if (inp == 0) {
2408 SCTP_PRINTF("inp is NULL?\n");
2409 return EINVAL;
2410 }
2411 vrf_id = inp->def_vrf_id;
2412
2413 error = 0;
2414 switch (optname) {
2415 case SCTP_NODELAY:
2416 case SCTP_AUTOCLOSE:
2417 case SCTP_AUTO_ASCONF:
2418 case SCTP_EXPLICIT_EOR:
2419 case SCTP_DISABLE_FRAGMENTS:
2420 case SCTP_USE_EXT_RCVINFO:
2421 case SCTP_I_WANT_MAPPED_V4_ADDR:
2422 /* copy in the option value */
2423 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
2424 set_opt = 0;
2425 if (error)
2426 break;
2427 switch (optname) {
2428 case SCTP_DISABLE_FRAGMENTS:
2429 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
2430 break;
2431 case SCTP_AUTO_ASCONF:
2432 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
2433 break;
2434 case SCTP_EXPLICIT_EOR:
2435 set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
2436 break;
2437 case SCTP_USE_EXT_RCVINFO:
2438 set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
2439 break;
2440 case SCTP_I_WANT_MAPPED_V4_ADDR:
2441 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2442 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
2443 } else {
2444 return (EINVAL);
2445 }
2446 break;
2447 case SCTP_NODELAY:
2448 set_opt = SCTP_PCB_FLAGS_NODELAY;
2449 break;
2450 case SCTP_AUTOCLOSE:
2451 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2452 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2453 return (EINVAL);
2454 }
2455 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
2456 /*
2457			 * The value is in ticks. Note this does not affect
2458 * old associations, only new ones.
2459 */
2460 inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt);
2461 break;
2462 }
2463 SCTP_INP_WLOCK(inp);
2464 if (*mopt != 0) {
2465 sctp_feature_on(inp, set_opt);
2466 } else {
2467 sctp_feature_off(inp, set_opt);
2468 }
2469 SCTP_INP_WUNLOCK(inp);
2470 break;
2471 case SCTP_PARTIAL_DELIVERY_POINT:
2472 {
2473 uint32_t *value;
2474
2475 SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
2476 if (*value > SCTP_SB_LIMIT_RCV(so)) {
2477 error = EINVAL;
2478 break;
2479 }
2480 inp->partial_delivery_point = *value;
2481 }
2482 break;
2483 case SCTP_FRAGMENT_INTERLEAVE:
2484 /* not yet until we re-write sctp_recvmsg() */
2485 {
2486 uint32_t *level;
2487
2488 SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize);
2489 if (*level == SCTP_FRAG_LEVEL_2) {
2490 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2491 sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2492 } else if (*level == SCTP_FRAG_LEVEL_1) {
2493 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2494 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2495 } else if (*level == SCTP_FRAG_LEVEL_0) {
2496 sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2497 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2498
2499 } else {
2500 error = EINVAL;
2501 }
2502 }
2503 break;
2504 case SCTP_CMT_ON_OFF:
2505 {
2506 struct sctp_assoc_value *av;
2507
2508 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2509 if (sctp_cmt_on_off) {
2510 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2511 if (stcb) {
2512 stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value;
2513 SCTP_TCB_UNLOCK(stcb);
2514 } else {
2515 error = ENOTCONN;
2516 }
2517 } else {
2518 error = ENOPROTOOPT;
2519 }
2520 }
2521 break;
2522 case SCTP_CLR_STAT_LOG:
2523#ifdef SCTP_STAT_LOGGING
2524 sctp_clr_stat_log();
2525#else
2526 error = EOPNOTSUPP;
2527#endif
2528 break;
2529 case SCTP_CONTEXT:
2530 {
2531 struct sctp_assoc_value *av;
2532
2533 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2534 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2535
2536 if (stcb) {
2537 stcb->asoc.context = av->assoc_value;
2538 SCTP_TCB_UNLOCK(stcb);
2539 } else {
2540 SCTP_INP_WLOCK(inp);
2541 inp->sctp_context = av->assoc_value;
2542 SCTP_INP_WUNLOCK(inp);
2543 }
2544 }
2545 break;
2546 case SCTP_VRF_ID:
2547 {
2548 uint32_t *default_vrfid;
2549
2550 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize);
2551 if (*default_vrfid > SCTP_MAX_VRF_ID) {
2552 error = EINVAL;
2553 break;
2554 }
2555 inp->def_vrf_id = *default_vrfid;
2556 break;
2557 }
2558 case SCTP_DEL_VRF_ID:
2559 {
2560 error = EOPNOTSUPP;
2561 break;
2562 }
2563 case SCTP_ADD_VRF_ID:
2564 {
2565 error = EOPNOTSUPP;
2566 break;
2567 }
2568 case SCTP_DELAYED_SACK:
2569 {
2570 struct sctp_sack_info *sack;
2571
2572 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize);
2573 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
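			/*
			 * A non-zero delay is clamped up to at least one clock
			 * tick; a value of zero leaves the current delay alone.
			 */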
2574 if (stcb) {
2575 if (sack->sack_delay) {
2576 if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
2577 sack->sack_delay = TICKS_TO_MSEC(1);
2578 }
2579 stcb->asoc.delayed_ack = sack->sack_delay;
2580 }
2581 if (sack->sack_freq) {
2582 stcb->asoc.sack_freq = sack->sack_freq;
2583 }
2584 SCTP_TCB_UNLOCK(stcb);
2585 } else {
2586 SCTP_INP_WLOCK(inp);
2587 if (sack->sack_delay) {
2588 if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
2589 sack->sack_delay = TICKS_TO_MSEC(1);
2590 }
2591 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay);
2592 }
2593 if (sack->sack_freq) {
2594 inp->sctp_ep.sctp_sack_freq = sack->sack_freq;
2595 }
2596 SCTP_INP_WUNLOCK(inp);
2597 }
2598 break;
2599 }
2600 case SCTP_AUTH_CHUNK:
2601 {
2602 struct sctp_authchunk *sauth;
2603
2604 SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize);
2605
2606 SCTP_INP_WLOCK(inp);
2607 if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks))
2608 error = EINVAL;
2609 SCTP_INP_WUNLOCK(inp);
2610 break;
2611 }
2612 case SCTP_AUTH_KEY:
2613 {
2614 struct sctp_authkey *sca;
2615 struct sctp_keyhead *shared_keys;
2616 sctp_sharedkey_t *shared_key;
2617 sctp_key_t *key = NULL;
2618 size_t size;
2619
2620 SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize);
2621 SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id);
2622 size = optsize - sizeof(*sca);
2623
2624 if (stcb) {
2625 /* set it on the assoc */
2626 shared_keys = &stcb->asoc.shared_keys;
2627 /* clear the cached keys for this key id */
2628 sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
2629 /*
2630 * create the new shared key and
2631 * insert/replace it
2632 */
2633 if (size > 0) {
2634 key = sctp_set_key(sca->sca_key, (uint32_t) size);
2635 if (key == NULL) {
2636 error = ENOMEM;
2637 SCTP_TCB_UNLOCK(stcb);
2638 break;
2639 }
2640 }
2641 shared_key = sctp_alloc_sharedkey();
2642 if (shared_key == NULL) {
2643 sctp_free_key(key);
2644 error = ENOMEM;
2645 SCTP_TCB_UNLOCK(stcb);
2646 break;
2647 }
2648 shared_key->key = key;
2649 shared_key->keyid = sca->sca_keynumber;
2650 sctp_insert_sharedkey(shared_keys, shared_key);
2651 SCTP_TCB_UNLOCK(stcb);
2652 } else {
2653 /* set it on the endpoint */
2654 SCTP_INP_WLOCK(inp);
2655 shared_keys = &inp->sctp_ep.shared_keys;
2656 /*
2657 * clear the cached keys on all assocs for
2658 * this key id
2659 */
2660 sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber);
2661 /*
2662 * create the new shared key and
2663 * insert/replace it
2664 */
2665 if (size > 0) {
2666 key = sctp_set_key(sca->sca_key, (uint32_t) size);
2667 if (key == NULL) {
2668 error = ENOMEM;
2669 SCTP_INP_WUNLOCK(inp);
2670 break;
2671 }
2672 }
2673 shared_key = sctp_alloc_sharedkey();
2674 if (shared_key == NULL) {
2675 sctp_free_key(key);
2676 error = ENOMEM;
2677 SCTP_INP_WUNLOCK(inp);
2678 break;
2679 }
2680 shared_key->key = key;
2681 shared_key->keyid = sca->sca_keynumber;
2682 sctp_insert_sharedkey(shared_keys, shared_key);
2683 SCTP_INP_WUNLOCK(inp);
2684 }
2685 break;
2686 }
2687 case SCTP_HMAC_IDENT:
2688 {
2689 struct sctp_hmacalgo *shmac;
2690 sctp_hmaclist_t *hmaclist;
2691 uint32_t hmacid;
2692 size_t size, i, found;
2693
2694 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize);
2695 size = (optsize - sizeof(*shmac)) / sizeof(shmac->shmac_idents[0]);
2696 hmaclist = sctp_alloc_hmaclist(size);
2697 if (hmaclist == NULL) {
2698 error = ENOMEM;
2699 break;
2700 }
2701 for (i = 0; i < size; i++) {
2702 hmacid = shmac->shmac_idents[i];
2703 if (sctp_auth_add_hmacid(hmaclist, (uint16_t) hmacid)) {
2704					/* invalid HMACs were found */
2705 error = EINVAL;
2706 sctp_free_hmaclist(hmaclist);
2707 goto sctp_set_hmac_done;
2708 }
2709 }
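			/*
			 * The mandatory HMAC (SHA-1) must be present in any
			 * list we accept; otherwise reject it with EINVAL.
			 */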
2710 found = 0;
2711 for (i = 0; i < hmaclist->num_algo; i++) {
2712 if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) {
2713 /* already in list */
2714 found = 1;
2715 }
2716 }
2717 if (!found) {
2718 sctp_free_hmaclist(hmaclist);
2719 error = EINVAL;
2720 break;
2721 }
2722 /* set it on the endpoint */
2723 SCTP_INP_WLOCK(inp);
2724 if (inp->sctp_ep.local_hmacs)
2725 sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2726 inp->sctp_ep.local_hmacs = hmaclist;
2727 SCTP_INP_WUNLOCK(inp);
2728 sctp_set_hmac_done:
2729 break;
2730 }
2731 case SCTP_AUTH_ACTIVE_KEY:
2732 {
2733 struct sctp_authkeyid *scact;
2734
2735 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize);
2736 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
2737
2738 /* set the active key on the right place */
2739 if (stcb) {
2740 /* set the active key on the assoc */
2741 if (sctp_auth_setactivekey(stcb, scact->scact_keynumber))
2742 error = EINVAL;
2743 SCTP_TCB_UNLOCK(stcb);
2744 } else {
2745 /* set the active key on the endpoint */
2746 SCTP_INP_WLOCK(inp);
2747 if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber))
2748 error = EINVAL;
2749 SCTP_INP_WUNLOCK(inp);
2750 }
2751 break;
2752 }
2753 case SCTP_AUTH_DELETE_KEY:
2754 {
2755 struct sctp_authkeyid *scdel;
2756
2757 SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize);
2758 SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id);
2759
2760 /* delete the key from the right place */
2761 if (stcb) {
2762 if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber))
2763 error = EINVAL;
2764 SCTP_TCB_UNLOCK(stcb);
2765 } else {
2766 SCTP_INP_WLOCK(inp);
2767 if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber))
2768 error = EINVAL;
2769 SCTP_INP_WUNLOCK(inp);
2770 }
2771 break;
2772 }
2773
2774 case SCTP_RESET_STREAMS:
2775 {
2776 struct sctp_stream_reset *strrst;
2777 uint8_t send_in = 0, send_tsn = 0, send_out = 0;
2778 int i;
2779
2780 SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize);
2781 SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id);
2782
2783 if (stcb == NULL) {
2784 error = ENOENT;
2785 break;
2786 }
2787 if (stcb->asoc.peer_supports_strreset == 0) {
2788 /*
2789				 * The peer does not support stream reset, so
2790				 * return "protocol not supported"; this refers
2791				 * to the feature and this peer, not to the
2792				 * socket request in general.
2793 */
2794 error = EPROTONOSUPPORT;
2795 SCTP_TCB_UNLOCK(stcb);
2796 break;
2797 }
2798 if (stcb->asoc.stream_reset_outstanding) {
2799 error = EALREADY;
2800 SCTP_TCB_UNLOCK(stcb);
2801 break;
2802 }
2803 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
2804 send_in = 1;
2805 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
2806 send_out = 1;
2807 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
2808 send_in = 1;
2809 send_out = 1;
2810 } else if (strrst->strrst_flags == SCTP_RESET_TSN) {
2811 send_tsn = 1;
2812 } else {
2813 error = EINVAL;
2814 SCTP_TCB_UNLOCK(stcb);
2815 break;
2816 }
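			/*
			 * Every stream number in the request must exist in the
			 * direction(s) being reset.
			 */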
2817 for (i = 0; i < strrst->strrst_num_streams; i++) {
2818				if ((send_in) &&
2820				    (strrst->strrst_list[i] > stcb->asoc.streamincnt)) {
2821 error = EINVAL;
2822 goto get_out;
2823 }
2824 if ((send_out) &&
2825 (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) {
2826 error = EINVAL;
2827 goto get_out;
2828 }
2829 }
2830 if (error) {
2831 get_out:
2832 SCTP_TCB_UNLOCK(stcb);
2833 break;
2834 }
2835 error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
2836 strrst->strrst_list,
2837 send_out, (stcb->asoc.str_reset_seq_in - 3),
2838 send_in, send_tsn);
2839
2840 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ);
2841 SCTP_TCB_UNLOCK(stcb);
2842 }
2843 break;
2844
2845 case SCTP_CONNECT_X:
2846 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
2847 error = EINVAL;
2848 break;
2849 }
2850 error = sctp_do_connect_x(so, inp, optval, optsize, p, 0);
2851 break;
2852
2853 case SCTP_CONNECT_X_DELAYED:
2854 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
2855 error = EINVAL;
2856 break;
2857 }
2858 error = sctp_do_connect_x(so, inp, optval, optsize, p, 1);
2859 break;
2860
2861 case SCTP_CONNECT_X_COMPLETE:
2862 {
2863 struct sockaddr *sa;
2864 struct sctp_nets *net;
2865
2866 /* FIXME MT: check correct? */
2867 SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize);
2868
2869 /* find tcb */
2870 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
2871 SCTP_INP_RLOCK(inp);
2872 stcb = LIST_FIRST(&inp->sctp_asoc_list);
2873 if (stcb) {
2874 SCTP_TCB_LOCK(stcb);
2875 net = sctp_findnet(stcb, sa);
2876 }
2877 SCTP_INP_RUNLOCK(inp);
2878 } else {
2879 /*
2880 * We increment here since
2881			 * sctp_findassociation_ep_addr() will do a
2882			 * decrement if it finds the stcb, as long as
2883			 * the locked tcb (last argument) is NOT a
2884			 * TCB, i.e. NULL.
2885 */
2886 SCTP_INP_INCR_REF(inp);
2887 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
2888 if (stcb == NULL) {
2889 SCTP_INP_DECR_REF(inp);
2890 }
2891 }
2892
2893 if (stcb == NULL) {
2894 error = ENOENT;
2895 break;
2896 }
2897 if (stcb->asoc.delayed_connection == 1) {
2898 stcb->asoc.delayed_connection = 0;
2899 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
2900 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb,
2901 stcb->asoc.primary_destination,
2902 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9);
2903 sctp_send_initiate(inp, stcb);
2904 } else {
2905 /*
2906 * already expired or did not use delayed
2907 * connectx
2908 */
2909 error = EALREADY;
2910 }
2911 SCTP_TCB_UNLOCK(stcb);
2912 }
2913 break;
2914 case SCTP_MAX_BURST:
2915 {
2916 uint8_t *burst;
2917
2918 SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize);
2919
2920 SCTP_INP_WLOCK(inp);
2921 if (*burst) {
2922 inp->sctp_ep.max_burst = *burst;
2923 }
2924 SCTP_INP_WUNLOCK(inp);
2925 }
2926 break;
2927 case SCTP_MAXSEG:
2928 {
2929 struct sctp_assoc_value *av;
2930 int ovh;
2931
2932 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2933 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2934
2935 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2936 ovh = SCTP_MED_OVERHEAD;
2937 } else {
2938 ovh = SCTP_MED_V4_OVERHEAD;
2939 }
2940 if (stcb) {
2941 if (av->assoc_value) {
2942 stcb->asoc.sctp_frag_point = (av->assoc_value + ovh);
2943 } else {
2944 stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
2945 }
2946 SCTP_TCB_UNLOCK(stcb);
2947 } else {
2948 SCTP_INP_WLOCK(inp);
2949 /*
2950 * FIXME MT: I think this is not in tune
2951 * with the API ID
2952 */
2953 if (av->assoc_value) {
2954 inp->sctp_frag_point = (av->assoc_value + ovh);
2955 } else {
2956 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
2957 }
2958 SCTP_INP_WUNLOCK(inp);
2959 }
2960 }
2961 break;
2962 case SCTP_EVENTS:
2963 {
2964 struct sctp_event_subscribe *events;
2965
2966 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize);
2967
2968 SCTP_INP_WLOCK(inp);
2969 if (events->sctp_data_io_event) {
2970 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
2971 } else {
2972 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
2973 }
2974
2975 if (events->sctp_association_event) {
2976 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
2977 } else {
2978 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
2979 }
2980
2981 if (events->sctp_address_event) {
2982 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
2983 } else {
2984 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
2985 }
2986
2987 if (events->sctp_send_failure_event) {
2988 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
2989 } else {
2990 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
2991 }
2992
2993 if (events->sctp_peer_error_event) {
2994 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR);
2995 } else {
2996 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR);
2997 }
2998
2999 if (events->sctp_shutdown_event) {
3000 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
3001 } else {
3002 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
3003 }
3004
3005 if (events->sctp_partial_delivery_event) {
3006 sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
3007 } else {
3008 sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
3009 }
3010
3011 if (events->sctp_adaptation_layer_event) {
3012 sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
3013 } else {
3014 sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
3015 }
3016
3017 if (events->sctp_authentication_event) {
3018 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT);
3019 } else {
3020 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
3021 }
3022
3023 if (events->sctp_stream_reset_events) {
3024 sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
3025 } else {
3026 sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
3027 }
3028 SCTP_INP_WUNLOCK(inp);
3029 }
3030 break;
3031
3032 case SCTP_ADAPTATION_LAYER:
3033 {
3034 struct sctp_setadaptation *adap_bits;
3035
3036 SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize);
3037 SCTP_INP_WLOCK(inp);
3038 inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind;
3039 SCTP_INP_WUNLOCK(inp);
3040 }
3041 break;
3042#ifdef SCTP_DEBUG
3043 case SCTP_SET_INITIAL_DBG_SEQ:
3044 {
3045 uint32_t *vvv;
3046
3047 SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize);
3048 SCTP_INP_WLOCK(inp);
3049 inp->sctp_ep.initial_sequence_debug = *vvv;
3050 SCTP_INP_WUNLOCK(inp);
3051 }
3052 break;
3053#endif
3054 case SCTP_DEFAULT_SEND_PARAM:
3055 {
3056 struct sctp_sndrcvinfo *s_info;
3057
3058 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize);
3059 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
3060
3061 if (stcb) {
3062 if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) {
3063 memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send)));
3064 } else {
3065 error = EINVAL;
3066 }
3067 SCTP_TCB_UNLOCK(stcb);
3068 } else {
3069 SCTP_INP_WLOCK(inp);
3070 memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send)));
3071 SCTP_INP_WUNLOCK(inp);
3072 }
3073 }
3074 break;
3075 case SCTP_PEER_ADDR_PARAMS:
3076		/* Applies to the specific association */
3077 {
3078 struct sctp_paddrparams *paddrp;
3079 struct sctp_nets *net;
3080
3081 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize);
3082 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
3083 net = NULL;
3084 if (stcb) {
3085 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
3086 } else {
3087 /*
3088 * We increment here since
3089			 * sctp_findassociation_ep_addr() will do a
3090			 * decrement if it finds the stcb, as long as
3091			 * the locked tcb (last argument) is NOT a
3092			 * TCB, i.e. NULL.
3093 */
3094 SCTP_INP_INCR_REF(inp);
3095 stcb = sctp_findassociation_ep_addr(&inp,
3096 (struct sockaddr *)&paddrp->spp_address,
3097 &net, NULL, NULL);
3098 if (stcb == NULL) {
3099 SCTP_INP_DECR_REF(inp);
3100 }
3101 }
3102 /* sanity checks */
3103 if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) {
3104 if (stcb)
3105 SCTP_TCB_UNLOCK(stcb);
3106 return (EINVAL);
3107 }
3108 if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) {
3109 if (stcb)
3110 SCTP_TCB_UNLOCK(stcb);
3111 return (EINVAL);
3112 }
3113 if (stcb) {
3114 /************************TCB SPECIFIC SET ******************/
3115 /*
3116				 * Do we change the timer for HB?  We run
3117				 * only one.
3118 */
3119 int ovh = 0;
3120
3121 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3122 ovh = SCTP_MED_OVERHEAD;
3123 } else {
3124 ovh = SCTP_MED_V4_OVERHEAD;
3125 }
3126
3127 if (paddrp->spp_hbinterval)
3128 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
3129 else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3130 stcb->asoc.heart_beat_delay = 0;
3131
3132 /* network sets ? */
3133 if (net) {
3134 /************************NET SPECIFIC SET ******************/
3135 if (paddrp->spp_flags & SPP_HB_DEMAND) {
3136 /* on demand HB */
3137 (void)sctp_send_hb(stcb, 1, net);
3138 }
3139 if (paddrp->spp_flags & SPP_HB_DISABLE) {
3140 net->dest_state |= SCTP_ADDR_NOHB;
3141 }
3142 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3143 net->dest_state &= ~SCTP_ADDR_NOHB;
3144 }
3145 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
3146 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3147 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3148 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3149 }
3150 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3151 net->mtu = paddrp->spp_pathmtu + ovh;
3152 if (net->mtu < stcb->asoc.smallest_mtu) {
3153#ifdef SCTP_PRINT_FOR_B_AND_M
3154 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n",
3155 net->mtu);
3156#endif
3157 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3158 }
3159 }
3160 }
3161 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3162 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3163 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3164 }
3165 }
3166 if (paddrp->spp_pathmaxrxt)
3167 net->failure_threshold = paddrp->spp_pathmaxrxt;
3168#ifdef INET
3169 if (paddrp->spp_flags & SPP_IPV4_TOS) {
3170 if (net->ro._l_addr.sin.sin_family == AF_INET) {
3171 net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc;
3172 }
3173 }
3174#endif
3175#ifdef INET6
3176 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
3177 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
3178 net->tos_flowlabel = paddrp->spp_ipv6_flowlabel;
3179 }
3180 }
3181#endif
3182 } else {
3183 /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/
3184 if (paddrp->spp_pathmaxrxt)
3185 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
3186
3187 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3188 /* Turn back on the timer */
3189 stcb->asoc.hb_is_disabled = 0;
3190 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3191 }
3192 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
3193 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3194 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3195 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3196 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3197 }
3198 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3199 net->mtu = paddrp->spp_pathmtu + ovh;
3200 if (net->mtu < stcb->asoc.smallest_mtu) {
3201#ifdef SCTP_PRINT_FOR_B_AND_M
3202 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n",
3203 net->mtu);
3204#endif
3205 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3206 }
3207 }
3208 }
3209 }
3210 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3211 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3212 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3213 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3214 }
3215 }
3216 }
3217 if (paddrp->spp_flags & SPP_HB_DISABLE) {
3218 int cnt_of_unconf = 0;
3219 struct sctp_nets *lnet;
3220
3221 stcb->asoc.hb_is_disabled = 1;
3222 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
3223 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
3224 cnt_of_unconf++;
3225 }
3226 }
3227 /*
3228 * stop the timer ONLY if we
3229 * have no unconfirmed
3230 * addresses
3231 */
3232 if (cnt_of_unconf == 0) {
3233 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3234 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
3235 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11);
3236 }
3237 }
3238 }
3239 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3240 /* start up the timer. */
3241 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3242 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3243 }
3244 }
3245#ifdef INET
3246 if (paddrp->spp_flags & SPP_IPV4_TOS)
3247 stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc;
3248#endif
3249#ifdef INET6
3250 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL)
3251 stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel;
3252#endif
3253
3254 }
3255 SCTP_TCB_UNLOCK(stcb);
3256 } else {
3257 /************************NO TCB, SET TO default stuff ******************/
3258 SCTP_INP_WLOCK(inp);
3259 /*
3260 * For the TOS/FLOWLABEL stuff you set it
3261 * with the options on the socket
3262 */
3263 if (paddrp->spp_pathmaxrxt) {
3264 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
3265 }
3266 if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3267 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0;
3268 else if (paddrp->spp_hbinterval)
3269 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval);
3270
3271 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3272 sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3273
3274 } else if (paddrp->spp_flags & SPP_HB_DISABLE) {
3275 sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3276 }
3277 SCTP_INP_WUNLOCK(inp);
3278 }
3279 }
3280 break;
3281 case SCTP_RTOINFO:
3282 {
3283 struct sctp_rtoinfo *srto;
3284 uint32_t new_init, new_min, new_max;
3285
3286 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize);
3287 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
3288
3289 if (stcb) {
3290 if (srto->srto_initial)
3291 new_init = srto->srto_initial;
3292 else
3293 new_init = stcb->asoc.initial_rto;
3294 if (srto->srto_max)
3295 new_max = srto->srto_max;
3296 else
3297 new_max = stcb->asoc.maxrto;
3298 if (srto->srto_min)
3299 new_min = srto->srto_min;
3300 else
3301 new_min = stcb->asoc.minrto;
3302 if ((new_min <= new_init) && (new_init <= new_max)) {
3303 stcb->asoc.initial_rto = new_init;
3304 stcb->asoc.maxrto = new_max;
3305 stcb->asoc.minrto = new_min;
3306 } else {
3307 error = EDOM;
3308 }
3309 SCTP_TCB_UNLOCK(stcb);
3310 } else {
3311 SCTP_INP_WLOCK(inp);
3312 if (srto->srto_initial)
3313 new_init = srto->srto_initial;
3314 else
3315 new_init = inp->sctp_ep.initial_rto;
3316 if (srto->srto_max)
3317 new_max = srto->srto_max;
3318 else
3319 new_max = inp->sctp_ep.sctp_maxrto;
3320 if (srto->srto_min)
3321 new_min = srto->srto_min;
3322 else
3323 new_min = inp->sctp_ep.sctp_minrto;
3324 if ((new_min <= new_init) && (new_init <= new_max)) {
3325 inp->sctp_ep.initial_rto = new_init;
3326 inp->sctp_ep.sctp_maxrto = new_max;
3327 inp->sctp_ep.sctp_minrto = new_min;
3328 } else {
3329 error = EDOM;
3330 }
3331 SCTP_INP_WUNLOCK(inp);
3332 }
3333 }
3334 break;
3335 case SCTP_ASSOCINFO:
3336 {
3337 struct sctp_assocparams *sasoc;
3338
3339 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize);
3340 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
3341
3342 if (stcb) {
3343 if (sasoc->sasoc_asocmaxrxt)
3344 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
3345 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
3346 sasoc->sasoc_peer_rwnd = 0;
3347 sasoc->sasoc_local_rwnd = 0;
3348 if (sasoc->sasoc_cookie_life) {
3349 if (sasoc->sasoc_cookie_life < 1000)
3350 sasoc->sasoc_cookie_life = 1000;
3351 stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
3352 }
3353 SCTP_TCB_UNLOCK(stcb);
3354 } else {
3355 SCTP_INP_WLOCK(inp);
3356 if (sasoc->sasoc_asocmaxrxt)
3357 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
3358 sasoc->sasoc_number_peer_destinations = 0;
3359 sasoc->sasoc_peer_rwnd = 0;
3360 sasoc->sasoc_local_rwnd = 0;
3361 if (sasoc->sasoc_cookie_life) {
3362 if (sasoc->sasoc_cookie_life < 1000)
3363 sasoc->sasoc_cookie_life = 1000;
3364 inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
3365 }
3366 SCTP_INP_WUNLOCK(inp);
3367 }
3368 }
3369 break;
3370 case SCTP_INITMSG:
3371 {
3372 struct sctp_initmsg *sinit;
3373
3374 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize);
3375 SCTP_INP_WLOCK(inp);
3376 if (sinit->sinit_num_ostreams)
3377 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
3378
3379 if (sinit->sinit_max_instreams)
3380 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
3381
3382 if (sinit->sinit_max_attempts)
3383 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
3384
3385 if (sinit->sinit_max_init_timeo)
3386 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
3387 SCTP_INP_WUNLOCK(inp);
3388 }
3389 break;
3390 case SCTP_PRIMARY_ADDR:
3391 {
3392 struct sctp_setprim *spa;
3393 struct sctp_nets *net, *lnet;
3394
3395 SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize);
3396 SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id);
3397
3398 net = NULL;
3399 if (stcb) {
3400 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
3401 } else {
3402 /*
3403 * We increment here since
3404			 * sctp_findassociation_ep_addr() will do a
3405			 * decrement if it finds the stcb, as long as
3406			 * the locked tcb (last argument) is NOT a
3407			 * TCB, i.e. NULL.
3408 */
3409 SCTP_INP_INCR_REF(inp);
3410 stcb = sctp_findassociation_ep_addr(&inp,
3411 (struct sockaddr *)&spa->ssp_addr,
3412 &net, NULL, NULL);
3413 if (stcb == NULL) {
3414 SCTP_INP_DECR_REF(inp);
3415 }
3416 }
3417
3418 if ((stcb) && (net)) {
3419 if ((net != stcb->asoc.primary_destination) &&
3420 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
3421 /* Ok we need to set it */
3422 lnet = stcb->asoc.primary_destination;
3423 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) {
3424 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3425 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
3426 }
3427 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
3428 }
3429 }
3430 } else {
3431 error = EINVAL;
3432 }
3433 if (stcb) {
3434 SCTP_TCB_UNLOCK(stcb);
3435 }
3436 }
3437 break;
3438 case SCTP_SET_DYNAMIC_PRIMARY:
3439 {
3440 union sctp_sockstore *ss;
3441
3442 error = priv_check_cred(curthread->td_ucred,
3443 PRIV_NETINET_RESERVEDPORT,
3444 SUSER_ALLOWJAIL);
3445 if (error)
3446 break;
3447
3448 SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize);
3449 /* SUPER USER CHECK? */
3450 error = sctp_dynamic_set_primary(&ss->sa, vrf_id);
3451 }
3452 break;
3453 case SCTP_SET_PEER_PRIMARY_ADDR:
3454 {
3455 struct sctp_setpeerprim *sspp;
3456
3457 SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize);
3458 SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id);
3459 if (stcb != NULL) {
3460 struct sctp_ifa *ifa;
3461
3462 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr,
3463 stcb->asoc.vrf_id, 0);
3464 if (ifa == NULL) {
3465 error = EINVAL;
3466 goto out_of_it;
3467 }
3468 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
3469 /*
3470 * Must validate the ifa found is in
3471 * our ep
3472 */
3473 struct sctp_laddr *laddr;
3474 int found = 0;
3475
3476 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3477 if (laddr->ifa == NULL) {
3478 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
3479 __FUNCTION__);
3480 continue;
3481 }
3482 if (laddr->ifa == ifa) {
3483 found = 1;
3484 break;
3485 }
3486 }
3487 if (!found) {
3488 error = EINVAL;
3489 goto out_of_it;
3490 }
3491 }
3492 if (sctp_set_primary_ip_address_sa(stcb,
3493 (struct sockaddr *)&sspp->sspp_addr) != 0) {
3494 error = EINVAL;
3495 }
3496 out_of_it:
3497 SCTP_TCB_UNLOCK(stcb);
3498 } else {
3499 error = EINVAL;
3500 }
3501
3502 }
3503 break;
3504 case SCTP_BINDX_ADD_ADDR:
3505 {
3506 struct sctp_getaddresses *addrs;
3507 struct sockaddr *addr_touse;
3508 struct sockaddr_in sin;
3509
3510 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
3511
3512 /* see if we're bound all already! */
3513 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3514 error = EINVAL;
3515 break;
3516 }
3517 /* Is the VRF one we have */
3518 addr_touse = addrs->addr;
3519#if defined(INET6)
3520 if (addrs->addr->sa_family == AF_INET6) {
3521 struct sockaddr_in6 *sin6;
3522
3523 if (addrs->addr->sa_len != sizeof(struct sockaddr_in6)) {
3524 error = EINVAL;
3525 break;
3526 }
3527 sin6 = (struct sockaddr_in6 *)addr_touse;
3528 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3529 in6_sin6_2_sin(&sin, sin6);
3530 addr_touse = (struct sockaddr *)&sin;
3531 }
3532 }
3533#endif
3534 if (addrs->addr->sa_family == AF_INET) {
3535 if (addrs->addr->sa_len != sizeof(struct sockaddr_in)) {
3536 error = EINVAL;
3537 break;
3538 }
3539 }
3540 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3541
3542 if (p == NULL) {
3543 /* Can't get proc for Net/Open BSD */
3544 error = EINVAL;
3545 break;
3546 }
3547 error = sctp_inpcb_bind(so, addr_touse, p);
3548 break;
3549 }
3550 /*
3551 * No locks required here since bind and mgmt_ep_sa
3552 * all do their own locking. If we do something for
3553 * the FIX: below we may need to lock in that case.
3554 */
3555 if (addrs->sget_assoc_id == 0) {
3556 /* add the address */
3557 struct sctp_inpcb *lep;
3558
3559 ((struct sockaddr_in *)addr_touse)->sin_port = inp->sctp_lport;
3560 lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
3561 if (lep != NULL) {
3562 /*
3563 * We must decrement the refcount
3564 * since we have the ep already and
3565 * are binding. No remove going on
3566 * here.
3567 */
3568 SCTP_INP_DECR_REF(inp);
3569 }
3570 if (lep == inp) {
3571 /* already bound to it.. ok */
3572 break;
3573 } else if (lep == NULL) {
3574 ((struct sockaddr_in *)addr_touse)->sin_port = 0;
3575 error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
3576 SCTP_ADD_IP_ADDRESS, vrf_id);
3577 } else {
3578 error = EADDRINUSE;
3579 }
3580 if (error)
3581 break;
3582
3583 } else {
3584 /*
3585 * FIX: decide whether we allow assoc based
3586 * bindx
3587 */
3588 }
3589 }
3590 break;
3591 case SCTP_BINDX_REM_ADDR:
3592 {
3593 struct sctp_getaddresses *addrs;
3594 struct sockaddr *addr_touse;
3595 struct sockaddr_in sin;
3596
3597 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
3598 /* see if we're bound all already! */
3599 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3600 error = EINVAL;
3601 break;
3602 }
3603 addr_touse = addrs->addr;
3604#if defined(INET6)
3605 if (addrs->addr->sa_family == AF_INET6) {
3606 struct sockaddr_in6 *sin6;
3607
3608 if (addrs->addr->sa_len != sizeof(struct sockaddr_in6)) {
3609 error = EINVAL;
3610 break;
3611 }
3612 sin6 = (struct sockaddr_in6 *)addr_touse;
3613 if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
3614 in6_sin6_2_sin(&sin, sin6);
3615 addr_touse = (struct sockaddr *)&sin;
3616 }
3617 }
3618#endif
3619 if (addrs->addr->sa_family == AF_INET) {
3620 if (addrs->addr->sa_len != sizeof(struct sockaddr_in)) {
3621 error = EINVAL;
3622 break;
3623 }
3624 }
3625 /*
3626 * No lock required mgmt_ep_sa does its own locking.
3627 * If the FIX: below is ever changed we may need to
3628 * lock before calling association level binding.
3629 */
3630 if (addrs->sget_assoc_id == 0) {
3631 /* delete the address */
3632 error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
3633 SCTP_DEL_IP_ADDRESS,
3634 vrf_id);
3635 } else {
3636 /*
3637 * FIX: decide whether we allow assoc based
3638 * bindx
3639 */
3640 }
3641 }
3642 break;
3643 default:
3644 error = ENOPROTOOPT;
3645 break;
3646 } /* end switch (opt) */
3647 return (error);
3648}
3649
3650
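/*
 * sctp_ctloutput() is the getsockopt(2)/setsockopt(2) entry point for SCTP
 * sockets.  Options at a level other than IPPROTO_SCTP are handed down to
 * ip_ctloutput()/ip6_ctloutput().  For SCTP options the value is copied
 * into a temporary kernel buffer, dispatched to sctp_setopt() or
 * sctp_getopt(), and, for gets, copied back out to the caller.
 */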
3651int
3652sctp_ctloutput(struct socket *so, struct sockopt *sopt)
3653{
3654 void *optval = NULL;
3655 size_t optsize = 0;
3656 struct sctp_inpcb *inp;
3657 void *p;
3658 int error = 0;
3659
3660 inp = (struct sctp_inpcb *)so->so_pcb;
3661 if (inp == 0) {
3662		/* Return the same as TCP would, since we are not set up. */
3663 return (ECONNRESET);
3664 }
3665 if (sopt->sopt_level != IPPROTO_SCTP) {
3666 /* wrong proto level... send back up to IP */
3667#ifdef INET6
3668 if (INP_CHECK_SOCKAF(so, AF_INET6))
3669 error = ip6_ctloutput(so, sopt);
3670 else
3671#endif /* INET6 */
3672 error = ip_ctloutput(so, sopt);
3673 return (error);
3674 }
3675 optsize = sopt->sopt_valsize;
3676 if (optsize) {
3677 SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT);
3678 if (optval == NULL) {
3679 return (ENOBUFS);
3680 }
3681 error = sooptcopyin(sopt, optval, optsize, optsize);
3682 if (error) {
3683 SCTP_FREE(optval, SCTP_M_SOCKOPT);
3684 goto out;
3685 }
3686 }
3687 p = (void *)sopt->sopt_td;
3688 if (sopt->sopt_dir == SOPT_SET) {
3689 error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p);
3690 } else if (sopt->sopt_dir == SOPT_GET) {
3691 error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p);
3692 } else {
3693 error = EINVAL;
3694 }
3695 if ((error == 0) && (optval != NULL)) {
3696 error = sooptcopyout(sopt, optval, optsize);
3697 SCTP_FREE(optval, SCTP_M_SOCKOPT);
3698 } else if (optval != NULL) {
3699 SCTP_FREE(optval, SCTP_M_SOCKOPT);
3700 }
3701out:
3702 return (error);
3703}
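/*
 * Illustrative only: a minimal userland sketch (not kernel code) of how a
 * "get" style option reaches sctp_getopt() through the path above.  It
 * assumes "fd" is an already-connected one-to-one style SCTP socket and
 * uses the SCTP_STATUS option handled earlier in this file:
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <netinet/sctp.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	struct sctp_status status;
 *	socklen_t len = sizeof(status);
 *
 *	memset(&status, 0, sizeof(status));
 *	if (getsockopt(fd, IPPROTO_SCTP, SCTP_STATUS, &status, &len) == 0)
 *		printf("state %d, unacked chunks %u\n",
 *		    (int)status.sstat_state, (unsigned)status.sstat_unackdata);
 */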
3704
3705
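/*
 * sctp_connect() implements connect(2) for one-to-one style SCTP sockets.
 * It binds an ephemeral port if the endpoint is still unbound, refuses a
 * second connect on an already-connected TCP-model socket, allocates a new
 * association for the peer address and sends the INIT chunk.  An existing
 * association for that address yields EALREADY.
 */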
3706static int
3707sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
3708{
3709 int error = 0;
3710 int create_lock_on = 0;
3711 uint32_t vrf_id;
3712 struct sctp_inpcb *inp;
3713 struct sctp_tcb *stcb = NULL;
3714
3715 inp = (struct sctp_inpcb *)so->so_pcb;
3716 if (inp == 0) {
3717		/* Return the same as TCP would, since we are not set up. */
3718 return (ECONNRESET);
3719 }
3720 if (addr == NULL)
3721 return EINVAL;
3722
3723 if ((addr->sa_family == AF_INET6) && (addr->sa_len != sizeof(struct sockaddr_in6))) {
3724 return (EINVAL);
3725 }
3726 if ((addr->sa_family == AF_INET) && (addr->sa_len != sizeof(struct sockaddr_in))) {
3727 return (EINVAL);
3728 }
3729 SCTP_ASOC_CREATE_LOCK(inp);
3730 create_lock_on = 1;
3731
3732 SCTP_INP_INCR_REF(inp);
3733 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3734 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
3735 /* Should I really unlock ? */
3736 error = EFAULT;
3737 goto out_now;
3738 }
3739#ifdef INET6
3740 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
3741 (addr->sa_family == AF_INET6)) {
3742 error = EINVAL;
3743 goto out_now;
3744 }
3745#endif /* INET6 */
3746 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
3747 SCTP_PCB_FLAGS_UNBOUND) {
3748		/* Bind an ephemeral port */
3749 error = sctp_inpcb_bind(so, NULL, p);
3750 if (error) {
3751 goto out_now;
3752 }
3753 }
3754 /* Now do we connect? */
3755 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
3756 error = EINVAL;
3757 goto out_now;
3758 }
3759 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3760 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
3761 /* We are already connected AND the TCP model */
3762 error = EADDRINUSE;
3763 goto out_now;
3764 }
3765 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3766 SCTP_INP_RLOCK(inp);
3767 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3768 SCTP_INP_RUNLOCK(inp);
3769 } else {
3770 /*
3771 * We increment here since sctp_findassociation_ep_addr()
3772	 * will do a decrement if it finds the stcb, as long as the
3773	 * locked tcb (last argument) is NOT a TCB, i.e. NULL.
3774 */
3775 SCTP_INP_INCR_REF(inp);
3776 stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
3777 if (stcb == NULL) {
3778 SCTP_INP_DECR_REF(inp);
3779 } else {
3780 SCTP_TCB_LOCK(stcb);
3781 }
3782 }
3783 if (stcb != NULL) {
3784		/* Already have or am bringing up an association */
3785 error = EALREADY;
3786 goto out_now;
3787 }
3788 vrf_id = inp->def_vrf_id;
3789 /* We are GOOD to go */
3790 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id);
3791 if (stcb == NULL) {
3792 /* Gak! no memory */
3793 goto out_now;
3794 }
3795 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
3796 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
3797 /* Set the connected flag so we can queue data */
3798 soisconnecting(so);
3799 }
3800 stcb->asoc.state = SCTP_STATE_COOKIE_WAIT;
3801 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
3802
3803 /* initialize authentication parameters for the assoc */
3804 sctp_initialize_auth_params(inp, stcb);
3805
3806 sctp_send_initiate(inp, stcb);
3807 SCTP_TCB_UNLOCK(stcb);
3808out_now:
3809 if (create_lock_on) {
3810 SCTP_ASOC_CREATE_UNLOCK(inp);
3811 }
3812 SCTP_INP_DECR_REF(inp);
3813 return error;
3814}
3815
3816int
3817sctp_listen(struct socket *so, int backlog, struct thread *p)
3818{
3819 /*
3820 * Note this module depends on the protocol processing being called
3821 * AFTER any socket level flags and backlog are applied to the
3822 * socket. The traditional way that the socket flags are applied is
3823 * AFTER protocol processing. We have made a change to the
3824 * sys/kern/uipc_socket.c module to reverse this but this MUST be in
3825 * place if the socket API for SCTP is to work properly.
3826 */
3827
3828 int error = 0;
3829 struct sctp_inpcb *inp;
3830
3831 inp = (struct sctp_inpcb *)so->so_pcb;
3832 if (inp == 0) {
3833		/* Return the same as TCP would, since we are not set up. */
3834 return (ECONNRESET);
3835 }
3836 SCTP_INP_RLOCK(inp);
3837#ifdef SCTP_LOCK_LOGGING
3838 sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
3839#endif
3840 SOCK_LOCK(so);
3841 error = solisten_proto_check(so);
3842 if (error) {
3843 SOCK_UNLOCK(so);
3844 SCTP_INP_RUNLOCK(inp);
3845 return (error);
3846 }
3847 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
3848 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
3849 /* We are already connected AND the TCP model */
3850 SCTP_INP_RUNLOCK(inp);
3851 SOCK_UNLOCK(so);
3852 return (EADDRINUSE);
3853 }
3854 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3855 /* We must do a bind. */
3856 SOCK_UNLOCK(so);
3857 SCTP_INP_RUNLOCK(inp);
3858 if ((error = sctp_inpcb_bind(so, NULL, p))) {
3859 /* bind error, probably perm */
3860 return (error);
3861 }
3862 SOCK_LOCK(so);
3863 } else {
3864 SCTP_INP_RUNLOCK(inp);
3865 }
3866 /* It appears for 7.0 and on, we must always call this. */
3867 solisten_proto(so, backlog);
3868 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
3869 /* remove the ACCEPTCONN flag for one-to-many sockets */
3870 so->so_options &= ~SO_ACCEPTCONN;
3871 }
3872 if (backlog == 0) {
3873 /* turning off listen */
3874 so->so_options &= ~SO_ACCEPTCONN;
3875 }
3876 SOCK_UNLOCK(so);
3877 return (error);
3878}
3879
3880static int sctp_defered_wakeup_cnt = 0;
3881
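/*
 * sctp_accept() hands back the peer's primary destination address for the
 * association being accepted and performs any socket wakeups that were
 * deferred while the PCB had SCTP_PCB_FLAGS_DONT_WAKE set.  One-to-many
 * style sockets are rejected with ENOTSUP.
 */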
3882int
3883sctp_accept(struct socket *so, struct sockaddr **addr)
3884{
3885 struct sctp_tcb *stcb;
3886 struct sctp_inpcb *inp;
3887 union sctp_sockstore store;
3888
3889 int error;
3890
3891 inp = (struct sctp_inpcb *)so->so_pcb;
3892
3893 if (inp == 0) {
3894 return (ECONNRESET);
3895 }
3896 SCTP_INP_RLOCK(inp);
3897 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
3898 SCTP_INP_RUNLOCK(inp);
3899 return (ENOTSUP);
3900 }
3901 if (so->so_state & SS_ISDISCONNECTED) {
3902 SCTP_INP_RUNLOCK(inp);
3903 return (ECONNABORTED);
3904 }
3905 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3906 if (stcb == NULL) {
3907 SCTP_INP_RUNLOCK(inp);
3908 return (ECONNRESET);
3909 }
3910 SCTP_TCB_LOCK(stcb);
3911 SCTP_INP_RUNLOCK(inp);
3912 store = stcb->asoc.primary_destination->ro._l_addr;
3913 SCTP_TCB_UNLOCK(stcb);
3914 if (store.sa.sa_family == AF_INET) {
3915 struct sockaddr_in *sin;
3916
3917 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
3918 sin->sin_family = AF_INET;
3919 sin->sin_len = sizeof(*sin);
3920 sin->sin_port = ((struct sockaddr_in *)&store)->sin_port;
3921 sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr;
3922 *addr = (struct sockaddr *)sin;
3923 } else {
3924 struct sockaddr_in6 *sin6;
3925
3926 SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
3927 sin6->sin6_family = AF_INET6;
3928 sin6->sin6_len = sizeof(*sin6);
3929 sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port;
3930
3931 sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr;
3932 if ((error = sa6_recoverscope(sin6)) != 0) {
3933 SCTP_FREE_SONAME(sin6);
3934 return (error);
3935 }
3936 *addr = (struct sockaddr *)sin6;
3937 }
3938 /* Wake any delayed sleep action */
3939 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
3940 SCTP_INP_WLOCK(inp);
3941 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
3942 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
3943 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
3944 SCTP_INP_WUNLOCK(inp);
3945 SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
3946 if (sowriteable(inp->sctp_socket)) {
3947 sowwakeup_locked(inp->sctp_socket);
3948 } else {
3949 SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
3950 }
3951 SCTP_INP_WLOCK(inp);
3952 }
3953 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
3954 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
3955 SCTP_INP_WUNLOCK(inp);
3956 SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
3957 if (soreadable(inp->sctp_socket)) {
3958 sctp_defered_wakeup_cnt++;
3959 sorwakeup_locked(inp->sctp_socket);
3960 } else {
3961 SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
3962 }
3963 SCTP_INP_WLOCK(inp);
3964 }
3965 SCTP_INP_WUNLOCK(inp);
3966 }
3967 return (0);
3968}
3969
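/*
 * getsockname(2) entry: report the local address.  A bound-all socket that
 * is connected reports whatever source address selection picks for the
 * first IPv4 peer network; an unconnected bound-all socket reports
 * INADDR_ANY; a socket bound to specific addresses reports the first IPv4
 * address on its bound-address list.
 */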
3970int
3971sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
3972{
3973 struct sockaddr_in *sin;
3974 uint32_t vrf_id;
3975 struct sctp_inpcb *inp;
3976 struct sctp_ifa *sctp_ifa;
3977
3978 /*
3979 * Do the malloc first in case it blocks.
3980 */
3981 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
3982 sin->sin_family = AF_INET;
3983 sin->sin_len = sizeof(*sin);
3984 inp = (struct sctp_inpcb *)so->so_pcb;
 3985 if (inp == NULL) {
 3986 SCTP_FREE_SONAME(sin);
 3987 return (ECONNRESET);
3988 }
3989 SCTP_INP_RLOCK(inp);
3990 sin->sin_port = inp->sctp_lport;
3991 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3992 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3993 struct sctp_tcb *stcb;
3994 struct sockaddr_in *sin_a;
3995 struct sctp_nets *net;
3996 int fnd;
3997
3998 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3999 if (stcb == NULL) {
4000 goto notConn;
4001 }
4002 fnd = 0;
4003 sin_a = NULL;
4004 SCTP_TCB_LOCK(stcb);
4005 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4006 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
 4007 if (sin_a == NULL)
 4008 /* cannot happen; the check only quiets Coverity */
 4009 continue;
4010
4011 if (sin_a->sin_family == AF_INET) {
4012 fnd = 1;
4013 break;
4014 }
4015 }
4016 if ((!fnd) || (sin_a == NULL)) {
 4017 /* no usable IPv4 peer address; report INADDR_ANY */
4018 SCTP_TCB_UNLOCK(stcb);
4019 goto notConn;
4020 }
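 /*
  * Bound-all and connected: ask source address selection which local
  * address would be used to reach this peer network and report it.
  */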
4021 vrf_id = inp->def_vrf_id;
4022 sctp_ifa = sctp_source_address_selection(inp,
4023 stcb,
4024 (sctp_route_t *) & net->ro,
4025 net, 0, vrf_id);
4026 if (sctp_ifa) {
4027 sin->sin_addr = sctp_ifa->address.sin.sin_addr;
4028 sctp_free_ifa(sctp_ifa);
4029 }
4030 SCTP_TCB_UNLOCK(stcb);
4031 } else {
 4032 /* In the bound-all case, report INADDR_ANY (0). */
4033 notConn:
4034 sin->sin_addr.s_addr = 0;
4035 }
4036
4037 } else {
4038 /* Take the first IPv4 address in the list */
4039 struct sctp_laddr *laddr;
4040 int fnd = 0;
4041
4042 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4043 if (laddr->ifa->address.sa.sa_family == AF_INET) {
4044 struct sockaddr_in *sin_a;
4045
4046 sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa;
4047 sin->sin_addr = sin_a->sin_addr;
4048 fnd = 1;
4049 break;
4050 }
4051 }
4052 if (!fnd) {
4053 SCTP_FREE_SONAME(sin);
4054 SCTP_INP_RUNLOCK(inp);
 4055 return (ENOENT);
4056 }
4057 }
4058 SCTP_INP_RUNLOCK(inp);
4059 (*addr) = (struct sockaddr *)sin;
4060 return (0);
4061}
4062
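/*
 * getpeername(2) entry: only connected one-to-one style sockets get here.
 * Report the first IPv4 address found in the association's list of peer
 * networks, or ENOENT if the peer has no IPv4 address.
 */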
4063int
4064sctp_peeraddr(struct socket *so, struct sockaddr **addr)
4065{
4066 struct sockaddr_in *sin = (struct sockaddr_in *)*addr;
4067 int fnd;
4068 struct sockaddr_in *sin_a;
4069 struct sctp_inpcb *inp;
4070 struct sctp_tcb *stcb;
4071 struct sctp_nets *net;
4072
 4073 /* Validate the PCB before doing the malloc, which may block. */
4074 inp = (struct sctp_inpcb *)so->so_pcb;
4075 if ((inp == NULL) ||
4076 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
 4077 /* One-to-many (UDP model) sockets and listeners drop out here */
4078 return (ENOTCONN);
4079 }
4080 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4081 sin->sin_family = AF_INET;
4082 sin->sin_len = sizeof(*sin);
4083
 4084 /* Re-fetch the PCB in case the malloc blocked. */
4085 inp = (struct sctp_inpcb *)so->so_pcb;
 4086 if (inp == NULL) {
 4087 SCTP_FREE_SONAME(sin);
 4088 return (ECONNRESET);
4089 }
4090 SCTP_INP_RLOCK(inp);
4091 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4092 if (stcb) {
4093 SCTP_TCB_LOCK(stcb);
4094 }
4095 SCTP_INP_RUNLOCK(inp);
4096 if (stcb == NULL) {
4097 SCTP_FREE_SONAME(sin);
 4098 return (ECONNRESET);
4099 }
4100 fnd = 0;
4101 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4102 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4103 if (sin_a->sin_family == AF_INET) {
4104 fnd = 1;
4105 sin->sin_port = stcb->rport;
4106 sin->sin_addr = sin_a->sin_addr;
4107 break;
4108 }
4109 }
4110 SCTP_TCB_UNLOCK(stcb);
4111 if (!fnd) {
4112 /* No IPv4 address */
4113 SCTP_FREE_SONAME(sin);
 4114 return (ENOENT);
4115 }
4116 (*addr) = (struct sockaddr *)sin;
4117 return (0);
4118}
4119
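/*
 * Socket layer entry points for SCTP.  The protocol switch entry for
 * IPPROTO_SCTP points at this table so that socket system calls are
 * dispatched to the sctp_* handlers above; the shape of such a protosw
 * entry is roughly (illustrative fragment only):
 *
 *	.pr_protocol = IPPROTO_SCTP,
 *	.pr_usrreqs = &sctp_usrreqs,
 */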
4120struct pr_usrreqs sctp_usrreqs = {
4121 .pru_abort = sctp_abort,
4122 .pru_accept = sctp_accept,
4123 .pru_attach = sctp_attach,
4124 .pru_bind = sctp_bind,
4125 .pru_connect = sctp_connect,
4126 .pru_control = in_control,
4127 .pru_close = sctp_close,
4128 .pru_detach = sctp_close,
4129 .pru_sopoll = sopoll_generic,
4130 .pru_disconnect = sctp_disconnect,
4131 .pru_listen = sctp_listen,
4132 .pru_peeraddr = sctp_peeraddr,
4133 .pru_send = sctp_sendm,
4134 .pru_shutdown = sctp_shutdown,
4135 .pru_sockaddr = sctp_ingetaddr,
4136 .pru_sosend = sctp_sosend,
4137 .pru_soreceive = sctp_soreceive
4138};