1/*-
2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_usrreq.c,v 1.48 2005/03/07 23:26:08 itojun Exp $ */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_usrreq.c 179783 2008-06-14 07:58:05Z rrs $");
35#include <netinet/sctp_os.h>
36#include <sys/proc.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctp_header.h>
39#include <netinet/sctp_var.h>
40#if defined(INET6)
41#include <netinet6/sctp6_var.h>
42#endif
43#include <netinet/sctp_sysctl.h>
44#include <netinet/sctp_output.h>
45#include <netinet/sctp_uio.h>
46#include <netinet/sctp_asconf.h>
47#include <netinet/sctputil.h>
48#include <netinet/sctp_indata.h>
49#include <netinet/sctp_timer.h>
50#include <netinet/sctp_auth.h>
51#include <netinet/sctp_bsd_addr.h>
52#include <netinet/sctp_cc_functions.h>
53
54
55
56
57void
58sctp_init(void)
59{
60 u_long sb_max_adj;
61
62 bzero(&SCTP_BASE_STATS, sizeof(struct sctpstat));
63
64 /* Initialize and modify the sysctled variables */
65 sctp_init_sysctls();
66
67 if ((nmbclusters / 8) > SCTP_ASOC_MAX_CHUNKS_ON_QUEUE)
68 SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue) = (nmbclusters / 8);
69 /*
70	 * Allow a user to take no more than 1/2 the number of clusters or
71	 * SB_MAX, whichever is smaller, for the send window.
72 */
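	/*
	 * sb_max_adj mirrors the socket-buffer layer's own adjustment of
	 * SB_MAX: scale by MCLBYTES / (MSIZE + MCLBYTES) so the limit
	 * accounts for the mbuf header overhead carried with each cluster.
	 */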
73 sb_max_adj = (u_long)((u_quad_t) (SB_MAX) * MCLBYTES / (MSIZE + MCLBYTES));
74 SCTP_BASE_SYSCTL(sctp_sendspace) = min(sb_max_adj,
75 (((uint32_t) nmbclusters / 2) * SCTP_DEFAULT_MAXSEGMENT));
76 /*
77	 * Now for the recv window: should we take the same amount, or
78	 * should I use 1/2 of SB_MAX instead in the min above? For
79	 * now I will just copy.
80 */
81 SCTP_BASE_SYSCTL(sctp_recvspace) = SCTP_BASE_SYSCTL(sctp_sendspace);
82
83 SCTP_BASE_VAR(first_time) = 0;
84 SCTP_BASE_VAR(sctp_pcb_initialized) = 0;
85 sctp_pcb_init();
86#if defined(SCTP_PACKET_LOGGING)
87 SCTP_BASE_VAR(packet_log_writers) = 0;
88 SCTP_BASE_VAR(packet_log_end) = 0;
89 bzero(&SCTP_BASE_VAR(packet_log_buffer), SCTP_PACKET_LOG_SIZE);
90#endif
91
92
93}
94
95void
96sctp_finish(void)
97{
98 sctp_pcb_finish();
99}
100
101/*
102 * cleanup of the SCTP_BASE_INFO() structure.
103 * Assumes that the SCTP_BASE_INFO() lock is held.
104 */
105void
106sctp_pcbinfo_cleanup(void)
107{
108 /* free the hash tables */
109 if (SCTP_BASE_INFO(sctp_asochash) != NULL)
110 SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_asochash), SCTP_BASE_INFO(hashasocmark));
111 if (SCTP_BASE_INFO(sctp_ephash) != NULL)
112 SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_ephash), SCTP_BASE_INFO(hashmark));
113 if (SCTP_BASE_INFO(sctp_tcpephash) != NULL)
114 SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_tcpephash), SCTP_BASE_INFO(hashtcpmark));
115 if (SCTP_BASE_INFO(sctp_restarthash) != NULL)
116 SCTP_HASH_FREE(SCTP_BASE_INFO(sctp_restarthash), SCTP_BASE_INFO(hashrestartmark));
117}
118
119
120void
121sctp_pathmtu_adjustment(struct sctp_inpcb *inp,
122 struct sctp_tcb *stcb,
123 struct sctp_nets *net,
124 uint16_t nxtsz)
125{
126 struct sctp_tmit_chunk *chk;
127
128 /* Adjust that too */
129 stcb->asoc.smallest_mtu = nxtsz;
130 /* now off to subtract IP_DF flag if needed */
131#ifdef SCTP_PRINT_FOR_B_AND_M
132 SCTP_PRINTF("sctp_pathmtu_adjust called inp:%p stcb:%p net:%p nxtsz:%d\n",
133 inp, stcb, net, nxtsz);
134#endif
135 TAILQ_FOREACH(chk, &stcb->asoc.send_queue, sctp_next) {
136 if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
137 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
138 }
139 }
140 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
141 if ((chk->send_size + IP_HDR_SIZE) > nxtsz) {
142 /*
143 * For this guy we also mark for immediate resend
144			 * since we sent too big a chunk
145 */
146 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK;
147 if (chk->sent != SCTP_DATAGRAM_RESEND) {
148 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
149 }
150 chk->sent = SCTP_DATAGRAM_RESEND;
151 chk->rec.data.doing_fast_retransmit = 0;
152 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
153 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PMTU,
154 chk->whoTo->flight_size,
155 chk->book_size,
156 (uintptr_t) chk->whoTo,
157 chk->rec.data.TSN_seq);
158 }
159 /* Clear any time so NO RTT is being done */
160 chk->do_rtt = 0;
161 sctp_flight_size_decrease(chk);
162 sctp_total_flight_decrease(stcb, chk);
163 }
164 }
165}
166
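/*
 * Handle an ICMP "fragmentation needed" (unreachable/needfrag) error for an
 * existing association: verify the vtag, shrink the path MTU of the
 * reporting destination, mark queued chunks that no longer fit as
 * fragmentable/retransmittable, and restart the PMTU-raise timer if it was
 * running.  Called with the TCB locked; the lock is released before return.
 */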
167static void
168sctp_notify_mbuf(struct sctp_inpcb *inp,
169 struct sctp_tcb *stcb,
170 struct sctp_nets *net,
171 struct ip *ip,
172 struct sctphdr *sh)
173{
174 struct icmp *icmph;
175 int totsz, tmr_stopped = 0;
176 uint16_t nxtsz;
177
178 /* protection */
179 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
180 (ip == NULL) || (sh == NULL)) {
181 if (stcb != NULL) {
182 SCTP_TCB_UNLOCK(stcb);
183 }
184 return;
185 }
186 /* First job is to verify the vtag matches what I would send */
187 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
188 SCTP_TCB_UNLOCK(stcb);
189 return;
190 }
191 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
192 sizeof(struct ip)));
193 if (icmph->icmp_type != ICMP_UNREACH) {
194 /* We only care about unreachable */
195 SCTP_TCB_UNLOCK(stcb);
196 return;
197 }
198 if (icmph->icmp_code != ICMP_UNREACH_NEEDFRAG) {
199		/* not an unreachable message due to fragmentation */
200 SCTP_TCB_UNLOCK(stcb);
201 return;
202 }
203 totsz = ip->ip_len;
204
205 nxtsz = ntohs(icmph->icmp_nextmtu);
206 if (nxtsz == 0) {
207 /*
208		 * old-style router that does not tell us what the next MTU
209		 * size is. Rats, we will have to guess (in an educated
210		 * fashion, of course)
211 */
212 nxtsz = find_next_best_mtu(totsz);
213 }
214 /* Stop any PMTU timer */
215 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
216 tmr_stopped = 1;
217 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
218 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_1);
219 }
220 /* Adjust destination size limit */
221 if (net->mtu > nxtsz) {
222 net->mtu = nxtsz;
223 }
224 /* now what about the ep? */
225 if (stcb->asoc.smallest_mtu > nxtsz) {
226#ifdef SCTP_PRINT_FOR_B_AND_M
227 SCTP_PRINTF("notify_mbuf (ICMP) calls sctp_pathmtu_adjust mtu:%d\n",
228 nxtsz);
229#endif
230 sctp_pathmtu_adjustment(inp, stcb, net, nxtsz);
231 }
232 if (tmr_stopped)
233 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
234
235 SCTP_TCB_UNLOCK(stcb);
236}
237
238
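/*
 * Process the remaining ICMP unreachable codes for an association.  Network
 * and host class errors mark the reporting destination unreachable (and
 * clear its PF state when CMT-PF is enabled); protocol/port unreachable is
 * treated as an out-of-the-blue abort and the association is freed.  The
 * TCB lock is always released (or the TCB destroyed) before returning.
 */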
239void
240sctp_notify(struct sctp_inpcb *inp,
241 struct ip *ip,
242 struct sctphdr *sh,
243 struct sockaddr *to,
244 struct sctp_tcb *stcb,
245 struct sctp_nets *net)
246{
247#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
248 struct socket *so;
249
250#endif
251 /* protection */
252 int reason;
253 struct icmp *icmph;
254
255
256 if ((inp == NULL) || (stcb == NULL) || (net == NULL) ||
257 (sh == NULL) || (to == NULL)) {
258 if (stcb)
259 SCTP_TCB_UNLOCK(stcb);
260 return;
261 }
262 /* First job is to verify the vtag matches what I would send */
263 if (ntohl(sh->v_tag) != (stcb->asoc.peer_vtag)) {
264 SCTP_TCB_UNLOCK(stcb);
265 return;
266 }
267 icmph = (struct icmp *)((caddr_t)ip - (sizeof(struct icmp) -
268 sizeof(struct ip)));
269 if (icmph->icmp_type != ICMP_UNREACH) {
270 /* We only care about unreachable */
271 SCTP_TCB_UNLOCK(stcb);
272 return;
273 }
274 if ((icmph->icmp_code == ICMP_UNREACH_NET) ||
275 (icmph->icmp_code == ICMP_UNREACH_HOST) ||
276 (icmph->icmp_code == ICMP_UNREACH_NET_UNKNOWN) ||
277 (icmph->icmp_code == ICMP_UNREACH_HOST_UNKNOWN) ||
278 (icmph->icmp_code == ICMP_UNREACH_ISOLATED) ||
279 (icmph->icmp_code == ICMP_UNREACH_NET_PROHIB) ||
280 (icmph->icmp_code == ICMP_UNREACH_HOST_PROHIB) ||
281 (icmph->icmp_code == ICMP_UNREACH_FILTER_PROHIB)) {
282
283 /*
284		 * Hmm, reachability problems we must examine closely. If it is
285		 * not reachable, we may have lost a network. Or if there is
286		 * NO protocol at the other end named SCTP, well, we consider
287		 * it an OOTB abort.
288 */
289 if (net->dest_state & SCTP_ADDR_REACHABLE) {
290 /* Ok that destination is NOT reachable */
291 SCTP_PRINTF("ICMP (thresh %d/%d) takes interface %p down\n",
292 net->error_count,
293 net->failure_threshold,
294 net);
295
296 net->dest_state &= ~SCTP_ADDR_REACHABLE;
297 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
298 /*
299 * JRS 5/14/07 - If a destination is unreachable,
300 * the PF bit is turned off. This allows an
301 * unambiguous use of the PF bit for destinations
302 * that are reachable but potentially failed. If the
303 * destination is set to the unreachable state, also
304 * set the destination to the PF state.
305 */
306 /*
307 * Add debug message here if destination is not in
308 * PF state.
309 */
310 /* Stop any running T3 timers here? */
311 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
312 net->dest_state &= ~SCTP_ADDR_PF;
313 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
314 net);
315 }
316 net->error_count = net->failure_threshold + 1;
317 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
318 stcb, SCTP_FAILED_THRESHOLD,
319 (void *)net, SCTP_SO_NOT_LOCKED);
320 }
321 SCTP_TCB_UNLOCK(stcb);
322 } else if ((icmph->icmp_code == ICMP_UNREACH_PROTOCOL) ||
323 (icmph->icmp_code == ICMP_UNREACH_PORT)) {
324 /*
325		 * Here the peer is either playing tricks on us, including
326		 * using an address that belongs to someone who does not
327		 * support SCTP, OR it was a userland implementation that shut
328		 * down and is now dead. In either case treat it like an OOTB
329		 * abort with no TCB.
330 */
331 reason = SCTP_PEER_FAULTY;
332 sctp_abort_notification(stcb, reason, SCTP_SO_NOT_LOCKED);
333#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
334 so = SCTP_INP_SO(inp);
335 atomic_add_int(&stcb->asoc.refcnt, 1);
336 SCTP_TCB_UNLOCK(stcb);
337 SCTP_SOCKET_LOCK(so, 1);
338 SCTP_TCB_LOCK(stcb);
339 atomic_subtract_int(&stcb->asoc.refcnt, 1);
340#endif
341 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_2);
342#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
343 SCTP_SOCKET_UNLOCK(so, 1);
344 /* SCTP_TCB_UNLOCK(stcb); MT: I think this is not needed. */
345#endif
346 /* no need to unlock here, since the TCB is gone */
347 } else {
348 SCTP_TCB_UNLOCK(stcb);
349 }
350}
351
352void
353sctp_ctlinput(cmd, sa, vip)
354 int cmd;
355 struct sockaddr *sa;
356 void *vip;
357{
358 struct ip *ip = vip;
359 struct sctphdr *sh;
360 uint32_t vrf_id;
361
362 /* FIX, for non-bsd is this right? */
363 vrf_id = SCTP_DEFAULT_VRFID;
364 if (sa->sa_family != AF_INET ||
365 ((struct sockaddr_in *)sa)->sin_addr.s_addr == INADDR_ANY) {
366 return;
367 }
368 if (PRC_IS_REDIRECT(cmd)) {
369 ip = 0;
370 } else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0) {
371 return;
372 }
373 if (ip) {
374 struct sctp_inpcb *inp = NULL;
375 struct sctp_tcb *stcb = NULL;
376 struct sctp_nets *net = NULL;
377 struct sockaddr_in to, from;
378
379 sh = (struct sctphdr *)((caddr_t)ip + (ip->ip_hl << 2));
380 bzero(&to, sizeof(to));
381 bzero(&from, sizeof(from));
382 from.sin_family = to.sin_family = AF_INET;
383 from.sin_len = to.sin_len = sizeof(to);
384 from.sin_port = sh->src_port;
385 from.sin_addr = ip->ip_src;
386 to.sin_port = sh->dest_port;
387 to.sin_addr = ip->ip_dst;
388
389 /*
390 * 'to' holds the dest of the packet that failed to be sent.
391 * 'from' holds our local endpoint address. Thus we reverse
392 * the to and the from in the lookup.
393 */
394 stcb = sctp_findassociation_addr_sa((struct sockaddr *)&from,
395 (struct sockaddr *)&to,
396 &inp, &net, 1, vrf_id);
397 if (stcb != NULL && inp && (inp->sctp_socket != NULL)) {
398 if (cmd != PRC_MSGSIZE) {
399 sctp_notify(inp, ip, sh,
400 (struct sockaddr *)&to, stcb,
401 net);
402 } else {
403 /* handle possible ICMP size messages */
404 sctp_notify_mbuf(inp, stcb, net, ip, sh);
405 }
406 } else {
407 if ((stcb == NULL) && (inp != NULL)) {
408 /* reduce ref-count */
409 SCTP_INP_WLOCK(inp);
410 SCTP_INP_DECR_REF(inp);
411 SCTP_INP_WUNLOCK(inp);
412 }
413 }
414 }
415 return;
416}
417
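/*
 * sysctl handler (net.inet.sctp.getcred): given a pair of sockaddr_in
 * addresses, look up the matching association and copy out the ucred of
 * the owning socket.  Requires PRIV_NETINET_GETCRED.
 */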
418static int
419sctp_getcred(SYSCTL_HANDLER_ARGS)
420{
421 struct xucred xuc;
422 struct sockaddr_in addrs[2];
423 struct sctp_inpcb *inp;
424 struct sctp_nets *net;
425 struct sctp_tcb *stcb;
426 int error;
427 uint32_t vrf_id;
428
429 /* FIX, for non-bsd is this right? */
430 vrf_id = SCTP_DEFAULT_VRFID;
431
432 error = priv_check(req->td, PRIV_NETINET_GETCRED);
433
434 if (error)
435 return (error);
436
437 error = SYSCTL_IN(req, addrs, sizeof(addrs));
438 if (error)
439 return (error);
440
441 stcb = sctp_findassociation_addr_sa(sintosa(&addrs[0]),
442 sintosa(&addrs[1]),
443 &inp, &net, 1, vrf_id);
444 if (stcb == NULL || inp == NULL || inp->sctp_socket == NULL) {
445 if ((inp != NULL) && (stcb == NULL)) {
446 /* reduce ref-count */
447 SCTP_INP_WLOCK(inp);
448 SCTP_INP_DECR_REF(inp);
449 goto cred_can_cont;
450 }
451 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
452 error = ENOENT;
453 goto out;
454 }
455 SCTP_TCB_UNLOCK(stcb);
456 /*
457	 * We use the write lock here only because we need it in the error
458	 * leg. If we used RLOCK, then we would have to
459	 * wlock/decr/unlock/rlock, which in theory could create a hole.
460	 * Better to use the stronger wlock.
461 */
462 SCTP_INP_WLOCK(inp);
463cred_can_cont:
464 error = cr_canseesocket(req->td->td_ucred, inp->sctp_socket);
465 if (error) {
466 SCTP_INP_WUNLOCK(inp);
467 goto out;
468 }
469 cru2x(inp->sctp_socket->so_cred, &xuc);
470 SCTP_INP_WUNLOCK(inp);
471 error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
472out:
473 return (error);
474}
475
476SYSCTL_PROC(_net_inet_sctp, OID_AUTO, getcred, CTLTYPE_OPAQUE | CTLFLAG_RW,
477 0, 0, sctp_getcred, "S,ucred", "Get the ucred of a SCTP connection");
478
479
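/*
 * pru_abort handler: atomically mark the endpoint SOCKET_GONE/CLOSE_IP and
 * free the inpcb with an abortive close, then detach the socket buffers.
 * The cmpset loop retries if another thread changed sctp_flags underneath.
 */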
480static void
481sctp_abort(struct socket *so)
482{
483 struct sctp_inpcb *inp;
484 uint32_t flags;
485
486 inp = (struct sctp_inpcb *)so->so_pcb;
487 if (inp == 0) {
488 return;
489 }
490sctp_must_try_again:
491 flags = inp->sctp_flags;
492#ifdef SCTP_LOG_CLOSING
493 sctp_log_closing(inp, NULL, 17);
494#endif
495 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
496 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
497#ifdef SCTP_LOG_CLOSING
498 sctp_log_closing(inp, NULL, 16);
499#endif
500 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
501 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
502 SOCK_LOCK(so);
503 SCTP_SB_CLEAR(so->so_snd);
504 /*
505 * same for the rcv ones, they are only here for the
506 * accounting/select.
507 */
508 SCTP_SB_CLEAR(so->so_rcv);
509
510 /* Now null out the reference, we are completely detached. */
511 so->so_pcb = NULL;
512 SOCK_UNLOCK(so);
513 } else {
514 flags = inp->sctp_flags;
515 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
516 goto sctp_must_try_again;
517 }
518 }
519 return;
520}
521
522static int
523sctp_attach(struct socket *so, int proto, struct thread *p)
524{
525 struct sctp_inpcb *inp;
526 struct inpcb *ip_inp;
527 int error;
528 uint32_t vrf_id = SCTP_DEFAULT_VRFID;
529
530#ifdef IPSEC
531 uint32_t flags;
532
533#endif
534 inp = (struct sctp_inpcb *)so->so_pcb;
535 if (inp != 0) {
536 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
537 return EINVAL;
538 }
539 error = SCTP_SORESERVE(so, SCTP_BASE_SYSCTL(sctp_sendspace), SCTP_BASE_SYSCTL(sctp_recvspace));
540 if (error) {
541 return error;
542 }
543 error = sctp_inpcb_alloc(so, vrf_id);
544 if (error) {
545 return error;
546 }
547 inp = (struct sctp_inpcb *)so->so_pcb;
548 SCTP_INP_WLOCK(inp);
549 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUND_V6; /* I'm not v6! */
550 ip_inp = &inp->ip_inp.inp;
551 ip_inp->inp_vflag |= INP_IPV4;
552 ip_inp->inp_ip_ttl = ip_defttl;
553#ifdef IPSEC
554 error = ipsec_init_policy(so, &ip_inp->inp_sp);
555#ifdef SCTP_LOG_CLOSING
556 sctp_log_closing(inp, NULL, 17);
557#endif
558 if (error != 0) {
559 flags = inp->sctp_flags;
560 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
561 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
562#ifdef SCTP_LOG_CLOSING
563 sctp_log_closing(inp, NULL, 15);
564#endif
565 SCTP_INP_WUNLOCK(inp);
566 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
567 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
568 } else {
569 SCTP_INP_WUNLOCK(inp);
570 }
571 return error;
572 }
573#endif /* IPSEC */
574 SCTP_INP_WUNLOCK(inp);
575 return 0;
576}
577
578static int
579sctp_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
580{
581 struct sctp_inpcb *inp = NULL;
582 int error;
583
584#ifdef INET6
585 if (addr && addr->sa_family != AF_INET) {
586 /* must be a v4 address! */
587 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
588 return EINVAL;
589 }
590#endif /* INET6 */
591 if (addr && (addr->sa_len != sizeof(struct sockaddr_in))) {
592 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
593 return EINVAL;
594 }
595 inp = (struct sctp_inpcb *)so->so_pcb;
596 if (inp == 0) {
597 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
598 return EINVAL;
599 }
600 error = sctp_inpcb_bind(so, addr, NULL, p);
601 return error;
602}
603
604void
605sctp_close(struct socket *so)
606{
607 struct sctp_inpcb *inp;
608 uint32_t flags;
609
610 inp = (struct sctp_inpcb *)so->so_pcb;
611 if (inp == 0)
612 return;
613
614 /*
615	 * Inform all the lower-layer associations that we are done.
616 */
617sctp_must_try_again:
618 flags = inp->sctp_flags;
619#ifdef SCTP_LOG_CLOSING
620 sctp_log_closing(inp, NULL, 17);
621#endif
622 if (((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) &&
623 (atomic_cmpset_int(&inp->sctp_flags, flags, (flags | SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_CLOSE_IP)))) {
624 if (((so->so_options & SO_LINGER) && (so->so_linger == 0)) ||
625 (so->so_rcv.sb_cc > 0)) {
626#ifdef SCTP_LOG_CLOSING
627 sctp_log_closing(inp, NULL, 13);
628#endif
629 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
630 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
631 } else {
632#ifdef SCTP_LOG_CLOSING
633 sctp_log_closing(inp, NULL, 14);
634#endif
635 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_GRACEFUL_CLOSE,
636 SCTP_CALLED_AFTER_CMPSET_OFCLOSE);
637 }
638 /*
639 * The socket is now detached, no matter what the state of
640 * the SCTP association.
641 */
642 SOCK_LOCK(so);
643 SCTP_SB_CLEAR(so->so_snd);
644 /*
645 * same for the rcv ones, they are only here for the
646 * accounting/select.
647 */
648 SCTP_SB_CLEAR(so->so_rcv);
649
650 /* Now null out the reference, we are completely detached. */
651 so->so_pcb = NULL;
652 SOCK_UNLOCK(so);
653 } else {
654 flags = inp->sctp_flags;
655 if ((flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) {
656 goto sctp_must_try_again;
657 }
658 }
659 return;
660}
661
662
663int
664sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
665 struct mbuf *control, struct thread *p);
666
667
668int
669sctp_sendm(struct socket *so, int flags, struct mbuf *m, struct sockaddr *addr,
670 struct mbuf *control, struct thread *p)
671{
672 struct sctp_inpcb *inp;
673 int error;
674
675 inp = (struct sctp_inpcb *)so->so_pcb;
676 if (inp == 0) {
677 if (control) {
678 sctp_m_freem(control);
679 control = NULL;
680 }
681 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
682 sctp_m_freem(m);
683 return EINVAL;
684 }
685	/* Got to have a destination address if we are NOT a connected socket */
686 if ((addr == NULL) &&
687 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) ||
688 (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE))
689 ) {
690 goto connected_type;
691 } else if (addr == NULL) {
692 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
693 error = EDESTADDRREQ;
694 sctp_m_freem(m);
695 if (control) {
696 sctp_m_freem(control);
697 control = NULL;
698 }
699 return (error);
700 }
701#ifdef INET6
702 if (addr->sa_family != AF_INET) {
703 /* must be a v4 address! */
704 SCTP_LTRACE_ERR_RET_PKT(m, inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EDESTADDRREQ);
705 sctp_m_freem(m);
706 if (control) {
707 sctp_m_freem(control);
708 control = NULL;
709 }
710 error = EDESTADDRREQ;
711 return EDESTADDRREQ;
712 }
713#endif /* INET6 */
714connected_type:
715 /* now what about control */
716 if (control) {
717 if (inp->control) {
718 SCTP_PRINTF("huh? control set?\n");
719 sctp_m_freem(inp->control);
720 inp->control = NULL;
721 }
722 inp->control = control;
723 }
724 /* Place the data */
725 if (inp->pkt) {
726 SCTP_BUF_NEXT(inp->pkt_last) = m;
727 inp->pkt_last = m;
728 } else {
729 inp->pkt_last = inp->pkt = m;
730 }
731 if (
732 /* FreeBSD uses a flag passed */
733 ((flags & PRUS_MORETOCOME) == 0)
734 ) {
735 /*
736		 * note: with the current version this code will only be used
737		 * by OpenBSD -- NetBSD, FreeBSD, and MacOS have methods for
738		 * re-defining sosend to use sctp_sosend. One can
739		 * optionally switch back to this code (by changing back the
740		 * definitions), but this is not advisable. This code is used
741		 * by FreeBSD when sending a file with sendfile(), though.
742 */
743 int ret;
744
745 ret = sctp_output(inp, inp->pkt, addr, inp->control, p, flags);
746 inp->pkt = NULL;
747 inp->control = NULL;
748 return (ret);
749 } else {
750 return (0);
751 }
752}
753
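/*
 * pru_disconnect handler (TCP-model sockets only).  A graceful close sends
 * SHUTDOWN once the send queues drain (or sets SHUTDOWN_PENDING); an
 * SO_LINGER of zero or unread receive data forces a user-initiated ABORT
 * instead.  One-to-many (UDP-model) sockets get EOPNOTSUPP.
 */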
754int
755sctp_disconnect(struct socket *so)
756{
757 struct sctp_inpcb *inp;
758
759 inp = (struct sctp_inpcb *)so->so_pcb;
760 if (inp == NULL) {
761 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
762 return (ENOTCONN);
763 }
764 SCTP_INP_RLOCK(inp);
765 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
766 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
767 if (SCTP_LIST_EMPTY(&inp->sctp_asoc_list)) {
768 /* No connection */
769 SCTP_INP_RUNLOCK(inp);
770 return (0);
771 } else {
772 struct sctp_association *asoc;
773 struct sctp_tcb *stcb;
774
775 stcb = LIST_FIRST(&inp->sctp_asoc_list);
776 if (stcb == NULL) {
777 SCTP_INP_RUNLOCK(inp);
778 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
779 return (EINVAL);
780 }
781 SCTP_TCB_LOCK(stcb);
782 asoc = &stcb->asoc;
783 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
784 /* We are about to be freed, out of here */
785 SCTP_TCB_UNLOCK(stcb);
786 SCTP_INP_RUNLOCK(inp);
787 return (0);
788 }
789 if (((so->so_options & SO_LINGER) &&
790 (so->so_linger == 0)) ||
791 (so->so_rcv.sb_cc > 0)) {
792 if (SCTP_GET_STATE(asoc) !=
793 SCTP_STATE_COOKIE_WAIT) {
794 /* Left with Data unread */
795 struct mbuf *err;
796
797 err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_DONTWAIT, 1, MT_DATA);
798 if (err) {
799 /*
800 * Fill in the user
801 * initiated abort
802 */
803 struct sctp_paramhdr *ph;
804
805 ph = mtod(err, struct sctp_paramhdr *);
806 SCTP_BUF_LEN(err) = sizeof(struct sctp_paramhdr);
807 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
808 ph->param_length = htons(SCTP_BUF_LEN(err));
809 }
810#if defined(SCTP_PANIC_ON_ABORT)
811 panic("disconnect does an abort");
812#endif
813 sctp_send_abort_tcb(stcb, err, SCTP_SO_LOCKED);
814 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
815 }
816 SCTP_INP_RUNLOCK(inp);
817 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
818 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
819 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
820 }
821 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_3);
822 /* No unlock tcb assoc is gone */
823 return (0);
824 }
825 if (TAILQ_EMPTY(&asoc->send_queue) &&
826 TAILQ_EMPTY(&asoc->sent_queue) &&
827 (asoc->stream_queue_cnt == 0)) {
828 /* there is nothing queued to send, so done */
829 if (asoc->locked_on_sending) {
830 goto abort_anyway;
831 }
832 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
833 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
834 /* only send SHUTDOWN 1st time thru */
835 sctp_stop_timers_for_shutdown(stcb);
836 sctp_send_shutdown(stcb,
837 stcb->asoc.primary_destination);
838 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
839 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
840 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
841 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
842 }
843 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
844 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
845 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
846 stcb->sctp_ep, stcb,
847 asoc->primary_destination);
848 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
849 stcb->sctp_ep, stcb,
850 asoc->primary_destination);
851 }
852 } else {
853 /*
854				 * we still have (or just got) data to send,
855 * so set SHUTDOWN_PENDING
856 */
857 /*
858 * XXX sockets draft says that SCTP_EOF
859 * should be sent with no data. currently,
860 * we will allow user data to be sent first
861 * and move to SHUTDOWN-PENDING
862 */
863 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
864 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
865 asoc->primary_destination);
866 if (asoc->locked_on_sending) {
867 /* Locked to send out the data */
868 struct sctp_stream_queue_pending *sp;
869
870 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
871 if (sp == NULL) {
872 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
873 asoc->locked_on_sending->stream_no);
874 } else {
875 if ((sp->length == 0) && (sp->msg_is_complete == 0))
876 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
877 }
878 }
879 if (TAILQ_EMPTY(&asoc->send_queue) &&
880 TAILQ_EMPTY(&asoc->sent_queue) &&
881 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
882 struct mbuf *op_err;
883
884 abort_anyway:
885 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
886 0, M_DONTWAIT, 1, MT_DATA);
887 if (op_err) {
888 /*
889 * Fill in the user
890 * initiated abort
891 */
892 struct sctp_paramhdr *ph;
893 uint32_t *ippp;
894
895 SCTP_BUF_LEN(op_err) =
896 (sizeof(struct sctp_paramhdr) + sizeof(uint32_t));
897 ph = mtod(op_err,
898 struct sctp_paramhdr *);
899 ph->param_type = htons(
900 SCTP_CAUSE_USER_INITIATED_ABT);
901 ph->param_length = htons(SCTP_BUF_LEN(op_err));
902 ippp = (uint32_t *) (ph + 1);
903 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4);
904 }
905#if defined(SCTP_PANIC_ON_ABORT)
906 panic("disconnect does an abort");
907#endif
908
909 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_4;
910 sctp_send_abort_tcb(stcb, op_err, SCTP_SO_LOCKED);
911 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
912 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
913 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
914 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
915 }
916 SCTP_INP_RUNLOCK(inp);
917 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_5);
918 return (0);
919 } else {
920 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
921 }
922 }
923 SCTP_TCB_UNLOCK(stcb);
924 SCTP_INP_RUNLOCK(inp);
925 return (0);
926 }
927 /* not reached */
928 } else {
929 /* UDP model does not support this */
930 SCTP_INP_RUNLOCK(inp);
931 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
932 return EOPNOTSUPP;
933 }
934}
935
936int
937sctp_flush(struct socket *so, int how)
938{
939 /*
940 * We will just clear out the values and let subsequent close clear
941	 * out the data, if any. Note that if the user did a shutdown(SHUT_RD)
942	 * they will not be able to read the data; the socket will block
943 * that from happening.
944 */
945 if ((how == PRU_FLUSH_RD) || (how == PRU_FLUSH_RDWR)) {
946 /*
947 * First make sure the sb will be happy, we don't use these
948 * except maybe the count
949 */
950 so->so_rcv.sb_cc = 0;
951 so->so_rcv.sb_mbcnt = 0;
952 so->so_rcv.sb_mb = NULL;
953 }
954 if ((how == PRU_FLUSH_WR) || (how == PRU_FLUSH_RDWR)) {
955 /*
956 * First make sure the sb will be happy, we don't use these
957 * except maybe the count
958 */
959 so->so_snd.sb_cc = 0;
960 so->so_snd.sb_mbcnt = 0;
961 so->so_snd.sb_mb = NULL;
962
963 }
964 return (0);
965}
966
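/*
 * pru_shutdown handler.  For the one-to-many (UDP) model the call is
 * rejected with EOPNOTSUPP; for the TCP model it behaves like a graceful
 * disconnect: send SHUTDOWN when the queues are empty, otherwise mark
 * SHUTDOWN_PENDING, aborting only if a partially delivered message would
 * be left behind.
 */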
967int
968sctp_shutdown(struct socket *so)
969{
970 struct sctp_inpcb *inp;
971
972 inp = (struct sctp_inpcb *)so->so_pcb;
973 if (inp == 0) {
974 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
975 return EINVAL;
976 }
977 SCTP_INP_RLOCK(inp);
978	/* For the UDP model this is an invalid call */
979 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
980 /* Restore the flags that the soshutdown took away. */
981 so->so_rcv.sb_state &= ~SBS_CANTRCVMORE;
982 /* This proc will wakeup for read and do nothing (I hope) */
983 SCTP_INP_RUNLOCK(inp);
984 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
985 return (EOPNOTSUPP);
986 }
987 /*
988	 * OK, if we reach here it is the TCP model and it is either a SHUT_WR
989	 * or SHUT_RDWR. This means we put the shutdown flag against it.
990 */
991 {
992 struct sctp_tcb *stcb;
993 struct sctp_association *asoc;
994
995 socantsendmore(so);
996
997 stcb = LIST_FIRST(&inp->sctp_asoc_list);
998 if (stcb == NULL) {
999 /*
1000 * Ok we hit the case that the shutdown call was
1001 * made after an abort or something. Nothing to do
1002 * now.
1003 */
1004 SCTP_INP_RUNLOCK(inp);
1005 return (0);
1006 }
1007 SCTP_TCB_LOCK(stcb);
1008 asoc = &stcb->asoc;
1009 if (TAILQ_EMPTY(&asoc->send_queue) &&
1010 TAILQ_EMPTY(&asoc->sent_queue) &&
1011 (asoc->stream_queue_cnt == 0)) {
1012 if (asoc->locked_on_sending) {
1013 goto abort_anyway;
1014 }
1015 /* there is nothing queued to send, so I'm done... */
1016 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1017 /* only send SHUTDOWN the first time through */
1018 sctp_stop_timers_for_shutdown(stcb);
1019 sctp_send_shutdown(stcb,
1020 stcb->asoc.primary_destination);
1021 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_LOCKED);
1022 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
1023 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
1024 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
1025 }
1026 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
1027 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
1028 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1029 stcb->sctp_ep, stcb,
1030 asoc->primary_destination);
1031 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1032 stcb->sctp_ep, stcb,
1033 asoc->primary_destination);
1034 }
1035 } else {
1036 /*
1037			 * we still have (or just got) data to send, so set
1038 * SHUTDOWN_PENDING
1039 */
1040 asoc->state |= SCTP_STATE_SHUTDOWN_PENDING;
1041 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, stcb->sctp_ep, stcb,
1042 asoc->primary_destination);
1043
1044 if (asoc->locked_on_sending) {
1045 /* Locked to send out the data */
1046 struct sctp_stream_queue_pending *sp;
1047
1048 sp = TAILQ_LAST(&asoc->locked_on_sending->outqueue, sctp_streamhead);
1049 if (sp == NULL) {
1050 SCTP_PRINTF("Error, sp is NULL, locked on sending is non-null strm:%d\n",
1051 asoc->locked_on_sending->stream_no);
1052 } else {
1053 if ((sp->length == 0) && (sp->msg_is_complete == 0)) {
1054 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
1055 }
1056 }
1057 }
1058 if (TAILQ_EMPTY(&asoc->send_queue) &&
1059 TAILQ_EMPTY(&asoc->sent_queue) &&
1060 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
1061 struct mbuf *op_err;
1062
1063 abort_anyway:
1064 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1065 0, M_DONTWAIT, 1, MT_DATA);
1066 if (op_err) {
1067 /* Fill in the user initiated abort */
1068 struct sctp_paramhdr *ph;
1069 uint32_t *ippp;
1070
1071 SCTP_BUF_LEN(op_err) =
1072 sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
1073 ph = mtod(op_err,
1074 struct sctp_paramhdr *);
1075 ph->param_type = htons(
1076 SCTP_CAUSE_USER_INITIATED_ABT);
1077 ph->param_length = htons(SCTP_BUF_LEN(op_err));
1078 ippp = (uint32_t *) (ph + 1);
1079 *ippp = htonl(SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6);
1080 }
1081#if defined(SCTP_PANIC_ON_ABORT)
1082 panic("shutdown does an abort");
1083#endif
1084 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_USRREQ + SCTP_LOC_6;
1085 sctp_abort_an_association(stcb->sctp_ep, stcb,
1086 SCTP_RESPONSE_TO_USER_REQ,
1087 op_err, SCTP_SO_LOCKED);
1088 goto skip_unlock;
1089 } else {
1090 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CLOSING, SCTP_SO_LOCKED);
1091 }
1092 }
1093 SCTP_TCB_UNLOCK(stcb);
1094 }
1095skip_unlock:
1096 SCTP_INP_RUNLOCK(inp);
1097 return 0;
1098}
1099
1100/*
1101 * copies a "user" presentable address and removes embedded scope, etc.
1102 * returns 0 on success, 1 on error
1103 */
1104static uint32_t
1105sctp_fill_user_address(struct sockaddr_storage *ss, struct sockaddr *sa)
1106{
1107#ifdef INET6
1108 struct sockaddr_in6 lsa6;
1109
1110 sa = (struct sockaddr *)sctp_recover_scope((struct sockaddr_in6 *)sa,
1111 &lsa6);
1112#endif
1113 memcpy(ss, sa, sa->sa_len);
1114 return (0);
1115}
1116
1117
1118
1119/*
1120 * NOTE: assumes addr lock is held
1121 */
1122static size_t
1123sctp_fill_up_addresses_vrf(struct sctp_inpcb *inp,
1124 struct sctp_tcb *stcb,
1125 size_t limit,
1126 struct sockaddr_storage *sas,
1127 uint32_t vrf_id)
1128{
1129 struct sctp_ifn *sctp_ifn;
1130 struct sctp_ifa *sctp_ifa;
1131 int loopback_scope, ipv4_local_scope, local_scope, site_scope;
1132 size_t actual;
1133 int ipv4_addr_legal, ipv6_addr_legal;
1134 struct sctp_vrf *vrf;
1135
1136 actual = 0;
1137 if (limit <= 0)
1138 return (actual);
1139
1140 if (stcb) {
1141		/* Turn on all the appropriate scopes */
1142 loopback_scope = stcb->asoc.loopback_scope;
1143 ipv4_local_scope = stcb->asoc.ipv4_local_scope;
1144 local_scope = stcb->asoc.local_scope;
1145 site_scope = stcb->asoc.site_scope;
1146 } else {
1147		/* Turn on ALL scopes, since we look at the EP */
1148 loopback_scope = ipv4_local_scope = local_scope =
1149 site_scope = 1;
1150 }
1151 ipv4_addr_legal = ipv6_addr_legal = 0;
1152 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1153 ipv6_addr_legal = 1;
1154 if (SCTP_IPV6_V6ONLY(inp) == 0) {
1155 ipv4_addr_legal = 1;
1156 }
1157 } else {
1158 ipv4_addr_legal = 1;
1159 }
1160 vrf = sctp_find_vrf(vrf_id);
1161 if (vrf == NULL) {
1162 return (0);
1163 }
1164 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1165 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1166 if ((loopback_scope == 0) &&
1167 SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
1168 /* Skip loopback if loopback_scope not set */
1169 continue;
1170 }
1171 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1172 if (stcb) {
1173 /*
1174 * For the BOUND-ALL case, the list
1175				 * associated with a TCB is always
1176				 * considered a reverse list, i.e.
1177 * it lists addresses that are NOT
1178 * part of the association. If this
1179 * is one of those we must skip it.
1180 */
1181 if (sctp_is_addr_restricted(stcb,
1182 sctp_ifa)) {
1183 continue;
1184 }
1185 }
1186 switch (sctp_ifa->address.sa.sa_family) {
1187 case AF_INET:
1188 if (ipv4_addr_legal) {
1189 struct sockaddr_in *sin;
1190
1191 sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
1192 if (sin->sin_addr.s_addr == 0) {
1193						/* we skip unspecified addresses */
1198 continue;
1199 }
1200 if ((ipv4_local_scope == 0) &&
1201 (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
1202 continue;
1203 }
1204#ifdef INET6
1205 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) {
1206 in6_sin_2_v4mapsin6(sin, (struct sockaddr_in6 *)sas);
1207 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1208 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(struct sockaddr_in6));
1209 actual += sizeof(struct sockaddr_in6);
1210 } else {
1211#endif
1212 memcpy(sas, sin, sizeof(*sin));
1213 ((struct sockaddr_in *)sas)->sin_port = inp->sctp_lport;
1214 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin));
1215 actual += sizeof(*sin);
1216#ifdef INET6
1217 }
1218#endif
1219 if (actual >= limit) {
1220 return (actual);
1221 }
1222 } else {
1223 continue;
1224 }
1225 break;
1226#ifdef INET6
1227 case AF_INET6:
1228 if (ipv6_addr_legal) {
1229 struct sockaddr_in6 *sin6;
1230
1231 sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
1232 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1233						/* we skip unspecified addresses */
1238 continue;
1239 }
1240 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
1241 if (local_scope == 0)
1242 continue;
1243 if (sin6->sin6_scope_id == 0) {
1244 if (sa6_recoverscope(sin6) != 0)
1245							/* bad link-local address */
1259 continue;
1260 }
1261 }
1262 if ((site_scope == 0) &&
1263 (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
1264 continue;
1265 }
1266 memcpy(sas, sin6, sizeof(*sin6));
1267 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1268 sas = (struct sockaddr_storage *)((caddr_t)sas + sizeof(*sin6));
1269 actual += sizeof(*sin6);
1270 if (actual >= limit) {
1271 return (actual);
1272 }
1273 } else {
1274 continue;
1275 }
1276 break;
1277#endif
1278 default:
1279 /* TSNH */
1280 break;
1281 }
1282 }
1283 }
1284 } else {
1285 struct sctp_laddr *laddr;
1286
1287 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1288 if (stcb) {
1289 if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
1290 continue;
1291 }
1292 }
1293 if (sctp_fill_user_address(sas, &laddr->ifa->address.sa))
1294 continue;
1295
1296 ((struct sockaddr_in6 *)sas)->sin6_port = inp->sctp_lport;
1297 sas = (struct sockaddr_storage *)((caddr_t)sas +
1298 laddr->ifa->address.sa.sa_len);
1299 actual += laddr->ifa->address.sa.sa_len;
1300 if (actual >= limit) {
1301 return (actual);
1302 }
1303 }
1304 }
1305 return (actual);
1306}
1307
1308static size_t
1309sctp_fill_up_addresses(struct sctp_inpcb *inp,
1310 struct sctp_tcb *stcb,
1311 size_t limit,
1312 struct sockaddr_storage *sas)
1313{
1314 size_t size = 0;
1315
1316 SCTP_IPI_ADDR_RLOCK();
1317 /* fill up addresses for the endpoint's default vrf */
1318 size = sctp_fill_up_addresses_vrf(inp, stcb, limit, sas,
1319 inp->def_vrf_id);
1320 SCTP_IPI_ADDR_RUNLOCK();
1321 return (size);
1322}
1323
1324/*
1325 * NOTE: assumes addr lock is held
1326 */
1327static int
1328sctp_count_max_addresses_vrf(struct sctp_inpcb *inp, uint32_t vrf_id)
1329{
1330 int cnt = 0;
1331 struct sctp_vrf *vrf = NULL;
1332
1333 /*
1334	 * In both the sub-set bound and bound-all cases we return the MAXIMUM
1335 * number of addresses that you COULD get. In reality the sub-set
1336 * bound may have an exclusion list for a given TCB OR in the
1337 * bound-all case a TCB may NOT include the loopback or other
1338 * addresses as well.
1339 */
1340 vrf = sctp_find_vrf(vrf_id);
1341 if (vrf == NULL) {
1342 return (0);
1343 }
1344 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1345 struct sctp_ifn *sctp_ifn;
1346 struct sctp_ifa *sctp_ifa;
1347
1348 LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
1349 LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
1350 /* Count them if they are the right type */
1351 if (sctp_ifa->address.sa.sa_family == AF_INET) {
1352 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
1353 cnt += sizeof(struct sockaddr_in6);
1354 else
1355 cnt += sizeof(struct sockaddr_in);
1356
1357 } else if (sctp_ifa->address.sa.sa_family == AF_INET6)
1358 cnt += sizeof(struct sockaddr_in6);
1359 }
1360 }
1361 } else {
1362 struct sctp_laddr *laddr;
1363
1364 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
1365 if (laddr->ifa->address.sa.sa_family == AF_INET) {
1366 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4))
1367 cnt += sizeof(struct sockaddr_in6);
1368 else
1369 cnt += sizeof(struct sockaddr_in);
1370
1371 } else if (laddr->ifa->address.sa.sa_family == AF_INET6)
1372 cnt += sizeof(struct sockaddr_in6);
1373 }
1374 }
1375 return (cnt);
1376}
1377
1378static int
1379sctp_count_max_addresses(struct sctp_inpcb *inp)
1380{
1381 int cnt = 0;
1382
1383 SCTP_IPI_ADDR_RLOCK();
1384 /* count addresses for the endpoint's default VRF */
1385 cnt = sctp_count_max_addresses_vrf(inp, inp->def_vrf_id);
1386 SCTP_IPI_ADDR_RUNLOCK();
1387 return (cnt);
1388}
1389
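/*
 * Common worker for the sctp_connectx() socket options: optval carries an
 * address count followed by a packed list of sockaddrs.  A new association
 * is created for the first address, the remaining addresses are added to
 * it, the new sctp_assoc_t is written back over optval, and INIT is either
 * sent immediately or armed on a timer when a delayed connect is requested.
 */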
1390static int
1391sctp_do_connect_x(struct socket *so, struct sctp_inpcb *inp, void *optval,
1392 size_t optsize, void *p, int delay)
1393{
1394 int error = 0;
1395 int creat_lock_on = 0;
1396 struct sctp_tcb *stcb = NULL;
1397 struct sockaddr *sa;
1398 int num_v6 = 0, num_v4 = 0, *totaddrp, totaddr;
1399 int added = 0;
1400 uint32_t vrf_id;
1401 int bad_addresses = 0;
1402 sctp_assoc_t *a_id;
1403
1404 SCTPDBG(SCTP_DEBUG_PCB1, "Connectx called\n");
1405
1406 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
1407 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
1408 /* We are already connected AND the TCP model */
1409 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
1410 return (EADDRINUSE);
1411 }
1412 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
1413 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1414 return (EINVAL);
1415 }
1416 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
1417 SCTP_INP_RLOCK(inp);
1418 stcb = LIST_FIRST(&inp->sctp_asoc_list);
1419 SCTP_INP_RUNLOCK(inp);
1420 }
1421 if (stcb) {
1422 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
1423 return (EALREADY);
1424 }
1425 SCTP_INP_INCR_REF(inp);
1426 SCTP_ASOC_CREATE_LOCK(inp);
1427 creat_lock_on = 1;
1428 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1429 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
1430 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
1431 error = EFAULT;
1432 goto out_now;
1433 }
1434 totaddrp = (int *)optval;
1435 totaddr = *totaddrp;
1436 sa = (struct sockaddr *)(totaddrp + 1);
1437 stcb = sctp_connectx_helper_find(inp, sa, &totaddr, &num_v4, &num_v6, &error, (optsize - sizeof(int)), &bad_addresses);
1438 if ((stcb != NULL) || bad_addresses) {
1439		/* Already have or am bringing up an association */
1440 SCTP_ASOC_CREATE_UNLOCK(inp);
1441 creat_lock_on = 0;
1442 if (stcb)
1443 SCTP_TCB_UNLOCK(stcb);
1444 if (bad_addresses == 0) {
1445 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
1446 error = EALREADY;
1447 }
1448 goto out_now;
1449 }
1450#ifdef INET6
1451 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
1452 (num_v6 > 0)) {
1453 error = EINVAL;
1454 goto out_now;
1455 }
1456 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1457 (num_v4 > 0)) {
1458 struct in6pcb *inp6;
1459
1460 inp6 = (struct in6pcb *)inp;
1461 if (SCTP_IPV6_V6ONLY(inp6)) {
1462 /*
1463 * if IPV6_V6ONLY flag, ignore connections destined
1464 * to a v4 addr or v4-mapped addr
1465 */
1466 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1467 error = EINVAL;
1468 goto out_now;
1469 }
1470 }
1471#endif /* INET6 */
1472 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
1473 SCTP_PCB_FLAGS_UNBOUND) {
1474		/* Bind an ephemeral port */
1475 error = sctp_inpcb_bind(so, NULL, NULL, p);
1476 if (error) {
1477 goto out_now;
1478 }
1479 }
1480 /* FIX ME: do we want to pass in a vrf on the connect call? */
1481 vrf_id = inp->def_vrf_id;
1482
1483 /* We are GOOD to go */
1484 stcb = sctp_aloc_assoc(inp, sa, 1, &error, 0, vrf_id,
1485 (struct thread *)p
1486 );
1487 if (stcb == NULL) {
1488 /* Gak! no memory */
1489 goto out_now;
1490 }
1491 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
1492 /* move to second address */
1493 if (sa->sa_family == AF_INET)
1494 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in));
1495 else
1496 sa = (struct sockaddr *)((caddr_t)sa + sizeof(struct sockaddr_in6));
1497
1498 error = 0;
1499 added = sctp_connectx_helper_add(stcb, sa, (totaddr - 1), &error);
1500 /* Fill in the return id */
1501 if (error) {
1502 (void)sctp_free_assoc(inp, stcb, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_12);
1503 goto out_now;
1504 }
1505 a_id = (sctp_assoc_t *) optval;
1506 *a_id = sctp_get_associd(stcb);
1507
1508 /* initialize authentication parameters for the assoc */
1509 sctp_initialize_auth_params(inp, stcb);
1510
1511 if (delay) {
1512 /* doing delayed connection */
1513 stcb->asoc.delayed_connection = 1;
1514 sctp_timer_start(SCTP_TIMER_TYPE_INIT, inp, stcb, stcb->asoc.primary_destination);
1515 } else {
1516 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
1517 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
1518 }
1519 SCTP_TCB_UNLOCK(stcb);
1520 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
1521 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
1522 /* Set the connected flag so we can queue data */
1523 soisconnecting(so);
1524 }
1525out_now:
1526 if (creat_lock_on) {
1527 SCTP_ASOC_CREATE_UNLOCK(inp);
1528 }
1529 SCTP_INP_DECR_REF(inp);
1530 return error;
1531}
1532
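/*
 * Resolve the TCB that a socket option refers to.  TCP-model endpoints use
 * their single association; otherwise a non-zero assoc_id is looked up (and
 * the option fails with ENOENT if it does not exist).  On success the TCB,
 * if any, is returned locked in 'stcb'.
 */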
1533#define SCTP_FIND_STCB(inp, stcb, assoc_id) { \
1534 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||\
1535 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { \
1536 SCTP_INP_RLOCK(inp); \
1537 stcb = LIST_FIRST(&inp->sctp_asoc_list); \
1538 if (stcb) { \
1539 SCTP_TCB_LOCK(stcb); \
1540 } \
1541 SCTP_INP_RUNLOCK(inp); \
1542 } else if (assoc_id != 0) { \
1543 stcb = sctp_findassociation_ep_asocid(inp, assoc_id, 1); \
1544 if (stcb == NULL) { \
1545 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT); \
1546 error = ENOENT; \
1547 break; \
1548 } \
1549 } else { \
1550 stcb = NULL; \
1551 } \
1552 }
1553
1554
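/*
 * Validate that the caller supplied at least sizeof(type) bytes of option
 * data; if so, cast optval to the requested pointer type, otherwise fail
 * the option with EINVAL.
 */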
1555#define SCTP_CHECK_AND_CAST(destp, srcp, type, size) {\
1556 if (size < sizeof(type)) { \
1557 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL); \
1558 error = EINVAL; \
1559 break; \
1560 } else { \
1561 destp = (type *)srcp; \
1562 } \
1563 }
1564
1565static int
1566sctp_getopt(struct socket *so, int optname, void *optval, size_t *optsize,
1567 void *p)
1568{
1569 struct sctp_inpcb *inp = NULL;
1570 int error, val = 0;
1571 struct sctp_tcb *stcb = NULL;
1572
1573 if (optval == NULL) {
1574 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1575 return (EINVAL);
1576 }
1577 inp = (struct sctp_inpcb *)so->so_pcb;
1578 if (inp == 0) {
1579 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1580 return EINVAL;
1581 }
1582 error = 0;
1583
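	/*
	 * The first case block below covers the simple on/off endpoint
	 * feature flags (plus SCTP_AUTOCLOSE, which reports its timeout in
	 * seconds); they are read under the INP read lock and returned as a
	 * single int.
	 */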
1584 switch (optname) {
1585 case SCTP_NODELAY:
1586 case SCTP_AUTOCLOSE:
1587 case SCTP_EXPLICIT_EOR:
1588 case SCTP_AUTO_ASCONF:
1589 case SCTP_DISABLE_FRAGMENTS:
1590 case SCTP_I_WANT_MAPPED_V4_ADDR:
1591 case SCTP_USE_EXT_RCVINFO:
1592 SCTP_INP_RLOCK(inp);
1593 switch (optname) {
1594 case SCTP_DISABLE_FRAGMENTS:
1595 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NO_FRAGMENT);
1596 break;
1597 case SCTP_I_WANT_MAPPED_V4_ADDR:
1598 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4);
1599 break;
1600 case SCTP_AUTO_ASCONF:
1601 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
1602 /* only valid for bound all sockets */
1603 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
1604 } else {
1605 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1606 error = EINVAL;
1607 goto flags_out;
1608 }
1609 break;
1610 case SCTP_EXPLICIT_EOR:
1611 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR);
1612 break;
1613 case SCTP_NODELAY:
1614 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NODELAY);
1615 break;
1616 case SCTP_USE_EXT_RCVINFO:
1617 val = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO);
1618 break;
1619 case SCTP_AUTOCLOSE:
1620 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))
1621 val = TICKS_TO_SEC(inp->sctp_ep.auto_close_time);
1622 else
1623 val = 0;
1624 break;
1625
1626 default:
1627 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
1628 error = ENOPROTOOPT;
1629 } /* end switch (sopt->sopt_name) */
1630 if (optname != SCTP_AUTOCLOSE) {
1631 /* make it an "on/off" value */
1632 val = (val != 0);
1633 }
1634 if (*optsize < sizeof(val)) {
1635 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
1636 error = EINVAL;
1637 }
1638flags_out:
1639 SCTP_INP_RUNLOCK(inp);
1640 if (error == 0) {
1641 /* return the option value */
1642 *(int *)optval = val;
1643 *optsize = sizeof(val);
1644 }
1645 break;
1646 case SCTP_GET_PACKET_LOG:
1647 {
1648#ifdef SCTP_PACKET_LOGGING
1649 uint8_t *target;
1650 int ret;
1651
1652 SCTP_CHECK_AND_CAST(target, optval, uint8_t, *optsize);
1653 ret = sctp_copy_out_packet_log(target, (int)*optsize);
1654 *optsize = ret;
1655#else
1656 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
1657 error = EOPNOTSUPP;
1658#endif
1659 break;
1660 }
1661 case SCTP_PARTIAL_DELIVERY_POINT:
1662 {
1663 uint32_t *value;
1664
1665 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1666 *value = inp->partial_delivery_point;
1667 *optsize = sizeof(uint32_t);
1668 }
1669 break;
1670 case SCTP_FRAGMENT_INTERLEAVE:
1671 {
1672 uint32_t *value;
1673
1674 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1675 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) {
1676 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) {
1677 *value = SCTP_FRAG_LEVEL_2;
1678 } else {
1679 *value = SCTP_FRAG_LEVEL_1;
1680 }
1681 } else {
1682 *value = SCTP_FRAG_LEVEL_0;
1683 }
1684 *optsize = sizeof(uint32_t);
1685 }
1686 break;
1687 case SCTP_CMT_ON_OFF:
1688 {
1689 struct sctp_assoc_value *av;
1690
1691 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1692 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
1693 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1694 if (stcb) {
1695 av->assoc_value = stcb->asoc.sctp_cmt_on_off;
1696 SCTP_TCB_UNLOCK(stcb);
1697
1698 } else {
1699 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1700 error = ENOTCONN;
1701 }
1702 } else {
1703 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
1704 error = ENOPROTOOPT;
1705 }
1706 *optsize = sizeof(*av);
1707 }
1708 break;
1709 /* JRS - Get socket option for pluggable congestion control */
1710 case SCTP_PLUGGABLE_CC:
1711 {
1712 struct sctp_assoc_value *av;
1713
1714 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1715 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1716 if (stcb) {
1717 av->assoc_value = stcb->asoc.congestion_control_module;
1718 SCTP_TCB_UNLOCK(stcb);
1719 } else {
1720 av->assoc_value = inp->sctp_ep.sctp_default_cc_module;
1721 }
1722 *optsize = sizeof(*av);
1723 }
1724 break;
1725 case SCTP_GET_ADDR_LEN:
1726 {
1727 struct sctp_assoc_value *av;
1728
1729 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1730 error = EINVAL;
1731#ifdef INET
1732 if (av->assoc_value == AF_INET) {
1733 av->assoc_value = sizeof(struct sockaddr_in);
1734 error = 0;
1735 }
1736#endif
1737#ifdef INET6
1738 if (av->assoc_value == AF_INET6) {
1739 av->assoc_value = sizeof(struct sockaddr_in6);
1740 error = 0;
1741 }
1742#endif
1743 if (error) {
1744 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1745 }
1746 *optsize = sizeof(*av);
1747 }
1748 break;
1749 case SCTP_GET_ASSOC_NUMBER:
1750 {
1751 uint32_t *value, cnt;
1752
1753 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1754 cnt = 0;
1755 SCTP_INP_RLOCK(inp);
1756 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1757 cnt++;
1758 }
1759 SCTP_INP_RUNLOCK(inp);
1760 *value = cnt;
1761 *optsize = sizeof(uint32_t);
1762 }
1763 break;
1764
1765 case SCTP_GET_ASSOC_ID_LIST:
1766 {
1767 struct sctp_assoc_ids *ids;
1768 unsigned int at, limit;
1769
1770 SCTP_CHECK_AND_CAST(ids, optval, struct sctp_assoc_ids, *optsize);
1771 at = 0;
1772 limit = *optsize / sizeof(sctp_assoc_t);
1773 SCTP_INP_RLOCK(inp);
1774 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
1775 if (at < limit) {
1776 ids->gaids_assoc_id[at++] = sctp_get_associd(stcb);
1777 } else {
1778 error = EINVAL;
1779 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1780 break;
1781 }
1782 }
1783 SCTP_INP_RUNLOCK(inp);
1784 *optsize = at * sizeof(sctp_assoc_t);
1785 }
1786 break;
1787 case SCTP_CONTEXT:
1788 {
1789 struct sctp_assoc_value *av;
1790
1791 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1792 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1793
1794 if (stcb) {
1795 av->assoc_value = stcb->asoc.context;
1796 SCTP_TCB_UNLOCK(stcb);
1797 } else {
1798 SCTP_INP_RLOCK(inp);
1799 av->assoc_value = inp->sctp_context;
1800 SCTP_INP_RUNLOCK(inp);
1801 }
1802 *optsize = sizeof(*av);
1803 }
1804 break;
1805 case SCTP_VRF_ID:
1806 {
1807 uint32_t *default_vrfid;
1808
1809 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, *optsize);
1810 *default_vrfid = inp->def_vrf_id;
1811 break;
1812 }
1813 case SCTP_GET_ASOC_VRF:
1814 {
1815 struct sctp_assoc_value *id;
1816
1817 SCTP_CHECK_AND_CAST(id, optval, struct sctp_assoc_value, *optsize);
1818 SCTP_FIND_STCB(inp, stcb, id->assoc_id);
1819 if (stcb == NULL) {
1820 error = EINVAL;
1821 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
1822 break;
1823 }
1824 id->assoc_value = stcb->asoc.vrf_id;
1825 break;
1826 }
1827 case SCTP_GET_VRF_IDS:
1828 {
1829 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
1830 error = EOPNOTSUPP;
1831 break;
1832 }
1833 case SCTP_GET_NONCE_VALUES:
1834 {
1835 struct sctp_get_nonce_values *gnv;
1836
1837 SCTP_CHECK_AND_CAST(gnv, optval, struct sctp_get_nonce_values, *optsize);
1838 SCTP_FIND_STCB(inp, stcb, gnv->gn_assoc_id);
1839
1840 if (stcb) {
1841 gnv->gn_peers_tag = stcb->asoc.peer_vtag;
1842 gnv->gn_local_tag = stcb->asoc.my_vtag;
1843 SCTP_TCB_UNLOCK(stcb);
1844 } else {
1845 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1846 error = ENOTCONN;
1847 }
1848 *optsize = sizeof(*gnv);
1849 }
1850 break;
1851 case SCTP_DELAYED_SACK:
1852 {
1853 struct sctp_sack_info *sack;
1854
1855 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, *optsize);
1856 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
1857 if (stcb) {
1858 sack->sack_delay = stcb->asoc.delayed_ack;
1859 sack->sack_freq = stcb->asoc.sack_freq;
1860 SCTP_TCB_UNLOCK(stcb);
1861 } else {
1862 SCTP_INP_RLOCK(inp);
1863 sack->sack_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
1864 sack->sack_freq = inp->sctp_ep.sctp_sack_freq;
1865 SCTP_INP_RUNLOCK(inp);
1866 }
1867 *optsize = sizeof(*sack);
1868 }
1869 break;
1870
1871 case SCTP_GET_SNDBUF_USE:
1872 {
1873 struct sctp_sockstat *ss;
1874
1875 SCTP_CHECK_AND_CAST(ss, optval, struct sctp_sockstat, *optsize);
1876 SCTP_FIND_STCB(inp, stcb, ss->ss_assoc_id);
1877
1878 if (stcb) {
1879 ss->ss_total_sndbuf = stcb->asoc.total_output_queue_size;
1880 ss->ss_total_recv_buf = (stcb->asoc.size_on_reasm_queue +
1881 stcb->asoc.size_on_all_streams);
1882 SCTP_TCB_UNLOCK(stcb);
1883 } else {
1884 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
1885 error = ENOTCONN;
1886 }
1887 *optsize = sizeof(struct sctp_sockstat);
1888 }
1889 break;
1890 case SCTP_MAX_BURST:
1891 {
1892 uint8_t *value;
1893
1894 SCTP_CHECK_AND_CAST(value, optval, uint8_t, *optsize);
1895
1896 SCTP_INP_RLOCK(inp);
1897 *value = inp->sctp_ep.max_burst;
1898 SCTP_INP_RUNLOCK(inp);
1899 *optsize = sizeof(uint8_t);
1900 }
1901 break;
1902 case SCTP_MAXSEG:
1903 {
1904 struct sctp_assoc_value *av;
1905 int ovh;
1906
1907 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, *optsize);
1908 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
1909
1910 if (stcb) {
1911 av->assoc_value = sctp_get_frag_point(stcb, &stcb->asoc);
1912 SCTP_TCB_UNLOCK(stcb);
1913 } else {
1914 SCTP_INP_RLOCK(inp);
1915 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1916 ovh = SCTP_MED_OVERHEAD;
1917 } else {
1918 ovh = SCTP_MED_V4_OVERHEAD;
1919 }
1920 if (inp->sctp_frag_point >= SCTP_DEFAULT_MAXSEGMENT)
1921 av->assoc_value = 0;
1922 else
1923 av->assoc_value = inp->sctp_frag_point - ovh;
1924 SCTP_INP_RUNLOCK(inp);
1925 }
1926 *optsize = sizeof(struct sctp_assoc_value);
1927 }
1928 break;
1929 case SCTP_GET_STAT_LOG:
1930 error = sctp_fill_stat_log(optval, optsize);
1931 break;
1932 case SCTP_EVENTS:
1933 {
1934 struct sctp_event_subscribe *events;
1935
1936 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, *optsize);
1937 memset(events, 0, sizeof(*events));
1938 SCTP_INP_RLOCK(inp);
1939 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT))
1940 events->sctp_data_io_event = 1;
1941
1942 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT))
1943 events->sctp_association_event = 1;
1944
1945 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT))
1946 events->sctp_address_event = 1;
1947
1948 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT))
1949 events->sctp_send_failure_event = 1;
1950
1951 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR))
1952 events->sctp_peer_error_event = 1;
1953
1954 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT))
1955 events->sctp_shutdown_event = 1;
1956
1957 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT))
1958 events->sctp_partial_delivery_event = 1;
1959
1960 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT))
1961 events->sctp_adaptation_layer_event = 1;
1962
1963 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT))
1964 events->sctp_authentication_event = 1;
1965
1966 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT))
1967 events->sctp_stream_reset_events = 1;
1968 SCTP_INP_RUNLOCK(inp);
1969 *optsize = sizeof(struct sctp_event_subscribe);
1970 }
1971 break;
1972
1973 case SCTP_ADAPTATION_LAYER:
1974 {
1975 uint32_t *value;
1976
1977 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1978
1979 SCTP_INP_RLOCK(inp);
1980 *value = inp->sctp_ep.adaptation_layer_indicator;
1981 SCTP_INP_RUNLOCK(inp);
1982 *optsize = sizeof(uint32_t);
1983 }
1984 break;
1985 case SCTP_SET_INITIAL_DBG_SEQ:
1986 {
1987 uint32_t *value;
1988
1989 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
1990 SCTP_INP_RLOCK(inp);
1991 *value = inp->sctp_ep.initial_sequence_debug;
1992 SCTP_INP_RUNLOCK(inp);
1993 *optsize = sizeof(uint32_t);
1994 }
1995 break;
1996 case SCTP_GET_LOCAL_ADDR_SIZE:
1997 {
1998 uint32_t *value;
1999
2000 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2001 SCTP_INP_RLOCK(inp);
2002 *value = sctp_count_max_addresses(inp);
2003 SCTP_INP_RUNLOCK(inp);
2004 *optsize = sizeof(uint32_t);
2005 }
2006 break;
2007 case SCTP_GET_REMOTE_ADDR_SIZE:
2008 {
2009 uint32_t *value;
2010 size_t size;
2011 struct sctp_nets *net;
2012
2013 SCTP_CHECK_AND_CAST(value, optval, uint32_t, *optsize);
2014 /* FIXME MT: change to sctp_assoc_value? */
2015 SCTP_FIND_STCB(inp, stcb, (sctp_assoc_t) * value);
2016
2017 if (stcb) {
2018 size = 0;
2019 /* Count the sizes */
2020 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2021 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
2022 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2023 size += sizeof(struct sockaddr_in6);
2024 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2025 size += sizeof(struct sockaddr_in);
2026 } else {
2027 /* huh */
2028 break;
2029 }
2030 }
2031 SCTP_TCB_UNLOCK(stcb);
2032 *value = (uint32_t) size;
2033 } else {
2034 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
2035 error = ENOTCONN;
2036 }
2037 *optsize = sizeof(uint32_t);
2038 }
2039 break;
2040 case SCTP_GET_PEER_ADDRESSES:
2041 /*
2042		 * Get the address information; an array is passed in
2043		 * for us to fill up, and we pack the peer addresses into it.
2044 */
2045 {
2046 size_t cpsz, left;
2047 struct sockaddr_storage *sas;
2048 struct sctp_nets *net;
2049 struct sctp_getaddresses *saddr;
2050
2051 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
2052 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
2053
2054 if (stcb) {
2055 left = (*optsize) - sizeof(struct sctp_getaddresses);
2056 *optsize = sizeof(struct sctp_getaddresses);
2057 sas = (struct sockaddr_storage *)&saddr->addr[0];
2058
2059 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2060 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) ||
2061 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET6)) {
2062 cpsz = sizeof(struct sockaddr_in6);
2063 } else if (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET) {
2064 cpsz = sizeof(struct sockaddr_in);
2065 } else {
2066 /* huh */
2067 break;
2068 }
2069 if (left < cpsz) {
2070 /* not enough room. */
2071 break;
2072 }
2073#ifdef INET6
2074 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) &&
2075 (((struct sockaddr *)&net->ro._l_addr)->sa_family == AF_INET)) {
2076 /* Must map the address */
2077 in6_sin_2_v4mapsin6((struct sockaddr_in *)&net->ro._l_addr,
2078 (struct sockaddr_in6 *)sas);
2079 } else {
2080#endif
2081 memcpy(sas, &net->ro._l_addr, cpsz);
2082#ifdef INET6
2083 }
2084#endif
2085 ((struct sockaddr_in *)sas)->sin_port = stcb->rport;
2086
2087 sas = (struct sockaddr_storage *)((caddr_t)sas + cpsz);
2088 left -= cpsz;
2089 *optsize += cpsz;
2090 }
2091 SCTP_TCB_UNLOCK(stcb);
2092 } else {
2093 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2094 error = ENOENT;
2095 }
2096 }
2097 break;
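	/*
	 * SCTP_GET_LOCAL_ADDRESSES: pack the local addresses usable by the
	 * endpoint (or by the given association) into the caller's buffer,
	 * limited by the space the caller provided.
	 */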
2098 case SCTP_GET_LOCAL_ADDRESSES:
2099 {
2100 size_t limit, actual;
2101 struct sockaddr_storage *sas;
2102 struct sctp_getaddresses *saddr;
2103
2104 SCTP_CHECK_AND_CAST(saddr, optval, struct sctp_getaddresses, *optsize);
2105 SCTP_FIND_STCB(inp, stcb, saddr->sget_assoc_id);
2106
2107 sas = (struct sockaddr_storage *)&saddr->addr[0];
2108 limit = *optsize - sizeof(sctp_assoc_t);
2109 actual = sctp_fill_up_addresses(inp, stcb, limit, sas);
2110 if (stcb) {
2111 SCTP_TCB_UNLOCK(stcb);
2112 }
2113 *optsize = sizeof(struct sockaddr_storage) + actual;
2114 }
2115 break;
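	/*
	 * SCTP_PEER_ADDR_PARAMS (get): the caller selects a destination by
	 * association id and/or peer address.  If an association is found
	 * but the address does not match one of its destinations, only the
	 * unspecified (wildcard) address is accepted.  With a destination
	 * the per-net values are returned, with only an association its
	 * defaults, and with neither the endpoint defaults.
	 */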
2116 case SCTP_PEER_ADDR_PARAMS:
2117 {
2118 struct sctp_paddrparams *paddrp;
2119 struct sctp_nets *net;
2120
2121 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, *optsize);
2122 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
2123
2124 net = NULL;
2125 if (stcb) {
2126 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
2127 } else {
2128 /*
2129 * We increment here since
2130				 * sctp_findassociation_ep_addr() will do a
2131 * decrement if it finds the stcb as long as
2132 * the locked tcb (last argument) is NOT a
2133 * TCB.. aka NULL.
2134 */
2135 SCTP_INP_INCR_REF(inp);
2136 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddrp->spp_address, &net, NULL, NULL);
2137 if (stcb == NULL) {
2138 SCTP_INP_DECR_REF(inp);
2139 }
2140 }
2141 if (stcb && (net == NULL)) {
2142 struct sockaddr *sa;
2143
2144 sa = (struct sockaddr *)&paddrp->spp_address;
2145 if (sa->sa_family == AF_INET) {
2146 struct sockaddr_in *sin;
2147
2148 sin = (struct sockaddr_in *)sa;
2149 if (sin->sin_addr.s_addr) {
2150 error = EINVAL;
2151 SCTP_TCB_UNLOCK(stcb);
2152 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2153 break;
2154 }
2155 } else if (sa->sa_family == AF_INET6) {
2156 struct sockaddr_in6 *sin6;
2157
2158 sin6 = (struct sockaddr_in6 *)sa;
2159 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
2160 error = EINVAL;
2161 SCTP_TCB_UNLOCK(stcb);
2162 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2163 break;
2164 }
2165 } else {
2166 error = EAFNOSUPPORT;
2167 SCTP_TCB_UNLOCK(stcb);
2168 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2169 break;
2170 }
2171 }
2172 if (stcb) {
2173			/* Applies to the specific association */
2174 paddrp->spp_flags = 0;
2175 if (net) {
2176 int ovh;
2177
2178 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2179 ovh = SCTP_MED_OVERHEAD;
2180 } else {
2181 ovh = SCTP_MED_V4_OVERHEAD;
2182 }
2183
2184
2185 paddrp->spp_pathmaxrxt = net->failure_threshold;
2186 paddrp->spp_pathmtu = net->mtu - ovh;
2187 /* get flags for HB */
2188 if (net->dest_state & SCTP_ADDR_NOHB)
2189 paddrp->spp_flags |= SPP_HB_DISABLE;
2190 else
2191 paddrp->spp_flags |= SPP_HB_ENABLE;
2192 /* get flags for PMTU */
2193 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
2194 paddrp->spp_flags |= SPP_PMTUD_ENABLE;
2195 } else {
2196 paddrp->spp_flags |= SPP_PMTUD_DISABLE;
2197 }
2198#ifdef INET
2199 if (net->ro._l_addr.sin.sin_family == AF_INET) {
2200 paddrp->spp_ipv4_tos = net->tos_flowlabel & 0x000000fc;
2201 paddrp->spp_flags |= SPP_IPV4_TOS;
2202 }
2203#endif
2204#ifdef INET6
2205 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
2206 paddrp->spp_ipv6_flowlabel = net->tos_flowlabel;
2207 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2208 }
2209#endif
2210 } else {
2211 /*
2212 * No destination so return default
2213 * value
2214 */
2215 int cnt = 0;
2216
2217 paddrp->spp_pathmaxrxt = stcb->asoc.def_net_failure;
2218 paddrp->spp_pathmtu = sctp_get_frag_point(stcb, &stcb->asoc);
2219#ifdef INET
2220 paddrp->spp_ipv4_tos = stcb->asoc.default_tos & 0x000000fc;
2221 paddrp->spp_flags |= SPP_IPV4_TOS;
2222#endif
2223#ifdef INET6
2224 paddrp->spp_ipv6_flowlabel = stcb->asoc.default_flowlabel;
2225 paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2226#endif
2227 /* default settings should be these */
2228 if (stcb->asoc.hb_is_disabled == 0) {
2229 paddrp->spp_flags |= SPP_HB_ENABLE;
2230 } else {
2231 paddrp->spp_flags |= SPP_HB_DISABLE;
2232 }
2233 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2234 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
2235 cnt++;
2236 }
2237 }
2238 if (cnt) {
2239 paddrp->spp_flags |= SPP_PMTUD_ENABLE;
2240 }
2241 }
2242 paddrp->spp_hbinterval = stcb->asoc.heart_beat_delay;
2243 paddrp->spp_assoc_id = sctp_get_associd(stcb);
2244 SCTP_TCB_UNLOCK(stcb);
2245 } else {
2246 /* Use endpoint defaults */
2247 SCTP_INP_RLOCK(inp);
2248 paddrp->spp_pathmaxrxt = inp->sctp_ep.def_net_failure;
2249 paddrp->spp_hbinterval = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
2250 paddrp->spp_assoc_id = (sctp_assoc_t) 0;
2251			/* default behavior, no stcb */
2252			paddrp->spp_flags = SPP_PMTUD_ENABLE;
2253
2254			/* get inp's default */
2255#ifdef INET
2256			paddrp->spp_ipv4_tos = inp->ip_inp.inp.inp_ip_tos;
2257			paddrp->spp_flags |= SPP_IPV4_TOS;
2258#endif
2259#ifdef INET6
2260			if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2261				paddrp->spp_ipv6_flowlabel = ((struct in6pcb *)inp)->in6p_flowinfo;
2262				paddrp->spp_flags |= SPP_IPV6_FLOWLABEL;
2263			}
2264#endif
2265			/* can't return this */
2266			paddrp->spp_pathmtu = 0;
2267
2268 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT)) {
2269 paddrp->spp_flags |= SPP_HB_ENABLE;
2270 } else {
2271 paddrp->spp_flags |= SPP_HB_DISABLE;
2272 }
2273 SCTP_INP_RUNLOCK(inp);
2274 }
2275 *optsize = sizeof(struct sctp_paddrparams);
2276 }
2277 break;
2278 case SCTP_GET_PEER_ADDR_INFO:
2279 {
2280 struct sctp_paddrinfo *paddri;
2281 struct sctp_nets *net;
2282
2283 SCTP_CHECK_AND_CAST(paddri, optval, struct sctp_paddrinfo, *optsize);
2284 SCTP_FIND_STCB(inp, stcb, paddri->spinfo_assoc_id);
2285
2286 net = NULL;
2287 if (stcb) {
2288 net = sctp_findnet(stcb, (struct sockaddr *)&paddri->spinfo_address);
2289 } else {
2290 /*
2291 * We increment here since
2292				 * sctp_findassociation_ep_addr() will do a
2293 * decrement if it finds the stcb as long as
2294 * the locked tcb (last argument) is NOT a
2295 * TCB.. aka NULL.
2296 */
2297 SCTP_INP_INCR_REF(inp);
2298 stcb = sctp_findassociation_ep_addr(&inp, (struct sockaddr *)&paddri->spinfo_address, &net, NULL, NULL);
2299 if (stcb == NULL) {
2300 SCTP_INP_DECR_REF(inp);
2301 }
2302 }
2303
2304 if ((stcb) && (net)) {
2305 paddri->spinfo_state = net->dest_state & (SCTP_REACHABLE_MASK | SCTP_ADDR_NOHB);
2306 paddri->spinfo_cwnd = net->cwnd;
2307 paddri->spinfo_srtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
2308 paddri->spinfo_rto = net->RTO;
2309 paddri->spinfo_assoc_id = sctp_get_associd(stcb);
2310 SCTP_TCB_UNLOCK(stcb);
2311 } else {
2312 if (stcb) {
2313 SCTP_TCB_UNLOCK(stcb);
2314 }
2315 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2316 error = ENOENT;
2317 }
2318 *optsize = sizeof(struct sctp_paddrinfo);
2319 }
2320 break;
2321 case SCTP_PCB_STATUS:
2322 {
2323 struct sctp_pcbinfo *spcb;
2324
2325 SCTP_CHECK_AND_CAST(spcb, optval, struct sctp_pcbinfo, *optsize);
2326 sctp_fill_pcbinfo(spcb);
2327 *optsize = sizeof(struct sctp_pcbinfo);
2328 }
2329 break;
2330
2331 case SCTP_STATUS:
2332 {
2333 struct sctp_nets *net;
2334 struct sctp_status *sstat;
2335
2336 SCTP_CHECK_AND_CAST(sstat, optval, struct sctp_status, *optsize);
2337 SCTP_FIND_STCB(inp, stcb, sstat->sstat_assoc_id);
2338
2339 if (stcb == NULL) {
2340			error = EINVAL;
2341			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2342 break;
2343 }
2344 /*
2345 * I think passing the state is fine since
2346 * sctp_constants.h will be available to the user
2347 * land.
2348 */
2349 sstat->sstat_state = stcb->asoc.state;
2350 sstat->sstat_assoc_id = sctp_get_associd(stcb);
2351 sstat->sstat_rwnd = stcb->asoc.peers_rwnd;
2352 sstat->sstat_unackdata = stcb->asoc.sent_queue_cnt;
2353 /*
2354 * We can't include chunks that have been passed to
2355 * the socket layer. Only things in queue.
2356 */
2357 sstat->sstat_penddata = (stcb->asoc.cnt_on_reasm_queue +
2358 stcb->asoc.cnt_on_all_streams);
2359
2360
2361 sstat->sstat_instrms = stcb->asoc.streamincnt;
2362 sstat->sstat_outstrms = stcb->asoc.streamoutcnt;
2363 sstat->sstat_fragmentation_point = sctp_get_frag_point(stcb, &stcb->asoc);
2364 memcpy(&sstat->sstat_primary.spinfo_address,
2365 &stcb->asoc.primary_destination->ro._l_addr,
2366 ((struct sockaddr *)(&stcb->asoc.primary_destination->ro._l_addr))->sa_len);
2367 net = stcb->asoc.primary_destination;
2368 ((struct sockaddr_in *)&sstat->sstat_primary.spinfo_address)->sin_port = stcb->rport;
2369 /*
2370 * Again the user can get info from sctp_constants.h
2371 * for what the state of the network is.
2372 */
2373 sstat->sstat_primary.spinfo_state = net->dest_state & SCTP_REACHABLE_MASK;
2374 sstat->sstat_primary.spinfo_cwnd = net->cwnd;
2375 sstat->sstat_primary.spinfo_srtt = net->lastsa;
2376 sstat->sstat_primary.spinfo_rto = net->RTO;
2377 sstat->sstat_primary.spinfo_mtu = net->mtu;
2378 sstat->sstat_primary.spinfo_assoc_id = sctp_get_associd(stcb);
2379 SCTP_TCB_UNLOCK(stcb);
2380 *optsize = sizeof(*sstat);
2381 }
2382 break;
2383 case SCTP_RTOINFO:
2384 {
2385 struct sctp_rtoinfo *srto;
2386
2387 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, *optsize);
2388 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
2389
2390 if (stcb) {
2391 srto->srto_initial = stcb->asoc.initial_rto;
2392 srto->srto_max = stcb->asoc.maxrto;
2393 srto->srto_min = stcb->asoc.minrto;
2394 SCTP_TCB_UNLOCK(stcb);
2395 } else {
2396 SCTP_INP_RLOCK(inp);
2397 srto->srto_initial = inp->sctp_ep.initial_rto;
2398 srto->srto_max = inp->sctp_ep.sctp_maxrto;
2399 srto->srto_min = inp->sctp_ep.sctp_minrto;
2400 SCTP_INP_RUNLOCK(inp);
2401 }
2402 *optsize = sizeof(*srto);
2403 }
2404 break;
2405 case SCTP_ASSOCINFO:
2406 {
2407 struct sctp_assocparams *sasoc;
2408 uint32_t oldval;
2409
2410 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, *optsize);
2411 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
2412
2413 if (stcb) {
2414 oldval = sasoc->sasoc_cookie_life;
2415 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(stcb->asoc.cookie_life);
2416 sasoc->sasoc_asocmaxrxt = stcb->asoc.max_send_times;
2417 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
2418 sasoc->sasoc_peer_rwnd = stcb->asoc.peers_rwnd;
2419 sasoc->sasoc_local_rwnd = stcb->asoc.my_rwnd;
2420 SCTP_TCB_UNLOCK(stcb);
2421 } else {
2422 SCTP_INP_RLOCK(inp);
2423 sasoc->sasoc_cookie_life = TICKS_TO_MSEC(inp->sctp_ep.def_cookie_life);
2424 sasoc->sasoc_asocmaxrxt = inp->sctp_ep.max_send_times;
2425 sasoc->sasoc_number_peer_destinations = 0;
2426 sasoc->sasoc_peer_rwnd = 0;
2427 sasoc->sasoc_local_rwnd = sbspace(&inp->sctp_socket->so_rcv);
2428 SCTP_INP_RUNLOCK(inp);
2429 }
2430 *optsize = sizeof(*sasoc);
2431 }
2432 break;
2433 case SCTP_DEFAULT_SEND_PARAM:
2434 {
2435 struct sctp_sndrcvinfo *s_info;
2436
2437 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, *optsize);
2438 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
2439
2440 if (stcb) {
2441 memcpy(s_info, &stcb->asoc.def_send, sizeof(stcb->asoc.def_send));
2442 SCTP_TCB_UNLOCK(stcb);
2443 } else {
2444 SCTP_INP_RLOCK(inp);
2445 memcpy(s_info, &inp->def_send, sizeof(inp->def_send));
2446 SCTP_INP_RUNLOCK(inp);
2447 }
2448 *optsize = sizeof(*s_info);
2449 }
2450 break;
2451 case SCTP_INITMSG:
2452 {
2453 struct sctp_initmsg *sinit;
2454
2455 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, *optsize);
2456 SCTP_INP_RLOCK(inp);
2457 sinit->sinit_num_ostreams = inp->sctp_ep.pre_open_stream_count;
2458 sinit->sinit_max_instreams = inp->sctp_ep.max_open_streams_intome;
2459 sinit->sinit_max_attempts = inp->sctp_ep.max_init_times;
2460 sinit->sinit_max_init_timeo = inp->sctp_ep.initial_init_rto_max;
2461 SCTP_INP_RUNLOCK(inp);
2462 *optsize = sizeof(*sinit);
2463 }
2464 break;
2465 case SCTP_PRIMARY_ADDR:
2466 /* we allow a "get" operation on this */
2467 {
2468 struct sctp_setprim *ssp;
2469
2470 SCTP_CHECK_AND_CAST(ssp, optval, struct sctp_setprim, *optsize);
2471 SCTP_FIND_STCB(inp, stcb, ssp->ssp_assoc_id);
2472
2473 if (stcb) {
2474 /* simply copy out the sockaddr_storage... */
2475 int len;
2476
2477 len = *optsize;
2478 if (len > stcb->asoc.primary_destination->ro._l_addr.sa.sa_len)
2479 len = stcb->asoc.primary_destination->ro._l_addr.sa.sa_len;
2480
2481 memcpy(&ssp->ssp_addr,
2482 &stcb->asoc.primary_destination->ro._l_addr,
2483 len);
2484 SCTP_TCB_UNLOCK(stcb);
2485 } else {
2486			error = EINVAL;
2487			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2488 }
2489 *optsize = sizeof(*ssp);
2490 }
2491 break;
2492
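	/*
	 * SCTP_HMAC_IDENT (get): return the endpoint's list of preferred
	 * HMAC algorithm identifiers.  The reply is variable length, so the
	 * caller must provide room for the header plus one identifier per
	 * configured algorithm.
	 */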
2493 case SCTP_HMAC_IDENT:
2494 {
2495 struct sctp_hmacalgo *shmac;
2496 sctp_hmaclist_t *hmaclist;
2497 uint32_t size;
2498 int i;
2499
2500 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, *optsize);
2501
2502 SCTP_INP_RLOCK(inp);
2503 hmaclist = inp->sctp_ep.local_hmacs;
2504 if (hmaclist == NULL) {
2505 /* no HMACs to return */
2506 *optsize = sizeof(*shmac);
2507 SCTP_INP_RUNLOCK(inp);
2508 break;
2509 }
2510 /* is there room for all of the hmac ids? */
2511 size = sizeof(*shmac) + (hmaclist->num_algo *
2512 sizeof(shmac->shmac_idents[0]));
2513 if ((size_t)(*optsize) < size) {
2514			error = EINVAL;
2515			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2516 SCTP_INP_RUNLOCK(inp);
2517 break;
2518 }
2519 /* copy in the list */
2520 for (i = 0; i < hmaclist->num_algo; i++)
2521 shmac->shmac_idents[i] = hmaclist->hmac[i];
2522 SCTP_INP_RUNLOCK(inp);
2523 *optsize = size;
2524 break;
2525 }
2526 case SCTP_AUTH_ACTIVE_KEY:
2527 {
2528 struct sctp_authkeyid *scact;
2529
2530 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, *optsize);
2531 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
2532
2533 if (stcb) {
2534 /* get the active key on the assoc */
2535 scact->scact_keynumber = stcb->asoc.authinfo.assoc_keyid;
2536 SCTP_TCB_UNLOCK(stcb);
2537 } else {
2538 /* get the endpoint active key */
2539 SCTP_INP_RLOCK(inp);
2540 scact->scact_keynumber = inp->sctp_ep.default_keyid;
2541 SCTP_INP_RUNLOCK(inp);
2542 }
2543 *optsize = sizeof(*scact);
2544 break;
2545 }
2546 case SCTP_LOCAL_AUTH_CHUNKS:
2547 {
2548 struct sctp_authchunks *sac;
2549 sctp_auth_chklist_t *chklist = NULL;
2550 size_t size = 0;
2551
2552 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2553 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2554
2555 if (stcb) {
2556 /* get off the assoc */
2557 chklist = stcb->asoc.local_auth_chunks;
2558 /* is there enough space? */
2559 size = sctp_auth_get_chklist_size(chklist);
2560 if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2561 error = EINVAL;
2562 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2563 } else {
2564 /* copy in the chunks */
2565 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2566 }
2567 SCTP_TCB_UNLOCK(stcb);
2568 } else {
2569 /* get off the endpoint */
2570 SCTP_INP_RLOCK(inp);
2571 chklist = inp->sctp_ep.local_auth_chunks;
2572 /* is there enough space? */
2573 size = sctp_auth_get_chklist_size(chklist);
2574 if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2575 error = EINVAL;
2576 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2577 } else {
2578 /* copy in the chunks */
2579 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2580 }
2581 SCTP_INP_RUNLOCK(inp);
2582 }
2583 *optsize = sizeof(struct sctp_authchunks) + size;
2584 break;
2585 }
2586 case SCTP_PEER_AUTH_CHUNKS:
2587 {
2588 struct sctp_authchunks *sac;
2589 sctp_auth_chklist_t *chklist = NULL;
2590 size_t size = 0;
2591
2592 SCTP_CHECK_AND_CAST(sac, optval, struct sctp_authchunks, *optsize);
2593 SCTP_FIND_STCB(inp, stcb, sac->gauth_assoc_id);
2594
2595 if (stcb) {
2596 /* get off the assoc */
2597 chklist = stcb->asoc.peer_auth_chunks;
2598 /* is there enough space? */
2599 size = sctp_auth_get_chklist_size(chklist);
2600 if (*optsize < (sizeof(struct sctp_authchunks) + size)) {
2601 error = EINVAL;
2602 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
2603 } else {
2604 /* copy in the chunks */
2605 (void)sctp_serialize_auth_chunks(chklist, sac->gauth_chunks);
2606 }
2607 SCTP_TCB_UNLOCK(stcb);
2608 } else {
2609 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
2610 error = ENOENT;
2611 }
2612 *optsize = sizeof(struct sctp_authchunks) + size;
2613 break;
2614 }
2615
2616
2617 default:
2618 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
2619 error = ENOPROTOOPT;
2620 *optsize = 0;
2621 break;
2622 } /* end switch (sopt->sopt_name) */
2623 return (error);
2624}
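
/*
 * Illustrative sketch (hypothetical userland caller, not part of this file):
 * the read-only options handled above are normally reached through
 * getsockopt(2) at the IPPROTO_SCTP level, e.g. to fetch SCTP_STATUS for an
 * association.  "sd" and "assoc_id" are assumed to be a connected SCTP
 * socket and a valid association id, with <netinet/sctp.h> included:
 *
 *	struct sctp_status status;
 *	socklen_t len = sizeof(status);
 *
 *	memset(&status, 0, sizeof(status));
 *	status.sstat_assoc_id = assoc_id;
 *	if (getsockopt(sd, IPPROTO_SCTP, SCTP_STATUS, &status, &len) == 0)
 *		printf("unacked chunks: %u\n", (unsigned)status.sstat_unackdata);
 */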
2625
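/*
 * sctp_setopt() implements the IPPROTO_SCTP-level setsockopt() handling.
 * The option value arrives in a kernel buffer (optval/optsize); each case
 * below validates it and applies it either to a specific association
 * (stcb), when one is identified, or to the endpoint (inp) defaults.
 */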
2626static int
2627sctp_setopt(struct socket *so, int optname, void *optval, size_t optsize,
2628 void *p)
2629{
2630 int error, set_opt;
2631 uint32_t *mopt;
2632 struct sctp_tcb *stcb = NULL;
2633 struct sctp_inpcb *inp = NULL;
2634 uint32_t vrf_id;
2635
2636 if (optval == NULL) {
2637 SCTP_PRINTF("optval is NULL\n");
2638 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2639 return (EINVAL);
2640 }
2641 inp = (struct sctp_inpcb *)so->so_pcb;
2642	if (inp == NULL) {
2643		SCTP_PRINTF("inp is NULL?\n");
2644		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2645		return (EINVAL);
2646 }
2647 vrf_id = inp->def_vrf_id;
2648
2649 error = 0;
2650 switch (optname) {
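	/*
	 * The following options are simple on/off endpoint flags and share
	 * the sctp_feature_on()/sctp_feature_off() handling below;
	 * SCTP_AUTOCLOSE additionally records the auto-close time (given in
	 * seconds) for future associations.
	 */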
2651 case SCTP_NODELAY:
2652 case SCTP_AUTOCLOSE:
2653 case SCTP_AUTO_ASCONF:
2654 case SCTP_EXPLICIT_EOR:
2655 case SCTP_DISABLE_FRAGMENTS:
2656 case SCTP_USE_EXT_RCVINFO:
2657 case SCTP_I_WANT_MAPPED_V4_ADDR:
2658 /* copy in the option value */
2659 SCTP_CHECK_AND_CAST(mopt, optval, uint32_t, optsize);
2660 set_opt = 0;
2661 if (error)
2662 break;
2663 switch (optname) {
2664 case SCTP_DISABLE_FRAGMENTS:
2665 set_opt = SCTP_PCB_FLAGS_NO_FRAGMENT;
2666 break;
2667 case SCTP_AUTO_ASCONF:
2668 /*
2669 * NOTE: we don't really support this flag
2670 */
2671 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
2672 /* only valid for bound all sockets */
2673 set_opt = SCTP_PCB_FLAGS_AUTO_ASCONF;
2674 } else {
2675 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2676 return (EINVAL);
2677 }
2678 break;
2679 case SCTP_EXPLICIT_EOR:
2680 set_opt = SCTP_PCB_FLAGS_EXPLICIT_EOR;
2681 break;
2682 case SCTP_USE_EXT_RCVINFO:
2683 set_opt = SCTP_PCB_FLAGS_EXT_RCVINFO;
2684 break;
2685 case SCTP_I_WANT_MAPPED_V4_ADDR:
2686 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
2687 set_opt = SCTP_PCB_FLAGS_NEEDS_MAPPED_V4;
2688 } else {
2689 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2690 return (EINVAL);
2691 }
2692 break;
2693 case SCTP_NODELAY:
2694 set_opt = SCTP_PCB_FLAGS_NODELAY;
2695 break;
2696 case SCTP_AUTOCLOSE:
2697 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
2698 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
2699 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2700 return (EINVAL);
2701 }
2702 set_opt = SCTP_PCB_FLAGS_AUTOCLOSE;
2703 /*
2704			 * The value is given in seconds and stored in ticks.
2705			 * Note this does not affect old associations, only new ones.
2706 */
2707 inp->sctp_ep.auto_close_time = SEC_TO_TICKS(*mopt);
2708 break;
2709 }
2710 SCTP_INP_WLOCK(inp);
2711 if (*mopt != 0) {
2712 sctp_feature_on(inp, set_opt);
2713 } else {
2714 sctp_feature_off(inp, set_opt);
2715 }
2716 SCTP_INP_WUNLOCK(inp);
2717 break;
2718 case SCTP_PARTIAL_DELIVERY_POINT:
2719 {
2720 uint32_t *value;
2721
2722 SCTP_CHECK_AND_CAST(value, optval, uint32_t, optsize);
2723 if (*value > SCTP_SB_LIMIT_RCV(so)) {
2724 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2725 error = EINVAL;
2726 break;
2727 }
2728 inp->partial_delivery_point = *value;
2729 }
2730 break;
2731 case SCTP_FRAGMENT_INTERLEAVE:
2732 /* not yet until we re-write sctp_recvmsg() */
2733 {
2734 uint32_t *level;
2735
2736 SCTP_CHECK_AND_CAST(level, optval, uint32_t, optsize);
2737 if (*level == SCTP_FRAG_LEVEL_2) {
2738 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2739 sctp_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2740 } else if (*level == SCTP_FRAG_LEVEL_1) {
2741 sctp_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2742 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2743 } else if (*level == SCTP_FRAG_LEVEL_0) {
2744 sctp_feature_off(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE);
2745 sctp_feature_off(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS);
2746
2747 } else {
2748 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2749 error = EINVAL;
2750 }
2751 }
2752 break;
2753 case SCTP_CMT_ON_OFF:
2754 {
2755 struct sctp_assoc_value *av;
2756
2757 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2758 if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
2759 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2760 if (stcb) {
2761 stcb->asoc.sctp_cmt_on_off = (uint8_t) av->assoc_value;
2762 SCTP_TCB_UNLOCK(stcb);
2763 } else {
2764 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
2765 error = ENOTCONN;
2766 }
2767 } else {
2768 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
2769 error = ENOPROTOOPT;
2770 }
2771 }
2772 break;
2773 /* JRS - Set socket option for pluggable congestion control */
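	/*
	 * With an association id the congestion control function table of
	 * that association is switched immediately; without one, only the
	 * endpoint's default module for future associations is changed.
	 */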
2774 case SCTP_PLUGGABLE_CC:
2775 {
2776 struct sctp_assoc_value *av;
2777
2778 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2779 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2780 if (stcb) {
2781 switch (av->assoc_value) {
2782 /*
2783 * JRS - Standard TCP congestion
2784 * control
2785 */
2786 case SCTP_CC_RFC2581:
2787 {
2788 stcb->asoc.congestion_control_module = SCTP_CC_RFC2581;
2789 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
2790 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_cwnd_update_after_sack;
2791 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_cwnd_update_after_fr;
2792 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
2793 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
2794 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2795 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2796 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
2797 SCTP_TCB_UNLOCK(stcb);
2798 break;
2799 }
2800 /*
2801 * JRS - High Speed TCP congestion
2802 * control (Floyd)
2803 */
2804 case SCTP_CC_HSTCP:
2805 {
2806 stcb->asoc.congestion_control_module = SCTP_CC_HSTCP;
2807 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_set_initial_cc_param;
2808 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_hs_cwnd_update_after_sack;
2809 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_hs_cwnd_update_after_fr;
2810 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_cwnd_update_after_timeout;
2811 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_cwnd_update_after_ecn_echo;
2812 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2813 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2814 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_cwnd_update_after_fr_timer;
2815 SCTP_TCB_UNLOCK(stcb);
2816 break;
2817 }
2818 /* JRS - HTCP congestion control */
2819 case SCTP_CC_HTCP:
2820 {
2821 stcb->asoc.congestion_control_module = SCTP_CC_HTCP;
2822 stcb->asoc.cc_functions.sctp_set_initial_cc_param = &sctp_htcp_set_initial_cc_param;
2823 stcb->asoc.cc_functions.sctp_cwnd_update_after_sack = &sctp_htcp_cwnd_update_after_sack;
2824 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr = &sctp_htcp_cwnd_update_after_fr;
2825 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout = &sctp_htcp_cwnd_update_after_timeout;
2826 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo = &sctp_htcp_cwnd_update_after_ecn_echo;
2827 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped = &sctp_cwnd_update_after_packet_dropped;
2828 stcb->asoc.cc_functions.sctp_cwnd_update_after_output = &sctp_cwnd_update_after_output;
2829 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer = &sctp_htcp_cwnd_update_after_fr_timer;
2830 SCTP_TCB_UNLOCK(stcb);
2831 break;
2832 }
2833 /*
2834 * JRS - All other values are
2835 * invalid
2836 */
2837 default:
2838 {
2839 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2840 error = EINVAL;
2841 SCTP_TCB_UNLOCK(stcb);
2842 break;
2843 }
2844 }
2845 } else {
2846 switch (av->assoc_value) {
2847 case SCTP_CC_RFC2581:
2848 case SCTP_CC_HSTCP:
2849 case SCTP_CC_HTCP:
2850 inp->sctp_ep.sctp_default_cc_module = av->assoc_value;
2851 break;
2852 default:
2853 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2854 error = EINVAL;
2855 break;
2856				}
2857 }
2858 }
2859 break;
2860 case SCTP_CLR_STAT_LOG:
2861 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2862 error = EOPNOTSUPP;
2863 break;
2864 case SCTP_CONTEXT:
2865 {
2866 struct sctp_assoc_value *av;
2867
2868 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
2869 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
2870
2871 if (stcb) {
2872 stcb->asoc.context = av->assoc_value;
2873 SCTP_TCB_UNLOCK(stcb);
2874 } else {
2875 SCTP_INP_WLOCK(inp);
2876 inp->sctp_context = av->assoc_value;
2877 SCTP_INP_WUNLOCK(inp);
2878 }
2879 }
2880 break;
2881 case SCTP_VRF_ID:
2882 {
2883 uint32_t *default_vrfid;
2884
2885 SCTP_CHECK_AND_CAST(default_vrfid, optval, uint32_t, optsize);
2886 if (*default_vrfid > SCTP_MAX_VRF_ID) {
2887 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2888 error = EINVAL;
2889 break;
2890 }
2891 inp->def_vrf_id = *default_vrfid;
2892 break;
2893 }
2894 case SCTP_DEL_VRF_ID:
2895 {
2896 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2897 error = EOPNOTSUPP;
2898 break;
2899 }
2900 case SCTP_ADD_VRF_ID:
2901 {
2902 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
2903 error = EOPNOTSUPP;
2904 break;
2905 }
2906 case SCTP_DELAYED_SACK:
2907 {
2908 struct sctp_sack_info *sack;
2909
2910 SCTP_CHECK_AND_CAST(sack, optval, struct sctp_sack_info, optsize);
2911 SCTP_FIND_STCB(inp, stcb, sack->sack_assoc_id);
2912 if (sack->sack_delay) {
2913 if (sack->sack_delay > SCTP_MAX_SACK_DELAY)
2914 sack->sack_delay = SCTP_MAX_SACK_DELAY;
2915 }
2916 if (stcb) {
2917 if (sack->sack_delay) {
2918 if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
2919 sack->sack_delay = TICKS_TO_MSEC(1);
2920 }
2921 stcb->asoc.delayed_ack = sack->sack_delay;
2922 }
2923 if (sack->sack_freq) {
2924 stcb->asoc.sack_freq = sack->sack_freq;
2925 }
2926 SCTP_TCB_UNLOCK(stcb);
2927 } else {
2928 SCTP_INP_WLOCK(inp);
2929 if (sack->sack_delay) {
2930 if (MSEC_TO_TICKS(sack->sack_delay) < 1) {
2931 sack->sack_delay = TICKS_TO_MSEC(1);
2932 }
2933 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sack->sack_delay);
2934 }
2935 if (sack->sack_freq) {
2936 inp->sctp_ep.sctp_sack_freq = sack->sack_freq;
2937 }
2938 SCTP_INP_WUNLOCK(inp);
2939 }
2940 break;
2941 }
2942 case SCTP_AUTH_CHUNK:
2943 {
2944 struct sctp_authchunk *sauth;
2945
2946 SCTP_CHECK_AND_CAST(sauth, optval, struct sctp_authchunk, optsize);
2947
2948 SCTP_INP_WLOCK(inp);
2949 if (sctp_auth_add_chunk(sauth->sauth_chunk, inp->sctp_ep.local_auth_chunks)) {
2950 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
2951 error = EINVAL;
2952 }
2953 SCTP_INP_WUNLOCK(inp);
2954 break;
2955 }
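	/*
	 * SCTP_AUTH_KEY: install shared key material.  The key length is
	 * whatever follows the sctp_authkey header in the option buffer.
	 * With an association id the key replaces any matching key on that
	 * association (clearing cached keys for that key id first);
	 * otherwise it is installed in the endpoint's key list.
	 */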
2956 case SCTP_AUTH_KEY:
2957 {
2958 struct sctp_authkey *sca;
2959 struct sctp_keyhead *shared_keys;
2960 sctp_sharedkey_t *shared_key;
2961 sctp_key_t *key = NULL;
2962 size_t size;
2963
2964 SCTP_CHECK_AND_CAST(sca, optval, struct sctp_authkey, optsize);
2965 SCTP_FIND_STCB(inp, stcb, sca->sca_assoc_id);
2966 size = optsize - sizeof(*sca);
2967
2968 if (stcb) {
2969 /* set it on the assoc */
2970 shared_keys = &stcb->asoc.shared_keys;
2971 /* clear the cached keys for this key id */
2972 sctp_clear_cachedkeys(stcb, sca->sca_keynumber);
2973 /*
2974 * create the new shared key and
2975 * insert/replace it
2976 */
2977 if (size > 0) {
2978 key = sctp_set_key(sca->sca_key, (uint32_t) size);
2979 if (key == NULL) {
2980 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
2981 error = ENOMEM;
2982 SCTP_TCB_UNLOCK(stcb);
2983 break;
2984 }
2985 }
2986 shared_key = sctp_alloc_sharedkey();
2987 if (shared_key == NULL) {
2988 sctp_free_key(key);
2989 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
2990 error = ENOMEM;
2991 SCTP_TCB_UNLOCK(stcb);
2992 break;
2993 }
2994 shared_key->key = key;
2995 shared_key->keyid = sca->sca_keynumber;
2996 sctp_insert_sharedkey(shared_keys, shared_key);
2997 SCTP_TCB_UNLOCK(stcb);
2998 } else {
2999 /* set it on the endpoint */
3000 SCTP_INP_WLOCK(inp);
3001 shared_keys = &inp->sctp_ep.shared_keys;
3002 /*
3003 * clear the cached keys on all assocs for
3004 * this key id
3005 */
3006 sctp_clear_cachedkeys_ep(inp, sca->sca_keynumber);
3007 /*
3008 * create the new shared key and
3009 * insert/replace it
3010 */
3011 if (size > 0) {
3012 key = sctp_set_key(sca->sca_key, (uint32_t) size);
3013 if (key == NULL) {
3014 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3015 error = ENOMEM;
3016 SCTP_INP_WUNLOCK(inp);
3017 break;
3018 }
3019 }
3020 shared_key = sctp_alloc_sharedkey();
3021 if (shared_key == NULL) {
3022 sctp_free_key(key);
3023 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3024 error = ENOMEM;
3025 SCTP_INP_WUNLOCK(inp);
3026 break;
3027 }
3028 shared_key->key = key;
3029 shared_key->keyid = sca->sca_keynumber;
3030 sctp_insert_sharedkey(shared_keys, shared_key);
3031 SCTP_INP_WUNLOCK(inp);
3032 }
3033 break;
3034 }
3035 case SCTP_HMAC_IDENT:
3036 {
3037 struct sctp_hmacalgo *shmac;
3038 sctp_hmaclist_t *hmaclist;
3039 uint32_t hmacid;
3040 size_t size, i, found;
3041
3042 SCTP_CHECK_AND_CAST(shmac, optval, struct sctp_hmacalgo, optsize);
3043 size = (optsize - sizeof(*shmac)) / sizeof(shmac->shmac_idents[0]);
3044 hmaclist = sctp_alloc_hmaclist(size);
3045 if (hmaclist == NULL) {
3046 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOMEM);
3047 error = ENOMEM;
3048 break;
3049 }
3050 for (i = 0; i < size; i++) {
3051 hmacid = shmac->shmac_idents[i];
3052 if (sctp_auth_add_hmacid(hmaclist, (uint16_t) hmacid)) {
3053					/* invalid HMACs were found */
3054 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3055 error = EINVAL;
3056 sctp_free_hmaclist(hmaclist);
3057 goto sctp_set_hmac_done;
3058 }
3059 }
3060 found = 0;
3061 for (i = 0; i < hmaclist->num_algo; i++) {
3062 if (hmaclist->hmac[i] == SCTP_AUTH_HMAC_ID_SHA1) {
3063 /* already in list */
3064 found = 1;
3065 }
3066 }
3067 if (!found) {
3068 sctp_free_hmaclist(hmaclist);
3069 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3070 error = EINVAL;
3071 break;
3072 }
3073 /* set it on the endpoint */
3074 SCTP_INP_WLOCK(inp);
3075 if (inp->sctp_ep.local_hmacs)
3076 sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
3077 inp->sctp_ep.local_hmacs = hmaclist;
3078 SCTP_INP_WUNLOCK(inp);
3079 sctp_set_hmac_done:
3080 break;
3081 }
3082 case SCTP_AUTH_ACTIVE_KEY:
3083 {
3084 struct sctp_authkeyid *scact;
3085
3086 SCTP_CHECK_AND_CAST(scact, optval, struct sctp_authkeyid, optsize);
3087 SCTP_FIND_STCB(inp, stcb, scact->scact_assoc_id);
3088
3089 /* set the active key on the right place */
3090 if (stcb) {
3091 /* set the active key on the assoc */
3092 if (sctp_auth_setactivekey(stcb, scact->scact_keynumber)) {
3093 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3094 error = EINVAL;
3095 }
3096 SCTP_TCB_UNLOCK(stcb);
3097 } else {
3098 /* set the active key on the endpoint */
3099 SCTP_INP_WLOCK(inp);
3100 if (sctp_auth_setactivekey_ep(inp, scact->scact_keynumber)) {
3101 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3102 error = EINVAL;
3103 }
3104 SCTP_INP_WUNLOCK(inp);
3105 }
3106 break;
3107 }
3108 case SCTP_AUTH_DELETE_KEY:
3109 {
3110 struct sctp_authkeyid *scdel;
3111
3112 SCTP_CHECK_AND_CAST(scdel, optval, struct sctp_authkeyid, optsize);
3113 SCTP_FIND_STCB(inp, stcb, scdel->scact_assoc_id);
3114
3115 /* delete the key from the right place */
3116 if (stcb) {
3117 if (sctp_delete_sharedkey(stcb, scdel->scact_keynumber)) {
3118 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3119 error = EINVAL;
3120 }
3121 SCTP_TCB_UNLOCK(stcb);
3122 } else {
3123 SCTP_INP_WLOCK(inp);
3124 if (sctp_delete_sharedkey_ep(inp, scdel->scact_keynumber)) {
3125 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3126 error = EINVAL;
3127 }
3128 SCTP_INP_WUNLOCK(inp);
3129 }
3130 break;
3131 }
3132
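	/*
	 * SCTP_RESET_STREAMS: ask the peer to reset streams.  This requires
	 * an existing association whose peer advertised stream-reset
	 * support and no reset already outstanding.  The flags select
	 * inbound, outbound, both, or a TSN reset, and every listed stream
	 * number is checked against the current stream counts before the
	 * request is queued and sent.
	 */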
3133 case SCTP_RESET_STREAMS:
3134 {
3135 struct sctp_stream_reset *strrst;
3136 uint8_t send_in = 0, send_tsn = 0, send_out = 0;
3137 int i;
3138
3139 SCTP_CHECK_AND_CAST(strrst, optval, struct sctp_stream_reset, optsize);
3140 SCTP_FIND_STCB(inp, stcb, strrst->strrst_assoc_id);
3141
3142 if (stcb == NULL) {
3143 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
3144 error = ENOENT;
3145 break;
3146 }
3147 if (stcb->asoc.peer_supports_strreset == 0) {
3148			/*
3149			 * The peer does not support stream resets, so we
3150			 * return EPROTONOSUPPORT; the lack of support is
3151			 * specific to this feature and this peer, not to
3152			 * the socket request in general.
3153			 */
3154 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EPROTONOSUPPORT);
3155 error = EPROTONOSUPPORT;
3156 SCTP_TCB_UNLOCK(stcb);
3157 break;
3158 }
3159 if (stcb->asoc.stream_reset_outstanding) {
3160 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
3161 error = EALREADY;
3162 SCTP_TCB_UNLOCK(stcb);
3163 break;
3164 }
3165 if (strrst->strrst_flags == SCTP_RESET_LOCAL_RECV) {
3166 send_in = 1;
3167 } else if (strrst->strrst_flags == SCTP_RESET_LOCAL_SEND) {
3168 send_out = 1;
3169 } else if (strrst->strrst_flags == SCTP_RESET_BOTH) {
3170 send_in = 1;
3171 send_out = 1;
3172 } else if (strrst->strrst_flags == SCTP_RESET_TSN) {
3173 send_tsn = 1;
3174 } else {
3175 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3176 error = EINVAL;
3177 SCTP_TCB_UNLOCK(stcb);
3178 break;
3179 }
3180 for (i = 0; i < strrst->strrst_num_streams; i++) {
3181 if ((send_in) &&
3183				    (strrst->strrst_list[i] > stcb->asoc.streamincnt)) {
3184 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3185 error = EINVAL;
3186 goto get_out;
3187 }
3188 if ((send_out) &&
3189 (strrst->strrst_list[i] > stcb->asoc.streamoutcnt)) {
3190 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3191 error = EINVAL;
3192 goto get_out;
3193 }
3194 }
3195 if (error) {
3196 get_out:
3197 SCTP_TCB_UNLOCK(stcb);
3198 break;
3199 }
3200 error = sctp_send_str_reset_req(stcb, strrst->strrst_num_streams,
3201 strrst->strrst_list,
3202 send_out, (stcb->asoc.str_reset_seq_in - 3),
3203 send_in, send_tsn);
3204
3205 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_REQ, SCTP_SO_LOCKED);
3206 SCTP_TCB_UNLOCK(stcb);
3207 }
3208 break;
3209
3210 case SCTP_CONNECT_X:
3211 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
3212 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3213 error = EINVAL;
3214 break;
3215 }
3216 error = sctp_do_connect_x(so, inp, optval, optsize, p, 0);
3217 break;
3218
3219 case SCTP_CONNECT_X_DELAYED:
3220 if (optsize < (sizeof(int) + sizeof(struct sockaddr_in))) {
3221 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3222 error = EINVAL;
3223 break;
3224 }
3225 error = sctp_do_connect_x(so, inp, optval, optsize, p, 1);
3226 break;
3227
3228 case SCTP_CONNECT_X_COMPLETE:
3229 {
3230 struct sockaddr *sa;
3231 struct sctp_nets *net;
3232
3233 /* FIXME MT: check correct? */
3234 SCTP_CHECK_AND_CAST(sa, optval, struct sockaddr, optsize);
3235
3236 /* find tcb */
3237 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3238 SCTP_INP_RLOCK(inp);
3239 stcb = LIST_FIRST(&inp->sctp_asoc_list);
3240 if (stcb) {
3241 SCTP_TCB_LOCK(stcb);
3242 net = sctp_findnet(stcb, sa);
3243 }
3244 SCTP_INP_RUNLOCK(inp);
3245 } else {
3246 /*
3247 * We increment here since
3248			 * sctp_findassociation_ep_addr() will do a
3249 * decrement if it finds the stcb as long as
3250 * the locked tcb (last argument) is NOT a
3251 * TCB.. aka NULL.
3252 */
3253 SCTP_INP_INCR_REF(inp);
3254 stcb = sctp_findassociation_ep_addr(&inp, sa, &net, NULL, NULL);
3255 if (stcb == NULL) {
3256 SCTP_INP_DECR_REF(inp);
3257 }
3258 }
3259
3260 if (stcb == NULL) {
3261 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
3262 error = ENOENT;
3263 break;
3264 }
3265 if (stcb->asoc.delayed_connection == 1) {
3266 stcb->asoc.delayed_connection = 0;
3267 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
3268 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb,
3269 stcb->asoc.primary_destination,
3270 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_9);
3271 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
3272 } else {
3273 /*
3274 * already expired or did not use delayed
3275 * connectx
3276 */
3277 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
3278 error = EALREADY;
3279 }
3280 SCTP_TCB_UNLOCK(stcb);
3281 }
3282 break;
3283 case SCTP_MAX_BURST:
3284 {
3285 uint8_t *burst;
3286
3287 SCTP_CHECK_AND_CAST(burst, optval, uint8_t, optsize);
3288
3289 SCTP_INP_WLOCK(inp);
3290 if (*burst) {
3291 inp->sctp_ep.max_burst = *burst;
3292 }
3293 SCTP_INP_WUNLOCK(inp);
3294 }
3295 break;
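	/*
	 * SCTP_MAXSEG (set): the fragmentation point is stored with the
	 * IP/SCTP overhead added back in; a value of zero restores the
	 * default maximum segment size.
	 */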
3296 case SCTP_MAXSEG:
3297 {
3298 struct sctp_assoc_value *av;
3299 int ovh;
3300
3301 SCTP_CHECK_AND_CAST(av, optval, struct sctp_assoc_value, optsize);
3302 SCTP_FIND_STCB(inp, stcb, av->assoc_id);
3303
3304 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3305 ovh = SCTP_MED_OVERHEAD;
3306 } else {
3307 ovh = SCTP_MED_V4_OVERHEAD;
3308 }
3309 if (stcb) {
3310 if (av->assoc_value) {
3311 stcb->asoc.sctp_frag_point = (av->assoc_value + ovh);
3312 } else {
3313 stcb->asoc.sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
3314 }
3315 SCTP_TCB_UNLOCK(stcb);
3316 } else {
3317 SCTP_INP_WLOCK(inp);
3318 /*
3319 * FIXME MT: I think this is not in tune
3320 * with the API ID
3321 */
3322 if (av->assoc_value) {
3323 inp->sctp_frag_point = (av->assoc_value + ovh);
3324 } else {
3325 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
3326 }
3327 SCTP_INP_WUNLOCK(inp);
3328 }
3329 }
3330 break;
3331 case SCTP_EVENTS:
3332 {
3333 struct sctp_event_subscribe *events;
3334
3335 SCTP_CHECK_AND_CAST(events, optval, struct sctp_event_subscribe, optsize);
3336
3337 SCTP_INP_WLOCK(inp);
3338 if (events->sctp_data_io_event) {
3339 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
3340 } else {
3341 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
3342 }
3343
3344 if (events->sctp_association_event) {
3345 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
3346 } else {
3347 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVASSOCEVNT);
3348 }
3349
3350 if (events->sctp_address_event) {
3351 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
3352 } else {
3353 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPADDREVNT);
3354 }
3355
3356 if (events->sctp_send_failure_event) {
3357 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
3358 } else {
3359 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSENDFAILEVNT);
3360 }
3361
3362 if (events->sctp_peer_error_event) {
3363 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVPEERERR);
3364 } else {
3365 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVPEERERR);
3366 }
3367
3368 if (events->sctp_shutdown_event) {
3369 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
3370 } else {
3371 sctp_feature_off(inp, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT);
3372 }
3373
3374 if (events->sctp_partial_delivery_event) {
3375 sctp_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
3376 } else {
3377 sctp_feature_off(inp, SCTP_PCB_FLAGS_PDAPIEVNT);
3378 }
3379
3380 if (events->sctp_adaptation_layer_event) {
3381 sctp_feature_on(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
3382 } else {
3383 sctp_feature_off(inp, SCTP_PCB_FLAGS_ADAPTATIONEVNT);
3384 }
3385
3386 if (events->sctp_authentication_event) {
3387 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTHEVNT);
3388 } else {
3389 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTHEVNT);
3390 }
3391
3392 if (events->sctp_stream_reset_events) {
3393 sctp_feature_on(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
3394 } else {
3395 sctp_feature_off(inp, SCTP_PCB_FLAGS_STREAM_RESETEVNT);
3396 }
3397 SCTP_INP_WUNLOCK(inp);
3398 }
3399 break;
3400
3401 case SCTP_ADAPTATION_LAYER:
3402 {
3403 struct sctp_setadaptation *adap_bits;
3404
3405 SCTP_CHECK_AND_CAST(adap_bits, optval, struct sctp_setadaptation, optsize);
3406 SCTP_INP_WLOCK(inp);
3407 inp->sctp_ep.adaptation_layer_indicator = adap_bits->ssb_adaptation_ind;
3408 SCTP_INP_WUNLOCK(inp);
3409 }
3410 break;
3411#ifdef SCTP_DEBUG
3412 case SCTP_SET_INITIAL_DBG_SEQ:
3413 {
3414 uint32_t *vvv;
3415
3416 SCTP_CHECK_AND_CAST(vvv, optval, uint32_t, optsize);
3417 SCTP_INP_WLOCK(inp);
3418 inp->sctp_ep.initial_sequence_debug = *vvv;
3419 SCTP_INP_WUNLOCK(inp);
3420 }
3421 break;
3422#endif
3423 case SCTP_DEFAULT_SEND_PARAM:
3424 {
3425 struct sctp_sndrcvinfo *s_info;
3426
3427 SCTP_CHECK_AND_CAST(s_info, optval, struct sctp_sndrcvinfo, optsize);
3428 SCTP_FIND_STCB(inp, stcb, s_info->sinfo_assoc_id);
3429
3430 if (stcb) {
3431 if (s_info->sinfo_stream <= stcb->asoc.streamoutcnt) {
3432 memcpy(&stcb->asoc.def_send, s_info, min(optsize, sizeof(stcb->asoc.def_send)));
3433 } else {
3434 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3435 error = EINVAL;
3436 }
3437 SCTP_TCB_UNLOCK(stcb);
3438 } else {
3439 SCTP_INP_WLOCK(inp);
3440 memcpy(&inp->def_send, s_info, min(optsize, sizeof(inp->def_send)));
3441 SCTP_INP_WUNLOCK(inp);
3442 }
3443 }
3444 break;
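	/*
	 * SCTP_PEER_ADDR_PARAMS (set): mirrors the get case above.  The
	 * heartbeat, path MTU discovery, path max retransmit, TOS and flow
	 * label settings are applied to a single destination or to every
	 * destination of the association; without an association only the
	 * endpoint's retransmit and heartbeat defaults are updated (TOS and
	 * flow label defaults come from the IP-level socket options).
	 */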
3445 case SCTP_PEER_ADDR_PARAMS:
3446		/* Applies to the specific association */
3447 {
3448 struct sctp_paddrparams *paddrp;
3449 struct sctp_nets *net;
3450
3451 SCTP_CHECK_AND_CAST(paddrp, optval, struct sctp_paddrparams, optsize);
3452 SCTP_FIND_STCB(inp, stcb, paddrp->spp_assoc_id);
3453 net = NULL;
3454 if (stcb) {
3455 net = sctp_findnet(stcb, (struct sockaddr *)&paddrp->spp_address);
3456 } else {
3457 /*
3458 * We increment here since
3459			 * sctp_findassociation_ep_addr() will do a
3460 * decrement if it finds the stcb as long as
3461 * the locked tcb (last argument) is NOT a
3462 * TCB.. aka NULL.
3463 */
3464 SCTP_INP_INCR_REF(inp);
3465 stcb = sctp_findassociation_ep_addr(&inp,
3466 (struct sockaddr *)&paddrp->spp_address,
3467 &net, NULL, NULL);
3468 if (stcb == NULL) {
3469 SCTP_INP_DECR_REF(inp);
3470 }
3471 }
3472 if (stcb && (net == NULL)) {
3473 struct sockaddr *sa;
3474
3475 sa = (struct sockaddr *)&paddrp->spp_address;
3476 if (sa->sa_family == AF_INET) {
3477 struct sockaddr_in *sin;
3478
3479 sin = (struct sockaddr_in *)sa;
3480 if (sin->sin_addr.s_addr) {
3481 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3482 SCTP_TCB_UNLOCK(stcb);
3483 error = EINVAL;
3484 break;
3485 }
3486 } else if (sa->sa_family == AF_INET6) {
3487 struct sockaddr_in6 *sin6;
3488
3489 sin6 = (struct sockaddr_in6 *)sa;
3490 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
3491 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3492 SCTP_TCB_UNLOCK(stcb);
3493 error = EINVAL;
3494 break;
3495 }
3496 } else {
3497 error = EAFNOSUPPORT;
3498 SCTP_TCB_UNLOCK(stcb);
3499 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, error);
3500 break;
3501 }
3502 }
3503 /* sanity checks */
3504 if ((paddrp->spp_flags & SPP_HB_ENABLE) && (paddrp->spp_flags & SPP_HB_DISABLE)) {
3505 if (stcb)
3506 SCTP_TCB_UNLOCK(stcb);
3507 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3508 return (EINVAL);
3509 }
3510 if ((paddrp->spp_flags & SPP_PMTUD_ENABLE) && (paddrp->spp_flags & SPP_PMTUD_DISABLE)) {
3511 if (stcb)
3512 SCTP_TCB_UNLOCK(stcb);
3513 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3514 return (EINVAL);
3515 }
3516 if (stcb) {
3517 /************************TCB SPECIFIC SET ******************/
3518 /*
3519 * do we change the timer for HB, we run
3520 * only one?
3521 */
3522 int ovh = 0;
3523
3524 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
3525 ovh = SCTP_MED_OVERHEAD;
3526 } else {
3527 ovh = SCTP_MED_V4_OVERHEAD;
3528 }
3529
3530 if (paddrp->spp_hbinterval)
3531 stcb->asoc.heart_beat_delay = paddrp->spp_hbinterval;
3532 else if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3533 stcb->asoc.heart_beat_delay = 0;
3534
3535			/* settings for a specific destination (net)? */
3536 if (net) {
3537 /************************NET SPECIFIC SET ******************/
3538 if (paddrp->spp_flags & SPP_HB_DEMAND) {
3539 /* on demand HB */
3540 if (sctp_send_hb(stcb, 1, net) < 0) {
3541 /* asoc destroyed */
3542 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3543 error = EINVAL;
3544 break;
3545 }
3546 }
3547 if (paddrp->spp_flags & SPP_HB_DISABLE) {
3548 net->dest_state |= SCTP_ADDR_NOHB;
3549 }
3550 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3551 net->dest_state &= ~SCTP_ADDR_NOHB;
3552 }
3553 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
3554 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3555 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3556 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3557 }
3558 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3559 net->mtu = paddrp->spp_pathmtu + ovh;
3560 if (net->mtu < stcb->asoc.smallest_mtu) {
3561#ifdef SCTP_PRINT_FOR_B_AND_M
3562 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n",
3563 net->mtu);
3564#endif
3565 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3566 }
3567 }
3568 }
3569 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3570 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3571 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3572 }
3573 }
3574 if (paddrp->spp_pathmaxrxt)
3575 net->failure_threshold = paddrp->spp_pathmaxrxt;
3576#ifdef INET
3577 if (paddrp->spp_flags & SPP_IPV4_TOS) {
3578 if (net->ro._l_addr.sin.sin_family == AF_INET) {
3579 net->tos_flowlabel = paddrp->spp_ipv4_tos & 0x000000fc;
3580 }
3581 }
3582#endif
3583#ifdef INET6
3584 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL) {
3585 if (net->ro._l_addr.sin6.sin6_family == AF_INET6) {
3586 net->tos_flowlabel = paddrp->spp_ipv6_flowlabel;
3587 }
3588 }
3589#endif
3590 } else {
3591 /************************ASSOC ONLY -- NO NET SPECIFIC SET ******************/
3592 if (paddrp->spp_pathmaxrxt)
3593 stcb->asoc.def_net_failure = paddrp->spp_pathmaxrxt;
3594
3595 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3596 /* Turn back on the timer */
3597 stcb->asoc.hb_is_disabled = 0;
3598 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3599 }
3600 if ((paddrp->spp_flags & SPP_PMTUD_DISABLE) && (paddrp->spp_pathmtu >= SCTP_SMALLEST_PMTU)) {
3601 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3602 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3603 sctp_timer_stop(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net,
3604 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_10);
3605 }
3606 if (paddrp->spp_pathmtu > SCTP_DEFAULT_MINSEGMENT) {
3607 net->mtu = paddrp->spp_pathmtu + ovh;
3608 if (net->mtu < stcb->asoc.smallest_mtu) {
3609#ifdef SCTP_PRINT_FOR_B_AND_M
3610 SCTP_PRINTF("SCTP_PMTU_DISABLE calls sctp_pathmtu_adjustment:%d\n",
3611 net->mtu);
3612#endif
3613 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu);
3614 }
3615 }
3616 }
3617 }
3618 if (paddrp->spp_flags & SPP_PMTUD_ENABLE) {
3619 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3620 if (SCTP_OS_TIMER_PENDING(&net->pmtu_timer.timer)) {
3621 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
3622 }
3623 }
3624 }
3625 if (paddrp->spp_flags & SPP_HB_DISABLE) {
3626 int cnt_of_unconf = 0;
3627 struct sctp_nets *lnet;
3628
3629 stcb->asoc.hb_is_disabled = 1;
3630 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
3631 if (lnet->dest_state & SCTP_ADDR_UNCONFIRMED) {
3632 cnt_of_unconf++;
3633 }
3634 }
3635 /*
3636 * stop the timer ONLY if we
3637 * have no unconfirmed
3638 * addresses
3639 */
3640 if (cnt_of_unconf == 0) {
3641 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3642 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net,
3643 SCTP_FROM_SCTP_USRREQ + SCTP_LOC_11);
3644 }
3645 }
3646 }
3647 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3648 /* start up the timer. */
3649 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3650 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
3651 }
3652 }
3653#ifdef INET
3654 if (paddrp->spp_flags & SPP_IPV4_TOS)
3655 stcb->asoc.default_tos = paddrp->spp_ipv4_tos & 0x000000fc;
3656#endif
3657#ifdef INET6
3658 if (paddrp->spp_flags & SPP_IPV6_FLOWLABEL)
3659 stcb->asoc.default_flowlabel = paddrp->spp_ipv6_flowlabel;
3660#endif
3661
3662 }
3663 SCTP_TCB_UNLOCK(stcb);
3664 } else {
3665 /************************NO TCB, SET TO default stuff ******************/
3666 SCTP_INP_WLOCK(inp);
3667 /*
3668 * For the TOS/FLOWLABEL stuff you set it
3669 * with the options on the socket
3670 */
3671 if (paddrp->spp_pathmaxrxt) {
3672 inp->sctp_ep.def_net_failure = paddrp->spp_pathmaxrxt;
3673 }
3674 if (paddrp->spp_flags & SPP_HB_TIME_IS_ZERO)
3675 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = 0;
3676 else if (paddrp->spp_hbinterval) {
3677 if (paddrp->spp_hbinterval > SCTP_MAX_HB_INTERVAL)
3678 paddrp->spp_hbinterval = SCTP_MAX_HB_INTERVAL;
3679 inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(paddrp->spp_hbinterval);
3680 }
3681 if (paddrp->spp_flags & SPP_HB_ENABLE) {
3682 sctp_feature_off(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3683
3684 } else if (paddrp->spp_flags & SPP_HB_DISABLE) {
3685 sctp_feature_on(inp, SCTP_PCB_FLAGS_DONOT_HEARTBEAT);
3686 }
3687 SCTP_INP_WUNLOCK(inp);
3688 }
3689 }
3690 break;
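	/*
	 * SCTP_RTOINFO (set): a zero field means "leave unchanged".  The
	 * resulting values must satisfy min <= initial <= max, otherwise
	 * the whole update is rejected with EINVAL.
	 */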
3691 case SCTP_RTOINFO:
3692 {
3693 struct sctp_rtoinfo *srto;
3694 uint32_t new_init, new_min, new_max;
3695
3696 SCTP_CHECK_AND_CAST(srto, optval, struct sctp_rtoinfo, optsize);
3697 SCTP_FIND_STCB(inp, stcb, srto->srto_assoc_id);
3698
3699 if (stcb) {
3700 if (srto->srto_initial)
3701 new_init = srto->srto_initial;
3702 else
3703 new_init = stcb->asoc.initial_rto;
3704 if (srto->srto_max)
3705 new_max = srto->srto_max;
3706 else
3707 new_max = stcb->asoc.maxrto;
3708 if (srto->srto_min)
3709 new_min = srto->srto_min;
3710 else
3711 new_min = stcb->asoc.minrto;
3712 if ((new_min <= new_init) && (new_init <= new_max)) {
3713 stcb->asoc.initial_rto = new_init;
3714 stcb->asoc.maxrto = new_max;
3715 stcb->asoc.minrto = new_min;
3716 } else {
3717 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3718 error = EINVAL;
3719 }
3720 SCTP_TCB_UNLOCK(stcb);
3721 } else {
3722 SCTP_INP_WLOCK(inp);
3723 if (srto->srto_initial)
3724 new_init = srto->srto_initial;
3725 else
3726 new_init = inp->sctp_ep.initial_rto;
3727 if (srto->srto_max)
3728 new_max = srto->srto_max;
3729 else
3730 new_max = inp->sctp_ep.sctp_maxrto;
3731 if (srto->srto_min)
3732 new_min = srto->srto_min;
3733 else
3734 new_min = inp->sctp_ep.sctp_minrto;
3735 if ((new_min <= new_init) && (new_init <= new_max)) {
3736 inp->sctp_ep.initial_rto = new_init;
3737 inp->sctp_ep.sctp_maxrto = new_max;
3738 inp->sctp_ep.sctp_minrto = new_min;
3739 } else {
3740 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3741 error = EINVAL;
3742 }
3743 SCTP_INP_WUNLOCK(inp);
3744 }
3745 }
3746 break;
3747 case SCTP_ASSOCINFO:
3748 {
3749 struct sctp_assocparams *sasoc;
3750
3751 SCTP_CHECK_AND_CAST(sasoc, optval, struct sctp_assocparams, optsize);
3752 SCTP_FIND_STCB(inp, stcb, sasoc->sasoc_assoc_id);
3753 if (sasoc->sasoc_cookie_life) {
3754 /* boundary check the cookie life */
3755 if (sasoc->sasoc_cookie_life < 1000)
3756 sasoc->sasoc_cookie_life = 1000;
3757 if (sasoc->sasoc_cookie_life > SCTP_MAX_COOKIE_LIFE) {
3758 sasoc->sasoc_cookie_life = SCTP_MAX_COOKIE_LIFE;
3759 }
3760 }
3761 if (stcb) {
3762 if (sasoc->sasoc_asocmaxrxt)
3763 stcb->asoc.max_send_times = sasoc->sasoc_asocmaxrxt;
3764 sasoc->sasoc_number_peer_destinations = stcb->asoc.numnets;
3765 sasoc->sasoc_peer_rwnd = 0;
3766 sasoc->sasoc_local_rwnd = 0;
3767 if (sasoc->sasoc_cookie_life) {
3768 stcb->asoc.cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
3769 }
3770 SCTP_TCB_UNLOCK(stcb);
3771 } else {
3772 SCTP_INP_WLOCK(inp);
3773 if (sasoc->sasoc_asocmaxrxt)
3774 inp->sctp_ep.max_send_times = sasoc->sasoc_asocmaxrxt;
3775 sasoc->sasoc_number_peer_destinations = 0;
3776 sasoc->sasoc_peer_rwnd = 0;
3777 sasoc->sasoc_local_rwnd = 0;
3778 if (sasoc->sasoc_cookie_life) {
3779 inp->sctp_ep.def_cookie_life = MSEC_TO_TICKS(sasoc->sasoc_cookie_life);
3780 }
3781 SCTP_INP_WUNLOCK(inp);
3782 }
3783 }
3784 break;
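	/*
	 * Illustrative userland sketch (assumed "fd" and "assoc_id"): the
	 * SCTP_ASSOCINFO handling above clamps sasoc_cookie_life (in
	 * milliseconds) to the range [1000, SCTP_MAX_COOKIE_LIFE] and treats
	 * zero fields as "leave unchanged".
	 *
	 *	struct sctp_assocparams ap;
	 *
	 *	memset(&ap, 0, sizeof(ap));
	 *	ap.sasoc_assoc_id = assoc_id;
	 *	ap.sasoc_asocmaxrxt = 5;	 (Association.Max.Retrans)
	 *	ap.sasoc_cookie_life = 60000;	 (cookie lifetime, ms)
	 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_ASSOCINFO,
	 *	    &ap, sizeof(ap));
	 */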
3785 case SCTP_INITMSG:
3786 {
3787 struct sctp_initmsg *sinit;
3788
3789 SCTP_CHECK_AND_CAST(sinit, optval, struct sctp_initmsg, optsize);
3790 SCTP_INP_WLOCK(inp);
3791 if (sinit->sinit_num_ostreams)
3792 inp->sctp_ep.pre_open_stream_count = sinit->sinit_num_ostreams;
3793
3794 if (sinit->sinit_max_instreams)
3795 inp->sctp_ep.max_open_streams_intome = sinit->sinit_max_instreams;
3796
3797 if (sinit->sinit_max_attempts)
3798 inp->sctp_ep.max_init_times = sinit->sinit_max_attempts;
3799
3800 if (sinit->sinit_max_init_timeo)
3801 inp->sctp_ep.initial_init_rto_max = sinit->sinit_max_init_timeo;
3802 SCTP_INP_WUNLOCK(inp);
3803 }
3804 break;
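	/*
	 * Illustrative userland sketch (assumed "fd"): SCTP_INITMSG above
	 * only updates endpoint defaults used when new associations are
	 * created, so it is normally set before connect(2) or an implicit
	 * send brings one up; zero fields keep the current defaults.
	 *
	 *	struct sctp_initmsg im;
	 *
	 *	memset(&im, 0, sizeof(im));
	 *	im.sinit_num_ostreams = 10;
	 *	im.sinit_max_instreams = 10;
	 *	im.sinit_max_attempts = 4;	 (Max.Init.Retransmits)
	 *	im.sinit_max_init_timeo = 8000;	 (max INIT RTO, ms)
	 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG,
	 *	    &im, sizeof(im));
	 */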
3805 case SCTP_PRIMARY_ADDR:
3806 {
3807 struct sctp_setprim *spa;
3808 struct sctp_nets *net, *lnet;
3809
3810 SCTP_CHECK_AND_CAST(spa, optval, struct sctp_setprim, optsize);
3811 SCTP_FIND_STCB(inp, stcb, spa->ssp_assoc_id);
3812
3813 net = NULL;
3814 if (stcb) {
3815 net = sctp_findnet(stcb, (struct sockaddr *)&spa->ssp_addr);
3816 } else {
3817 /*
3818 * We increment here since
3819				 * sctp_findassociation_ep_addr() will do a
3820				 * decrement if it finds the stcb, as long as
3821				 * the locked tcb (last argument) is NOT a
3822				 * TCB, i.e., NULL.
3823 */
3824 SCTP_INP_INCR_REF(inp);
3825 stcb = sctp_findassociation_ep_addr(&inp,
3826 (struct sockaddr *)&spa->ssp_addr,
3827 &net, NULL, NULL);
3828 if (stcb == NULL) {
3829 SCTP_INP_DECR_REF(inp);
3830 }
3831 }
3832
3833 if ((stcb) && (net)) {
3834 if ((net != stcb->asoc.primary_destination) &&
3835 (!(net->dest_state & SCTP_ADDR_UNCONFIRMED))) {
3836 /* Ok we need to set it */
3837 lnet = stcb->asoc.primary_destination;
3838 if (sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net) == 0) {
3839 if (net->dest_state & SCTP_ADDR_SWITCH_PRIMARY) {
3840 net->dest_state |= SCTP_ADDR_DOUBLE_SWITCH;
3841 }
3842 net->dest_state |= SCTP_ADDR_SWITCH_PRIMARY;
3843 }
3844 }
3845 } else {
3846 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3847 error = EINVAL;
3848 }
3849 if (stcb) {
3850 SCTP_TCB_UNLOCK(stcb);
3851 }
3852 }
3853 break;
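	/*
	 * Illustrative userland sketch (assumed "fd", "assoc_id" and a
	 * filled-in "peer_sin"): the SCTP_PRIMARY_ADDR case above makes a
	 * confirmed peer address the one this end sends to by default.
	 *
	 *	struct sctp_setprim sp;
	 *
	 *	memset(&sp, 0, sizeof(sp));
	 *	sp.ssp_assoc_id = assoc_id;
	 *	memcpy(&sp.ssp_addr, &peer_sin, sizeof(peer_sin));
	 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
	 *	    &sp, sizeof(sp));
	 */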
3854 case SCTP_SET_DYNAMIC_PRIMARY:
3855 {
3856 union sctp_sockstore *ss;
3857
3858 error = priv_check(curthread,
3859 PRIV_NETINET_RESERVEDPORT);
3860 if (error)
3861 break;
3862
3863 SCTP_CHECK_AND_CAST(ss, optval, union sctp_sockstore, optsize);
3864			/* privilege check was already done via priv_check() above */
3865 error = sctp_dynamic_set_primary(&ss->sa, vrf_id);
3866 }
3867 break;
3868 case SCTP_SET_PEER_PRIMARY_ADDR:
3869 {
3870 struct sctp_setpeerprim *sspp;
3871
3872 SCTP_CHECK_AND_CAST(sspp, optval, struct sctp_setpeerprim, optsize);
3873 SCTP_FIND_STCB(inp, stcb, sspp->sspp_assoc_id);
3874 if (stcb != NULL) {
3875 struct sctp_ifa *ifa;
3876
3877 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&sspp->sspp_addr,
3878 stcb->asoc.vrf_id, SCTP_ADDR_NOT_LOCKED);
3879 if (ifa == NULL) {
3880 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3881 error = EINVAL;
3882 goto out_of_it;
3883 }
3884 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
3885 /*
3886					 * Must validate that the ifa found is
3887					 * bound to our endpoint
3888 */
3889 struct sctp_laddr *laddr;
3890 int found = 0;
3891
3892 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3893 if (laddr->ifa == NULL) {
3894 SCTPDBG(SCTP_DEBUG_OUTPUT1, "%s: NULL ifa\n",
3895 __FUNCTION__);
3896 continue;
3897 }
3898 if (laddr->ifa == ifa) {
3899 found = 1;
3900 break;
3901 }
3902 }
3903 if (!found) {
3904 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3905 error = EINVAL;
3906 goto out_of_it;
3907 }
3908 }
3909 if (sctp_set_primary_ip_address_sa(stcb,
3910 (struct sockaddr *)&sspp->sspp_addr) != 0) {
3911 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3912 error = EINVAL;
3913 }
3914 out_of_it:
3915 SCTP_TCB_UNLOCK(stcb);
3916 } else {
3917 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3918 error = EINVAL;
3919 }
3920
3921 }
3922 break;
3923 case SCTP_BINDX_ADD_ADDR:
3924 {
3925 struct sctp_getaddresses *addrs;
3926 size_t sz;
3927 struct thread *td;
3928 int prison = 0;
3929
3930 td = (struct thread *)p;
3931 if (jailed(td->td_ucred)) {
3932 prison = 1;
3933 }
3934 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses,
3935 optsize);
3936 if (addrs->addr->sa_family == AF_INET) {
3937 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
3938 if (optsize < sz) {
3939 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3940 error = EINVAL;
3941 break;
3942 }
3943 if (prison && prison_ip(td->td_ucred, 0, &(((struct sockaddr_in *)(addrs->addr))->sin_addr.s_addr))) {
3944 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRNOTAVAIL);
3945 error = EADDRNOTAVAIL;
3946 }
3947 } else if (addrs->addr->sa_family == AF_INET6) {
3948 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
3949 if (optsize < sz) {
3950 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3951 error = EINVAL;
3952 break;
3953 }
3954 /* JAIL XXXX Add else here for V6 */
3955 }
3956 sctp_bindx_add_address(so, inp, addrs->addr,
3957 addrs->sget_assoc_id, vrf_id,
3958 &error, p);
3959 }
3960 break;
3961 case SCTP_BINDX_REM_ADDR:
3962 {
3963 struct sctp_getaddresses *addrs;
3964 size_t sz;
3965 struct thread *td;
3966 int prison = 0;
3967
3968 td = (struct thread *)p;
3969 if (jailed(td->td_ucred)) {
3970 prison = 1;
3971 }
3972 SCTP_CHECK_AND_CAST(addrs, optval, struct sctp_getaddresses, optsize);
3973 if (addrs->addr->sa_family == AF_INET) {
3974 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in);
3975 if (optsize < sz) {
3976 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3977 error = EINVAL;
3978 break;
3979 }
3980 if (prison && prison_ip(td->td_ucred, 0, &(((struct sockaddr_in *)(addrs->addr))->sin_addr.s_addr))) {
3981 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRNOTAVAIL);
3982 error = EADDRNOTAVAIL;
3983 }
3984 } else if (addrs->addr->sa_family == AF_INET6) {
3985 sz = sizeof(struct sctp_getaddresses) - sizeof(struct sockaddr) + sizeof(struct sockaddr_in6);
3986 if (optsize < sz) {
3987 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
3988 error = EINVAL;
3989 break;
3990 }
3991 /* JAIL XXXX Add else here for V6 */
3992 }
3993 sctp_bindx_delete_address(so, inp, addrs->addr,
3994 addrs->sget_assoc_id, vrf_id,
3995 &error);
3996 }
3997 break;
3998 default:
3999 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOPROTOOPT);
4000 error = ENOPROTOOPT;
4001 break;
4002 } /* end switch (opt) */
4003 return (error);
4004}
4005
4006
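/*
 * Socket-option entry point for SCTP: options that are not at the
 * IPPROTO_SCTP level are passed through to ip_ctloutput()/ip6_ctloutput();
 * otherwise the option value is copied into a temporary kernel buffer,
 * handed to sctp_setopt() or sctp_getopt() depending on the direction, and,
 * on success, copied back out to the caller.
 */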
4007int
4008sctp_ctloutput(struct socket *so, struct sockopt *sopt)
4009{
4010 void *optval = NULL;
4011 size_t optsize = 0;
4012 struct sctp_inpcb *inp;
4013 void *p;
4014 int error = 0;
4015
4016 inp = (struct sctp_inpcb *)so->so_pcb;
4017 if (inp == 0) {
4018		/* Same as TCP: the PCB is not set up, so fail. */
4019 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4020 return (ECONNRESET);
4021 }
4022 if (sopt->sopt_level != IPPROTO_SCTP) {
4023 /* wrong proto level... send back up to IP */
4024#ifdef INET6
4025 if (INP_CHECK_SOCKAF(so, AF_INET6))
4026 error = ip6_ctloutput(so, sopt);
4027 else
4028#endif /* INET6 */
4029 error = ip_ctloutput(so, sopt);
4030 return (error);
4031 }
4032 optsize = sopt->sopt_valsize;
4033 if (optsize) {
4034 SCTP_MALLOC(optval, void *, optsize, SCTP_M_SOCKOPT);
4035 if (optval == NULL) {
4036 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOBUFS);
4037 return (ENOBUFS);
4038 }
4039 error = sooptcopyin(sopt, optval, optsize, optsize);
4040 if (error) {
4041 SCTP_FREE(optval, SCTP_M_SOCKOPT);
4042 goto out;
4043 }
4044 }
4045 p = (void *)sopt->sopt_td;
4046 if (sopt->sopt_dir == SOPT_SET) {
4047 error = sctp_setopt(so, sopt->sopt_name, optval, optsize, p);
4048 } else if (sopt->sopt_dir == SOPT_GET) {
4049 error = sctp_getopt(so, sopt->sopt_name, optval, &optsize, p);
4050 } else {
4051 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4052 error = EINVAL;
4053 }
4054 if ((error == 0) && (optval != NULL)) {
4055 error = sooptcopyout(sopt, optval, optsize);
4056 SCTP_FREE(optval, SCTP_M_SOCKOPT);
4057 } else if (optval != NULL) {
4058 SCTP_FREE(optval, SCTP_M_SOCKOPT);
4059 }
4060out:
4061 return (error);
4062}
4063
4064
4065static int
4066sctp_connect(struct socket *so, struct sockaddr *addr, struct thread *p)
4067{
4068 int error = 0;
4069 int create_lock_on = 0;
4070 uint32_t vrf_id;
4071 struct sctp_inpcb *inp;
4072 struct sctp_tcb *stcb = NULL;
4073
4074 inp = (struct sctp_inpcb *)so->so_pcb;
4075 if (inp == 0) {
4076		/* Same as TCP: the PCB is not set up, so fail. */
4077 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4078 return (ECONNRESET);
4079 }
4080 if (addr == NULL) {
4081 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4082		return (EINVAL);
4083 }
4084 if ((addr->sa_family == AF_INET6) && (addr->sa_len != sizeof(struct sockaddr_in6))) {
4085 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4086 return (EINVAL);
4087 }
4088 if ((addr->sa_family == AF_INET) && (addr->sa_len != sizeof(struct sockaddr_in))) {
4089 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4090 return (EINVAL);
4091 }
4092 SCTP_INP_INCR_REF(inp);
4093 SCTP_ASOC_CREATE_LOCK(inp);
4094 create_lock_on = 1;
4095
4096
4097 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
4098 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) {
4099 /* Should I really unlock ? */
4100 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EFAULT);
4101 error = EFAULT;
4102 goto out_now;
4103 }
4104#ifdef INET6
4105 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) &&
4106 (addr->sa_family == AF_INET6)) {
4107 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4108 error = EINVAL;
4109 goto out_now;
4110 }
4111#endif /* INET6 */
4112 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) ==
4113 SCTP_PCB_FLAGS_UNBOUND) {
4114		/* Bind an ephemeral port. */
4115 error = sctp_inpcb_bind(so, NULL, NULL, p);
4116 if (error) {
4117 goto out_now;
4118 }
4119 }
4120 /* Now do we connect? */
4121 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
4122 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4123 error = EINVAL;
4124 goto out_now;
4125 }
4126 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4127 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
4128 /* We are already connected AND the TCP model */
4129 SCTP_LTRACE_ERR_RET(inp, stcb, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
4130 error = EADDRINUSE;
4131 goto out_now;
4132 }
4133 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4134 SCTP_INP_RLOCK(inp);
4135 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4136 SCTP_INP_RUNLOCK(inp);
4137 } else {
4138 /*
4139 * We increment here since sctp_findassociation_ep_addr()
4140	 * will do a decrement if it finds the stcb, as long as the
4141	 * locked tcb (last argument) is NOT a TCB, i.e., NULL.
4142 */
4143 SCTP_INP_INCR_REF(inp);
4144 stcb = sctp_findassociation_ep_addr(&inp, addr, NULL, NULL, NULL);
4145 if (stcb == NULL) {
4146 SCTP_INP_DECR_REF(inp);
4147 } else {
4148 SCTP_TCB_UNLOCK(stcb);
4149 }
4150 }
4151 if (stcb != NULL) {
4152		/* Already have, or are bringing up, an association */
4153 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EALREADY);
4154 error = EALREADY;
4155 goto out_now;
4156 }
4157 vrf_id = inp->def_vrf_id;
4158 /* We are GOOD to go */
4159 stcb = sctp_aloc_assoc(inp, addr, 1, &error, 0, vrf_id, p);
4160 if (stcb == NULL) {
4161 /* Gak! no memory */
4162 goto out_now;
4163 }
4164 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
4165 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
4166 /* Set the connected flag so we can queue data */
4167 soisconnecting(so);
4168 }
4169 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_WAIT);
4170 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
4171
4172 /* initialize authentication parameters for the assoc */
4173 sctp_initialize_auth_params(inp, stcb);
4174
4175 sctp_send_initiate(inp, stcb, SCTP_SO_LOCKED);
4176 SCTP_TCB_UNLOCK(stcb);
4177out_now:
4178 if (create_lock_on) {
4179 SCTP_ASOC_CREATE_UNLOCK(inp);
4180 }
4181 SCTP_INP_DECR_REF(inp);
4182	return (error);
4183}
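
/*
 * Illustrative userland sketch (not part of this file): the one-to-one
 * (TCP model) path through sctp_connect() above is normally reached via an
 * ordinary connect(2) call; "sa" is an assumed, fully initialized
 * sockaddr_in (or sockaddr_in6) with sa_len set, since mismatched sa_len
 * values are rejected above.
 *
 *	int fd;
 *
 *	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
 *		err(1, "connect");
 */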
4184
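/*
 * sctp_listen() binds an ephemeral port if the endpoint is still unbound;
 * otherwise the backlog argument toggles the endpoint's listening flag
 * (non-zero enables it, zero clears it).  solisten_proto() is always
 * called, and for one-to-many (UDP model) sockets the SO_ACCEPTCONN flag
 * is then removed again, since accept(2) is not used on them.
 */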
4185int
4186sctp_listen(struct socket *so, int backlog, struct thread *p)
4187{
4188 /*
4189 * Note this module depends on the protocol processing being called
4190 * AFTER any socket level flags and backlog are applied to the
4191 * socket. The traditional way that the socket flags are applied is
4192 * AFTER protocol processing. We have made a change to the
4193	 * sys/kern/uipc_socket.c module to reverse this, and that change MUST
4194	 * be in place if the socket API for SCTP is to work properly.
4195 */
4196
4197 int error = 0;
4198 struct sctp_inpcb *inp;
4199
4200 inp = (struct sctp_inpcb *)so->so_pcb;
4201 if (inp == 0) {
4202		/* Same as TCP: the PCB is not set up, so fail. */
4203 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4204 return (ECONNRESET);
4205 }
4206 SCTP_INP_RLOCK(inp);
4207#ifdef SCTP_LOCK_LOGGING
4208 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOCK_LOGGING_ENABLE) {
4209 sctp_log_lock(inp, (struct sctp_tcb *)NULL, SCTP_LOG_LOCK_SOCK);
4210 }
4211#endif
4212 SOCK_LOCK(so);
4213 error = solisten_proto_check(so);
4214 if (error) {
4215 SOCK_UNLOCK(so);
4216 SCTP_INP_RUNLOCK(inp);
4217 return (error);
4218 }
4219 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
4220 (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED)) {
4221 /* We are already connected AND the TCP model */
4222 SCTP_INP_RUNLOCK(inp);
4223 SOCK_UNLOCK(so);
4224 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EADDRINUSE);
4225 return (EADDRINUSE);
4226 }
4227 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
4228 /* We must do a bind. */
4229 SOCK_UNLOCK(so);
4230 SCTP_INP_RUNLOCK(inp);
4231 if ((error = sctp_inpcb_bind(so, NULL, NULL, p))) {
4232			/* bind error, probably a permissions problem */
4233 return (error);
4234 }
4235 SOCK_LOCK(so);
4236 } else {
4237 if (backlog != 0) {
4238 inp->sctp_flags |= SCTP_PCB_FLAGS_LISTENING;
4239 } else {
4240 inp->sctp_flags &= ~SCTP_PCB_FLAGS_LISTENING;
4241 }
4242 SCTP_INP_RUNLOCK(inp);
4243 }
4244 /* It appears for 7.0 and on, we must always call this. */
4245 solisten_proto(so, backlog);
4246 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
4247 /* remove the ACCEPTCONN flag for one-to-many sockets */
4248 so->so_options &= ~SO_ACCEPTCONN;
4249 }
4250 if (backlog == 0) {
4251 /* turning off listen */
4252 so->so_options &= ~SO_ACCEPTCONN;
4253 }
4254 SOCK_UNLOCK(so);
4255 return (error);
4256}
4257
4258static int sctp_defered_wakeup_cnt = 0;
4259
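/*
 * sctp_accept() applies only to one-to-one (TCP model) sockets; one-to-many
 * sockets get EOPNOTSUPP.  Illustrative userland sketch under that
 * assumption ("sa" assumed already filled in, error handling elided):
 *
 *	int lfd, cfd;
 *	struct sockaddr_in peer;
 *	socklen_t len;
 *
 *	lfd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
 *	bind(lfd, (struct sockaddr *)&sa, sizeof(sa));
 *	listen(lfd, 5);
 *	len = sizeof(peer);
 *	cfd = accept(lfd, (struct sockaddr *)&peer, &len);
 */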
4260int
4261sctp_accept(struct socket *so, struct sockaddr **addr)
4262{
4263 struct sctp_tcb *stcb;
4264 struct sctp_inpcb *inp;
4265 union sctp_sockstore store;
4266
4267#ifdef INET6
4268 int error;
4269
4270#endif
4271 inp = (struct sctp_inpcb *)so->so_pcb;
4272
4273 if (inp == 0) {
4274 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4275 return (ECONNRESET);
4276 }
4277 SCTP_INP_RLOCK(inp);
4278 if (inp->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
4279 SCTP_INP_RUNLOCK(inp);
4280 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EOPNOTSUPP);
4281 return (EOPNOTSUPP);
4282 }
4283 if (so->so_state & SS_ISDISCONNECTED) {
4284 SCTP_INP_RUNLOCK(inp);
4285 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ECONNABORTED);
4286 return (ECONNABORTED);
4287 }
4288 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4289 if (stcb == NULL) {
4290 SCTP_INP_RUNLOCK(inp);
4291 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4292 return (ECONNRESET);
4293 }
4294 SCTP_TCB_LOCK(stcb);
4295 SCTP_INP_RUNLOCK(inp);
4296 store = stcb->asoc.primary_destination->ro._l_addr;
4297 SCTP_TCB_UNLOCK(stcb);
4298 switch (store.sa.sa_family) {
4299 case AF_INET:
4300 {
4301 struct sockaddr_in *sin;
4302
4303 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4304 sin->sin_family = AF_INET;
4305 sin->sin_len = sizeof(*sin);
4306 sin->sin_port = ((struct sockaddr_in *)&store)->sin_port;
4307 sin->sin_addr = ((struct sockaddr_in *)&store)->sin_addr;
4308 *addr = (struct sockaddr *)sin;
4309 break;
4310 }
4311#ifdef INET6
4312 case AF_INET6:
4313 {
4314 struct sockaddr_in6 *sin6;
4315
4316 SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6);
4317 sin6->sin6_family = AF_INET6;
4318 sin6->sin6_len = sizeof(*sin6);
4319 sin6->sin6_port = ((struct sockaddr_in6 *)&store)->sin6_port;
4320
4321 sin6->sin6_addr = ((struct sockaddr_in6 *)&store)->sin6_addr;
4322 if ((error = sa6_recoverscope(sin6)) != 0) {
4323 SCTP_FREE_SONAME(sin6);
4324 return (error);
4325 }
4326 *addr = (struct sockaddr *)sin6;
4327 break;
4328 }
4329#endif
4330 default:
4331		/* TSNH: this should not happen */
4332 break;
4333 }
4334 /* Wake any delayed sleep action */
4335 if (inp->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE) {
4336 SCTP_INP_WLOCK(inp);
4337 inp->sctp_flags &= ~SCTP_PCB_FLAGS_DONT_WAKE;
4338 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT) {
4339 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEOUTPUT;
4340 SCTP_INP_WUNLOCK(inp);
4341 SOCKBUF_LOCK(&inp->sctp_socket->so_snd);
4342 if (sowriteable(inp->sctp_socket)) {
4343 sowwakeup_locked(inp->sctp_socket);
4344 } else {
4345 SOCKBUF_UNLOCK(&inp->sctp_socket->so_snd);
4346 }
4347 SCTP_INP_WLOCK(inp);
4348 }
4349 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT) {
4350 inp->sctp_flags &= ~SCTP_PCB_FLAGS_WAKEINPUT;
4351 SCTP_INP_WUNLOCK(inp);
4352 SOCKBUF_LOCK(&inp->sctp_socket->so_rcv);
4353 if (soreadable(inp->sctp_socket)) {
4354 sctp_defered_wakeup_cnt++;
4355 sorwakeup_locked(inp->sctp_socket);
4356 } else {
4357 SOCKBUF_UNLOCK(&inp->sctp_socket->so_rcv);
4358 }
4359 SCTP_INP_WLOCK(inp);
4360 }
4361 SCTP_INP_WUNLOCK(inp);
4362 }
4363 return (0);
4364}
4365
4366int
4367sctp_ingetaddr(struct socket *so, struct sockaddr **addr)
4368{
4369 struct sockaddr_in *sin;
4370 uint32_t vrf_id;
4371 struct sctp_inpcb *inp;
4372 struct sctp_ifa *sctp_ifa;
4373
4374 /*
4375 * Do the malloc first in case it blocks.
4376 */
4377 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4378 sin->sin_family = AF_INET;
4379 sin->sin_len = sizeof(*sin);
4380 inp = (struct sctp_inpcb *)so->so_pcb;
4381 if (!inp) {
4382 SCTP_FREE_SONAME(sin);
4383 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4384		return (ECONNRESET);
4385 }
4386 SCTP_INP_RLOCK(inp);
4387 sin->sin_port = inp->sctp_lport;
4388 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4389 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
4390 struct sctp_tcb *stcb;
4391 struct sockaddr_in *sin_a;
4392 struct sctp_nets *net;
4393 int fnd;
4394
4395 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4396 if (stcb == NULL) {
4397 goto notConn;
4398 }
4399 fnd = 0;
4400 sin_a = NULL;
4401 SCTP_TCB_LOCK(stcb);
4402 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4403 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4404 if (sin_a == NULL)
4405 /* this will make coverity happy */
4406 continue;
4407
4408 if (sin_a->sin_family == AF_INET) {
4409 fnd = 1;
4410 break;
4411 }
4412 }
4413 if ((!fnd) || (sin_a == NULL)) {
4414 /* punt */
4415 SCTP_TCB_UNLOCK(stcb);
4416 goto notConn;
4417 }
4418 vrf_id = inp->def_vrf_id;
4419 sctp_ifa = sctp_source_address_selection(inp,
4420 stcb,
4421 (sctp_route_t *) & net->ro,
4422 net, 0, vrf_id);
4423 if (sctp_ifa) {
4424 sin->sin_addr = sctp_ifa->address.sin.sin_addr;
4425 sctp_free_ifa(sctp_ifa);
4426 }
4427 SCTP_TCB_UNLOCK(stcb);
4428 } else {
4429			/* For the bound-all case you get back INADDR_ANY (0) */
4430 notConn:
4431 sin->sin_addr.s_addr = 0;
4432 }
4433
4434 } else {
4435 /* Take the first IPv4 address in the list */
4436 struct sctp_laddr *laddr;
4437 int fnd = 0;
4438
4439 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4440 if (laddr->ifa->address.sa.sa_family == AF_INET) {
4441 struct sockaddr_in *sin_a;
4442
4443 sin_a = (struct sockaddr_in *)&laddr->ifa->address.sa;
4444 sin->sin_addr = sin_a->sin_addr;
4445 fnd = 1;
4446 break;
4447 }
4448 }
4449 if (!fnd) {
4450 SCTP_FREE_SONAME(sin);
4451 SCTP_INP_RUNLOCK(inp);
4452 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
4453			return (ENOENT);
4454 }
4455 }
4456 SCTP_INP_RUNLOCK(inp);
4457 (*addr) = (struct sockaddr *)sin;
4458 return (0);
4459}
4460
4461int
4462sctp_peeraddr(struct socket *so, struct sockaddr **addr)
4463{
4464 struct sockaddr_in *sin = (struct sockaddr_in *)*addr;
4465 int fnd;
4466 struct sockaddr_in *sin_a;
4467 struct sctp_inpcb *inp;
4468 struct sctp_tcb *stcb;
4469 struct sctp_nets *net;
4470
4471 /* Do the malloc first in case it blocks. */
4472 inp = (struct sctp_inpcb *)so->so_pcb;
4473 if ((inp == NULL) ||
4474 ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) {
4475 /* UDP type and listeners will drop out here */
4476 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOTCONN);
4477 return (ENOTCONN);
4478 }
4479 SCTP_MALLOC_SONAME(sin, struct sockaddr_in *, sizeof *sin);
4480 sin->sin_family = AF_INET;
4481 sin->sin_len = sizeof(*sin);
4482
4483	/* We must recapture the inp in case we blocked */
4484 inp = (struct sctp_inpcb *)so->so_pcb;
4485 if (!inp) {
4486 SCTP_FREE_SONAME(sin);
4487 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4488		return (ECONNRESET);
4489 }
4490 SCTP_INP_RLOCK(inp);
4491 stcb = LIST_FIRST(&inp->sctp_asoc_list);
4492 if (stcb) {
4493 SCTP_TCB_LOCK(stcb);
4494 }
4495 SCTP_INP_RUNLOCK(inp);
4496 if (stcb == NULL) {
4497 SCTP_FREE_SONAME(sin);
4498 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, EINVAL);
4499		return (ECONNRESET);
4500 }
4501 fnd = 0;
4502 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4503 sin_a = (struct sockaddr_in *)&net->ro._l_addr;
4504 if (sin_a->sin_family == AF_INET) {
4505 fnd = 1;
4506 sin->sin_port = stcb->rport;
4507 sin->sin_addr = sin_a->sin_addr;
4508 break;
4509 }
4510 }
4511 SCTP_TCB_UNLOCK(stcb);
4512 if (!fnd) {
4513 /* No IPv4 address */
4514 SCTP_FREE_SONAME(sin);
4515 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP_USRREQ, ENOENT);
4516		return (ENOENT);
4517 }
4518 (*addr) = (struct sockaddr *)sin;
4519 return (0);
4520}
4521
4522struct pr_usrreqs sctp_usrreqs = {
4523 .pru_abort = sctp_abort,
4524 .pru_accept = sctp_accept,
4525 .pru_attach = sctp_attach,
4526 .pru_bind = sctp_bind,
4527 .pru_connect = sctp_connect,
4528 .pru_control = in_control,
4529 .pru_close = sctp_close,
4530 .pru_detach = sctp_close,
4531 .pru_sopoll = sopoll_generic,
4532 .pru_flush = sctp_flush,
4533 .pru_disconnect = sctp_disconnect,
4534 .pru_listen = sctp_listen,
4535 .pru_peeraddr = sctp_peeraddr,
4536 .pru_send = sctp_sendm,
4537 .pru_shutdown = sctp_shutdown,
4538 .pru_sockaddr = sctp_ingetaddr,
4539 .pru_sosend = sctp_sosend,
4540 .pru_soreceive = sctp_soreceive
4541};