sctp_pcb.c (revision 166675) → sctp_pcb.c (revision 167598)
1/*-
2 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_pcb.c,v 1.38 2005/03/06 16:04:18 itojun Exp $ */
32
33#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 2001-2007, Cisco Systems, Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
6 *
7 * a) Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 *
10 * b) Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the distribution.
13 *
14 * c) Neither the name of Cisco Systems, Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31/* $KAME: sctp_pcb.c,v 1.38 2005/03/06 16:04:18 itojun Exp $ */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_pcb.c 166675 2007-02-12 23:24:31Z rrs $");
34__FBSDID("$FreeBSD: head/sys/netinet/sctp_pcb.c 167598 2007-03-15 11:27:14Z rrs $");
35
36#include <netinet/sctp_os.h>
37#include <sys/proc.h>
38#include <netinet/sctp_var.h>
35
36#include <netinet/sctp_os.h>
37#include <sys/proc.h>
38#include <netinet/sctp_var.h>
39#include <netinet/sctp_sysctl.h>
39#include <netinet/sctp_pcb.h>
40#include <netinet/sctputil.h>
41#include <netinet/sctp.h>
42#include <netinet/sctp_header.h>
43#include <netinet/sctp_asconf.h>
44#include <netinet/sctp_output.h>
45#include <netinet/sctp_timer.h>
40#include <netinet/sctp_pcb.h>
41#include <netinet/sctputil.h>
42#include <netinet/sctp.h>
43#include <netinet/sctp_header.h>
44#include <netinet/sctp_asconf.h>
45#include <netinet/sctp_output.h>
46#include <netinet/sctp_timer.h>
47#include <netinet/sctp_bsd_addr.h>
46
47
48
49
48#ifdef SCTP_DEBUG
49uint32_t sctp_debug_on = 0;
50
51#endif /* SCTP_DEBUG */
52
53
54extern int sctp_pcbtblsize;
55extern int sctp_hashtblsize;
56extern int sctp_chunkscale;
57
58struct sctp_epinfo sctppcbinfo;
59
60/* FIX: we don't handle multiple link local scopes */
61/* "scopeless" replacement IN6_ARE_ADDR_EQUAL */
62int
63SCTP6_ARE_ADDR_EQUAL(struct in6_addr *a, struct in6_addr *b)
64{
65 struct in6_addr tmp_a, tmp_b;
66
67 /* use a copy of a and b */
68 tmp_a = *a;
69 tmp_b = *b;
70 in6_clearscope(&tmp_a);
71 in6_clearscope(&tmp_b);
72 return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
73}
74
50struct sctp_epinfo sctppcbinfo;
51
52/* FIX: we don't handle multiple link local scopes */
53/* "scopeless" replacement IN6_ARE_ADDR_EQUAL */
54int
55SCTP6_ARE_ADDR_EQUAL(struct in6_addr *a, struct in6_addr *b)
56{
57 struct in6_addr tmp_a, tmp_b;
58
59 /* use a copy of a and b */
60 tmp_a = *a;
61 tmp_b = *b;
62 in6_clearscope(&tmp_a);
63 in6_clearscope(&tmp_b);
64 return (IN6_ARE_ADDR_EQUAL(&tmp_a, &tmp_b));
65}
66
75
76void
77sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
78{
79 /*
80 * We really don't need to lock this, but I will just because it
81 * does not hurt.
82 */
83 SCTP_INP_INFO_RLOCK();
84 spcb->ep_count = sctppcbinfo.ipi_count_ep;
85 spcb->asoc_count = sctppcbinfo.ipi_count_asoc;
86 spcb->laddr_count = sctppcbinfo.ipi_count_laddr;
87 spcb->raddr_count = sctppcbinfo.ipi_count_raddr;
88 spcb->chk_count = sctppcbinfo.ipi_count_chunk;
89 spcb->readq_count = sctppcbinfo.ipi_count_readq;
90 spcb->stream_oque = sctppcbinfo.ipi_count_strmoq;
91 spcb->free_chunks = sctppcbinfo.ipi_free_chunks;
92
93 SCTP_INP_INFO_RUNLOCK();
94}
95
67void
68sctp_fill_pcbinfo(struct sctp_pcbinfo *spcb)
69{
70 /*
71 * We really don't need to lock this, but I will just because it
72 * does not hurt.
73 */
74 SCTP_INP_INFO_RLOCK();
75 spcb->ep_count = sctppcbinfo.ipi_count_ep;
76 spcb->asoc_count = sctppcbinfo.ipi_count_asoc;
77 spcb->laddr_count = sctppcbinfo.ipi_count_laddr;
78 spcb->raddr_count = sctppcbinfo.ipi_count_raddr;
79 spcb->chk_count = sctppcbinfo.ipi_count_chunk;
80 spcb->readq_count = sctppcbinfo.ipi_count_readq;
81 spcb->stream_oque = sctppcbinfo.ipi_count_strmoq;
82 spcb->free_chunks = sctppcbinfo.ipi_free_chunks;
83
84 SCTP_INP_INFO_RUNLOCK();
85}
86
87/*
 88 * Addresses are added to VRF's (Virtual Routers). For BSD we
89 * have only the default VRF 0. We maintain a hash list of
90 * VRF's. Each VRF has its own list of sctp_ifn's. Each of
91 * these has a list of addresses. When we add a new address
92 * to a VRF we lookup the ifn/ifn_index, if the ifn does
93 * not exist we create it and add it to the list of IFN's
94 * within the VRF. Once we have the sctp_ifn, we add the
95 * address to the list. So we look something like:
96 *
97 * hash-vrf-table
98 * vrf-> ifn-> ifn -> ifn
99 * vrf |
100 * ... +--ifa-> ifa -> ifa
101 * vrf
102 *
 103 * We keep these separate lists since the SCTP subsystem will
104 * point to these from its source address selection nets structure.
105 * When an address is deleted it does not happen right away on
106 * the SCTP side, it gets scheduled. What we do when a
107 * delete happens is immediately remove the address from
108 * the master list and decrement the refcount. As our
109 * addip iterator works through and frees the src address
110 * selection pointing to the sctp_ifa, eventually the refcount
111 * will reach 0 and we will delete it. Note that it is assumed
112 * that any locking on system level ifn/ifa is done at the
113 * caller of these functions and these routines will only
114 * lock the SCTP structures as they add or delete things.
115 *
116 * Other notes on VRF concepts.
117 * - An endpoint can be in multiple VRF's
118 * - An association lives within a VRF and only one VRF.
119 * - Any incoming packet we can deduce the VRF for by
120 * looking at the mbuf/pak inbound (for BSD its VRF=0 :D)
121 * - Any downward send call or connect call must supply the
122 * VRF via ancillary data or via some sort of set default
123 * VRF socket option call (again for BSD no brainer since
124 * the VRF is always 0).
125 * - An endpoint may add multiple VRF's to it.
126 * - Listening sockets can accept associations in any
127 * of the VRF's they are in but the assoc will end up
128 * in only one VRF (gotten from the packet or connect/send).
129 *
130 */
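To make the layout above concrete, here is a minimal sketch of walking one VRF down through its ifn and ifa lists, using the structure, list, and flag names that appear in this file; the helper name is hypothetical and it assumes the caller already holds SCTP_IPI_ADDR_LOCK():

/*
 * Hypothetical illustration only: count the usable addresses in one VRF by
 * walking vrf -> ifn -> ifa exactly as described above.  Assumes the caller
 * already holds SCTP_IPI_ADDR_LOCK().
 */
static uint32_t
sctp_count_vrf_addrs(uint32_t vrfid)
{
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifnp;
	struct sctp_ifa *sctp_ifap;
	uint32_t count = 0;

	vrf = sctp_find_vrf(vrfid);
	if (vrf == NULL)
		return (0);
	LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
		LIST_FOREACH(sctp_ifap, &sctp_ifnp->ifalist, next_ifa) {
			if (sctp_ifap->localifa_flags & SCTP_BEING_DELETED)
				continue;	/* delete is pending, skip it */
			count++;
		}
	}
	return (count);
}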
96
131
132struct sctp_vrf *
133sctp_allocate_vrf(int vrfid)
134{
135 struct sctp_vrf *vrf = NULL;
136 struct sctp_vrflist *bucket;
137
138 /* First allocate the VRF structure */
139 vrf = sctp_find_vrf(vrfid);
140 if (vrf) {
141 /* Already allocated */
142 return (vrf);
143 }
144 SCTP_MALLOC(vrf, struct sctp_vrf *, sizeof(struct sctp_vrf),
145 "SCTP_VRF");
146 if (vrf == NULL) {
147 /* No memory */
148#ifdef INVARIANTS
149 panic("No memory for VRF:%d", vrfid);
150#endif
151 return (NULL);
152 }
153 /* setup the VRF */
154 memset(vrf, 0, sizeof(struct sctp_vrf));
155 vrf->vrf_id = vrfid;
156 LIST_INIT(&vrf->ifnlist);
157 vrf->total_ifa_count = 0;
158 /* Add it to the hash table */
159 bucket = &sctppcbinfo.sctp_vrfhash[(vrfid & sctppcbinfo.hashvrfmark)];
160 LIST_INSERT_HEAD(bucket, vrf, next_vrf);
161 return (vrf);
162}
163
164
165struct sctp_ifn *
166sctp_find_ifn(struct sctp_vrf *vrf, void *ifn, uint32_t ifn_index)
167{
168 struct sctp_ifn *sctp_ifnp;
169
170 /*
 171 * We assume the lock is held for the addresses; if that's wrong,
 172 * problems could occur :-)
173 */
174 LIST_FOREACH(sctp_ifnp, &vrf->ifnlist, next_ifn) {
175 if (sctp_ifnp->ifn_index == ifn_index) {
176 return (sctp_ifnp);
177 }
178 if (sctp_ifnp->ifn_p && ifn && (sctp_ifnp->ifn_p == ifn)) {
179 return (sctp_ifnp);
180 }
181 }
182 return (NULL);
183}
184
185struct sctp_vrf *
186sctp_find_vrf(uint32_t vrfid)
187{
188 struct sctp_vrflist *bucket;
189 struct sctp_vrf *liste;
190
191 bucket = &sctppcbinfo.sctp_vrfhash[(vrfid & sctppcbinfo.hashvrfmark)];
192 LIST_FOREACH(liste, bucket, next_vrf) {
193 if (vrfid == liste->vrf_id) {
194 return (liste);
195 }
196 }
197 return (NULL);
198}
199
200void
201sctp_free_ifa(struct sctp_ifa *sctp_ifap)
202{
203 int ret;
204
205 ret = atomic_fetchadd_int(&sctp_ifap->refcount, -1);
206 if (ret == 1) {
207 /* We zero'd the count */
208 SCTP_FREE(sctp_ifap);
209 }
210}
211
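sctp_free_ifa() above is the release half of the sctp_ifa reference count; the acquire half is just an atomic increment, as the code further down does for sctp_ifn refcounts. A hypothetical helper (this name does not exist in the file) would be:

/* Hypothetical acquire half of the sctp_ifa refcount. */
static void
sctp_hold_ifa(struct sctp_ifa *sctp_ifap)
{
	atomic_add_int(&sctp_ifap->refcount, 1);
}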
212struct sctp_ifa *
213sctp_add_addr_to_vrf(uint32_t vrfid, void *ifn, uint32_t ifn_index,
214 uint32_t ifn_type, const char *if_name,
215 void *ifa, struct sockaddr *addr, uint32_t ifa_flags)
216{
217 struct sctp_vrf *vrf;
218 struct sctp_ifn *sctp_ifnp = NULL;
219 struct sctp_ifa *sctp_ifap = NULL;
220
221 /* How granular do we need the locks to be here? */
222 SCTP_IPI_ADDR_LOCK();
223 vrf = sctp_find_vrf(vrfid);
224 if (vrf == NULL) {
225 vrf = sctp_allocate_vrf(vrfid);
226 if (vrf == NULL) {
227 SCTP_IPI_ADDR_UNLOCK();
228 return (NULL);
229 }
230 }
231 sctp_ifnp = sctp_find_ifn(vrf, ifn, ifn_index);
232 if (sctp_ifnp == NULL) {
233 /*
234 * build one and add it, can't hold lock until after malloc
235 * done though.
236 */
237 SCTP_IPI_ADDR_UNLOCK();
238 SCTP_MALLOC(sctp_ifnp, struct sctp_ifn *, sizeof(struct sctp_ifn), "SCTP_IFN");
239 if (sctp_ifnp == NULL) {
240#ifdef INVARIANTS
 241 panic("No memory for IFN:%u", ifn_index);
242#endif
243 return (NULL);
244 }
245 sctp_ifnp->ifn_index = ifn_index;
246 sctp_ifnp->ifn_p = ifn;
247 sctp_ifnp->ifn_type = ifn_type;
248 sctp_ifnp->ifa_count = 0;
249 sctp_ifnp->refcount = 0;
250 sctp_ifnp->vrf = vrf;
251 memcpy(sctp_ifnp->ifn_name, if_name, SCTP_IFNAMSIZ);
252 LIST_INIT(&sctp_ifnp->ifalist);
253 SCTP_IPI_ADDR_LOCK();
254 LIST_INSERT_HEAD(&vrf->ifnlist, sctp_ifnp, next_ifn);
255 }
256 sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, 1);
257 if (sctp_ifap) {
258 /* Hmm, it already exists? */
259 if ((sctp_ifap->ifn_p) &&
260 (sctp_ifap->ifn_p->ifn_index == ifn_index)) {
261 if (sctp_ifap->localifa_flags & SCTP_BEING_DELETED) {
262 /* easy to solve, just switch back to active */
263 sctp_ifap->localifa_flags = SCTP_ADDR_VALID;
264 sctp_ifap->ifn_p = sctp_ifnp;
265 exit_stage_left:
266 SCTP_IPI_ADDR_UNLOCK();
267 return (sctp_ifap);
268 } else {
269 goto exit_stage_left;
270 }
271 } else {
272 if (sctp_ifap->ifn_p) {
273 /*
274 * The first IFN gets the address,
275 * duplicates are ignored.
276 */
277 goto exit_stage_left;
278 } else {
279 /* repair ifnp which was NULL ? */
280 sctp_ifap->localifa_flags = SCTP_ADDR_VALID;
281 sctp_ifap->ifn_p = sctp_ifnp;
282 atomic_add_int(&sctp_ifnp->refcount, 1);
283 }
284 goto exit_stage_left;
285 }
286 }
287 SCTP_IPI_ADDR_UNLOCK();
288 SCTP_MALLOC(sctp_ifap, struct sctp_ifa *, sizeof(struct sctp_ifa), "SCTP_IFA");
289 if (sctp_ifap == NULL) {
290#ifdef INVARIANTS
291 panic("No memory for IFA");
292#endif
293 return (NULL);
294 }
 295 memset(sctp_ifap, 0, sizeof(struct sctp_ifa));
296 sctp_ifap->ifn_p = sctp_ifnp;
297 atomic_add_int(&sctp_ifnp->refcount, 1);
298
299 sctp_ifap->ifa = ifa;
300 memcpy(&sctp_ifap->address, addr, addr->sa_len);
301 sctp_ifap->localifa_flags = SCTP_ADDR_VALID | SCTP_ADDR_DEFER_USE;
302 sctp_ifap->flags = ifa_flags;
303 /* Set scope */
304 if (sctp_ifap->address.sa.sa_family == AF_INET) {
305 struct sockaddr_in *sin;
306
307 sin = (struct sockaddr_in *)&sctp_ifap->address.sin;
308 if (SCTP_IFN_IS_IFT_LOOP(sctp_ifap->ifn_p) ||
309 (IN4_ISLOOPBACK_ADDRESS(&sin->sin_addr))) {
310 sctp_ifap->src_is_loop = 1;
311 }
312 if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
313 sctp_ifap->src_is_priv = 1;
314 }
315 } else if (sctp_ifap->address.sa.sa_family == AF_INET6) {
316 /* ok to use deprecated addresses? */
317 struct sockaddr_in6 *sin6;
318
319 sin6 = (struct sockaddr_in6 *)&sctp_ifap->address.sin6;
320 if (SCTP_IFN_IS_IFT_LOOP(sctp_ifap->ifn_p) ||
321 (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr))) {
322 sctp_ifap->src_is_loop = 1;
323 }
324 if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
325 sctp_ifap->src_is_priv = 1;
326 }
327 }
328 if ((sctp_ifap->src_is_priv == 0) &&
329 (sctp_ifap->src_is_loop == 0)) {
330 sctp_ifap->src_is_glob = 1;
331 }
332 SCTP_IPI_ADDR_LOCK();
333 sctp_ifap->refcount = 1;
334 LIST_INSERT_HEAD(&sctp_ifnp->ifalist, sctp_ifap, next_ifa);
335 sctp_ifnp->ifa_count++;
336 vrf->total_ifa_count++;
337 SCTP_IPI_ADDR_UNLOCK();
338 return (sctp_ifap);
339}
340
341struct sctp_ifa *
342sctp_del_addr_from_vrf(uint32_t vrfid, struct sockaddr *addr,
343 uint32_t ifn_index)
344{
345 struct sctp_vrf *vrf;
346 struct sctp_ifa *sctp_ifap = NULL;
347 struct sctp_ifn *sctp_ifnp = NULL;
348
349 SCTP_IPI_ADDR_LOCK();
350
351 vrf = sctp_find_vrf(vrfid);
352 if (vrf == NULL) {
353 printf("Can't find vrfid:%d\n", vrfid);
354 goto out_now;
355 }
356 sctp_ifnp = sctp_find_ifn(vrf, (void *)NULL, ifn_index);
357 if (sctp_ifnp == NULL) {
358 sctp_ifap = sctp_find_ifa_by_addr(addr, vrf->vrf_id, 1);
359 } else {
360 sctp_ifap = sctp_find_ifa_in_ifn(sctp_ifnp, addr, 1);
361 }
362
363 if (sctp_ifap) {
364 sctp_ifap->localifa_flags &= SCTP_ADDR_VALID;
365 sctp_ifap->localifa_flags |= SCTP_BEING_DELETED;
366 sctp_ifnp->ifa_count--;
367 vrf->total_ifa_count--;
368 LIST_REMOVE(sctp_ifap, next_ifa);
369 atomic_add_int(&sctp_ifnp->refcount, -1);
370 } else {
371 printf("Del Addr-ifn:%d Could not find address:",
372 ifn_index);
373 sctp_print_address(addr);
374 }
375out_now:
376 SCTP_IPI_ADDR_UNLOCK();
377 return (sctp_ifap);
378}
379
97/*
98 * Notes on locks for FreeBSD 5 and up. All association lookups that have a
 99 * definite ep, the INP structure is assumed to be locked for reading. If we
 100 * need to go find the INP (usually when a **inp is passed) then we must
101 * lock the INFO structure first and if needed lock the INP too. Note that if
102 * we lock it we must
103 *
104 */
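In code, the ordering described above looks like the following sketch; the macro names are the ones used throughout this file, the wrapper function is hypothetical, and a read-only walk is assumed:

/*
 * Hypothetical sketch of the lock order noted above: take the INFO read
 * lock first, then the INP lock, and release in reverse order.
 */
static void
sctp_lookup_lock_order_sketch(struct sctp_inpcb *inp)
{
	SCTP_INP_INFO_RLOCK();
	SCTP_INP_RLOCK(inp);
	/* ... walk inp->sctp_asoc_list or the hash chains here ... */
	SCTP_INP_RUNLOCK(inp);
	SCTP_INP_INFO_RUNLOCK();
}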
105
106
107/*
 108 * Given an endpoint, look and find in its association list any association
109 * with the "to" address given. This can be a "from" address, too, for
110 * inbound packets. For outbound packets it is a true "to" address.
111 */
112
113static struct sctp_tcb *
114sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
115 struct sockaddr *to, struct sctp_nets **netp)
116{
 117 /**** ASSUMES THE CALLER holds the INP_INFO_RLOCK */
118
119 /*
120 * Note for this module care must be taken when observing what to is
121 * for. In most of the rest of the code the TO field represents my
122 * peer and the FROM field represents my address. For this module it
123 * is reversed of that.
124 */
125 /*
126 * If we support the TCP model, then we must now dig through to see
127 * if we can find our endpoint in the list of tcp ep's.
128 */
129 uint16_t lport, rport;
130 struct sctppcbhead *ephead;
131 struct sctp_inpcb *inp;
132 struct sctp_laddr *laddr;
133 struct sctp_tcb *stcb;
134 struct sctp_nets *net;
135
136 if ((to == NULL) || (from == NULL)) {
137 return (NULL);
138 }
139 if (to->sa_family == AF_INET && from->sa_family == AF_INET) {
140 lport = ((struct sockaddr_in *)to)->sin_port;
141 rport = ((struct sockaddr_in *)from)->sin_port;
142 } else if (to->sa_family == AF_INET6 && from->sa_family == AF_INET6) {
143 lport = ((struct sockaddr_in6 *)to)->sin6_port;
144 rport = ((struct sockaddr_in6 *)from)->sin6_port;
145 } else {
146 return NULL;
147 }
148 ephead = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR(
149 (lport + rport), sctppcbinfo.hashtcpmark)];
150 /*
151 * Ok now for each of the guys in this bucket we must look and see:
 152 * - Does the remote port match. - Do its single association's
153 * addresses match this address (to). If so we update p_ep to point
154 * to this ep and return the tcb from it.
155 */
156 LIST_FOREACH(inp, ephead, sctp_hash) {
157 if (lport != inp->sctp_lport) {
158 continue;
159 }
160 SCTP_INP_RLOCK(inp);
161 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
162 SCTP_INP_RUNLOCK(inp);
163 continue;
164 }
165 /* check to see if the ep has one of the addresses */
166 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
167 /* We are NOT bound all, so look further */
168 int match = 0;
169
170 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
171
172 if (laddr->ifa == NULL) {
173#ifdef SCTP_DEBUG
174 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
175 printf("An ounce of prevention is worth a pound of cure\n");
176 }
177#endif
178 continue;
179 }
380/*
381 * Notes on locks for FreeBSD 5 and up. All association lookups that have a
 382 * definite ep, the INP structure is assumed to be locked for reading. If we
 383 * need to go find the INP (usually when a **inp is passed) then we must
384 * lock the INFO structure first and if needed lock the INP too. Note that if
385 * we lock it we must
386 *
387 */
388
389
390/*
 391 * Given an endpoint, look and find in its association list any association
392 * with the "to" address given. This can be a "from" address, too, for
393 * inbound packets. For outbound packets it is a true "to" address.
394 */
395
396static struct sctp_tcb *
397sctp_tcb_special_locate(struct sctp_inpcb **inp_p, struct sockaddr *from,
398 struct sockaddr *to, struct sctp_nets **netp)
399{
 400 /**** ASSUMES THE CALLER holds the INP_INFO_RLOCK */
401
402 /*
403 * Note for this module care must be taken when observing what to is
404 * for. In most of the rest of the code the TO field represents my
405 * peer and the FROM field represents my address. For this module it
406 * is reversed of that.
407 */
408 /*
409 * If we support the TCP model, then we must now dig through to see
410 * if we can find our endpoint in the list of tcp ep's.
411 */
412 uint16_t lport, rport;
413 struct sctppcbhead *ephead;
414 struct sctp_inpcb *inp;
415 struct sctp_laddr *laddr;
416 struct sctp_tcb *stcb;
417 struct sctp_nets *net;
418
419 if ((to == NULL) || (from == NULL)) {
420 return (NULL);
421 }
422 if (to->sa_family == AF_INET && from->sa_family == AF_INET) {
423 lport = ((struct sockaddr_in *)to)->sin_port;
424 rport = ((struct sockaddr_in *)from)->sin_port;
425 } else if (to->sa_family == AF_INET6 && from->sa_family == AF_INET6) {
426 lport = ((struct sockaddr_in6 *)to)->sin6_port;
427 rport = ((struct sockaddr_in6 *)from)->sin6_port;
428 } else {
429 return NULL;
430 }
431 ephead = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR(
432 (lport + rport), sctppcbinfo.hashtcpmark)];
433 /*
434 * Ok now for each of the guys in this bucket we must look and see:
 435 * - Does the remote port match. - Do its single association's
436 * addresses match this address (to). If so we update p_ep to point
437 * to this ep and return the tcb from it.
438 */
439 LIST_FOREACH(inp, ephead, sctp_hash) {
440 if (lport != inp->sctp_lport) {
441 continue;
442 }
443 SCTP_INP_RLOCK(inp);
444 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
445 SCTP_INP_RUNLOCK(inp);
446 continue;
447 }
448 /* check to see if the ep has one of the addresses */
449 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
450 /* We are NOT bound all, so look further */
451 int match = 0;
452
453 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
454
455 if (laddr->ifa == NULL) {
456#ifdef SCTP_DEBUG
457 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
458 printf("An ounce of prevention is worth a pound of cure\n");
459 }
460#endif
461 continue;
462 }
 180 if (laddr->ifa->ifa_addr == NULL) {
 181#ifdef SCTP_DEBUG
 182 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
 183 printf("ifa with a NULL address\n");
 184 }
 185#endif
 186 continue;
 187 }
 463 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
 464#ifdef SCTP_DEBUG
 465 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
 466 printf("ifa being deleted\n");
 467 }
 468#endif
 469 continue;
 470 }
 188 if (laddr->ifa->ifa_addr->sa_family ==
 189 to->sa_family) {
 190 /* see if it matches */
 191 struct sockaddr_in *intf_addr, *sin;
 192
 193 intf_addr = (struct sockaddr_in *)
 194 laddr->ifa->ifa_addr;
 471 if (laddr->ifa->address.sa.sa_family ==
 472 to->sa_family) {
 473 /* see if it matches */
 474 struct sockaddr_in *intf_addr, *sin;
 475
 476 intf_addr = &laddr->ifa->address.sin;
195 sin = (struct sockaddr_in *)to;
196 if (from->sa_family == AF_INET) {
197 if (sin->sin_addr.s_addr ==
198 intf_addr->sin_addr.s_addr) {
199 match = 1;
200 break;
201 }
202 } else {
203 struct sockaddr_in6 *intf_addr6;
204 struct sockaddr_in6 *sin6;
205
206 sin6 = (struct sockaddr_in6 *)
207 to;
477 sin = (struct sockaddr_in *)to;
478 if (from->sa_family == AF_INET) {
479 if (sin->sin_addr.s_addr ==
480 intf_addr->sin_addr.s_addr) {
481 match = 1;
482 break;
483 }
484 } else {
485 struct sockaddr_in6 *intf_addr6;
486 struct sockaddr_in6 *sin6;
487
488 sin6 = (struct sockaddr_in6 *)
489 to;
208 intf_addr6 = (struct sockaddr_in6 *)
209 laddr->ifa->ifa_addr;
490 intf_addr6 = &laddr->ifa->address.sin6;
210
211 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
212 &intf_addr6->sin6_addr)) {
213 match = 1;
214 break;
215 }
216 }
217 }
218 }
219 if (match == 0) {
220 /* This endpoint does not have this address */
221 SCTP_INP_RUNLOCK(inp);
222 continue;
223 }
224 }
225 /*
226 * Ok if we hit here the ep has the address, does it hold
227 * the tcb?
228 */
229
230 stcb = LIST_FIRST(&inp->sctp_asoc_list);
231 if (stcb == NULL) {
232 SCTP_INP_RUNLOCK(inp);
233 continue;
234 }
235 SCTP_TCB_LOCK(stcb);
236 if (stcb->rport != rport) {
237 /* remote port does not match. */
238 SCTP_TCB_UNLOCK(stcb);
239 SCTP_INP_RUNLOCK(inp);
240 continue;
241 }
242 /* Does this TCB have a matching address? */
243 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
244
245 if (net->ro._l_addr.sa.sa_family != from->sa_family) {
246 /* not the same family, can't be a match */
247 continue;
248 }
249 if (from->sa_family == AF_INET) {
250 struct sockaddr_in *sin, *rsin;
251
252 sin = (struct sockaddr_in *)&net->ro._l_addr;
253 rsin = (struct sockaddr_in *)from;
254 if (sin->sin_addr.s_addr ==
255 rsin->sin_addr.s_addr) {
256 /* found it */
257 if (netp != NULL) {
258 *netp = net;
259 }
260 /* Update the endpoint pointer */
261 *inp_p = inp;
262 SCTP_INP_RUNLOCK(inp);
263 return (stcb);
264 }
265 } else {
266 struct sockaddr_in6 *sin6, *rsin6;
267
268 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
269 rsin6 = (struct sockaddr_in6 *)from;
270 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
271 &rsin6->sin6_addr)) {
272 /* found it */
273 if (netp != NULL) {
274 *netp = net;
275 }
276 /* Update the endpoint pointer */
277 *inp_p = inp;
278 SCTP_INP_RUNLOCK(inp);
279 return (stcb);
280 }
281 }
282 }
283 SCTP_TCB_UNLOCK(stcb);
284 SCTP_INP_RUNLOCK(inp);
285 }
286 return (NULL);
287}
288
289/*
290 * rules for use
291 *
292 * 1) If I return a NULL you must decrement any INP ref cnt. 2) If I find an
293 * stcb, both will be locked (locked_tcb and stcb) but decrement will be done
294 * (if locked == NULL). 3) Decrement happens on return ONLY if locked ==
295 * NULL.
296 */
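For reference, a caller that follows rules 1-3 might look like this sketch; the wrapper function is hypothetical, and inp is assumed to already carry a reference (e.g. from sctp_pcb_findep()):

/*
 * Hypothetical caller of sctp_findassociation_ep_addr() following the
 * rules above.  inp is assumed to already hold a reference.
 */
static struct sctp_tcb *
sctp_lookup_assoc_sketch(struct sctp_inpcb *inp, struct sockaddr *remote,
    struct sockaddr *local)
{
	struct sctp_nets *net;
	struct sctp_tcb *stcb;

	stcb = sctp_findassociation_ep_addr(&inp, remote, &net, local, NULL);
	if (stcb == NULL) {
		/* Rule 1: NULL return, so we must drop the INP ref ourselves. */
		SCTP_INP_WLOCK(inp);
		SCTP_INP_DECR_REF(inp);
		SCTP_INP_WUNLOCK(inp);
		return (NULL);
	}
	/*
	 * Rules 2/3: locked_tcb was NULL, so the ref count was already
	 * decremented and stcb comes back locked; the caller must
	 * SCTP_TCB_UNLOCK(stcb) when it is done with the association.
	 */
	return (stcb);
}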
297
298struct sctp_tcb *
299sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote,
300 struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb)
301{
302 struct sctpasochead *head;
303 struct sctp_inpcb *inp;
304 struct sctp_tcb *stcb;
305 struct sctp_nets *net;
306 uint16_t rport;
307
308 inp = *inp_p;
309 if (remote->sa_family == AF_INET) {
310 rport = (((struct sockaddr_in *)remote)->sin_port);
311 } else if (remote->sa_family == AF_INET6) {
312 rport = (((struct sockaddr_in6 *)remote)->sin6_port);
313 } else {
314 return (NULL);
315 }
316 if (locked_tcb) {
317 /*
318 * UN-lock so we can do proper locking here this occurs when
319 * called from load_addresses_from_init.
320 */
321 SCTP_TCB_UNLOCK(locked_tcb);
322 }
323 SCTP_INP_INFO_RLOCK();
324 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
325 /*
326 * Now either this guy is our listener or it's the
327 * connector. If it is the one that issued the connect, then
 328 * its only chance is to be the first TCB in the list. If
329 * it is the acceptor, then do the special_lookup to hash
330 * and find the real inp.
331 */
332 if ((inp->sctp_socket) && (inp->sctp_socket->so_qlimit)) {
333 /* to is peer addr, from is my addr */
334 stcb = sctp_tcb_special_locate(inp_p, remote, local,
335 netp);
336 if ((stcb != NULL) && (locked_tcb == NULL)) {
337 /* we have a locked tcb, lower refcount */
338 SCTP_INP_WLOCK(inp);
339 SCTP_INP_DECR_REF(inp);
340 SCTP_INP_WUNLOCK(inp);
341 }
342 if ((locked_tcb != NULL) && (locked_tcb != stcb)) {
343 SCTP_INP_RLOCK(locked_tcb->sctp_ep);
344 SCTP_TCB_LOCK(locked_tcb);
345 SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
346 }
347 SCTP_INP_INFO_RUNLOCK();
348 return (stcb);
349 } else {
350 SCTP_INP_WLOCK(inp);
351 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
352 goto null_return;
353 }
354 stcb = LIST_FIRST(&inp->sctp_asoc_list);
355 if (stcb == NULL) {
356 goto null_return;
357 }
358 SCTP_TCB_LOCK(stcb);
359 if (stcb->rport != rport) {
360 /* remote port does not match. */
361 SCTP_TCB_UNLOCK(stcb);
362 goto null_return;
363 }
364 /* now look at the list of remote addresses */
365 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
366#ifdef INVARIANTS
367 if (net == (TAILQ_NEXT(net, sctp_next))) {
368 panic("Corrupt net list");
369 }
370#endif
371 if (net->ro._l_addr.sa.sa_family !=
372 remote->sa_family) {
373 /* not the same family */
374 continue;
375 }
376 if (remote->sa_family == AF_INET) {
377 struct sockaddr_in *sin, *rsin;
378
379 sin = (struct sockaddr_in *)
380 &net->ro._l_addr;
381 rsin = (struct sockaddr_in *)remote;
382 if (sin->sin_addr.s_addr ==
383 rsin->sin_addr.s_addr) {
384 /* found it */
385 if (netp != NULL) {
386 *netp = net;
387 }
388 if (locked_tcb == NULL) {
389 SCTP_INP_DECR_REF(inp);
390 } else if (locked_tcb != stcb) {
391 SCTP_TCB_LOCK(locked_tcb);
392 }
393 SCTP_INP_WUNLOCK(inp);
394 SCTP_INP_INFO_RUNLOCK();
395 return (stcb);
396 }
397 } else if (remote->sa_family == AF_INET6) {
398 struct sockaddr_in6 *sin6, *rsin6;
399
400 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
401 rsin6 = (struct sockaddr_in6 *)remote;
402 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
403 &rsin6->sin6_addr)) {
404 /* found it */
405 if (netp != NULL) {
406 *netp = net;
407 }
408 if (locked_tcb == NULL) {
409 SCTP_INP_DECR_REF(inp);
410 } else if (locked_tcb != stcb) {
411 SCTP_TCB_LOCK(locked_tcb);
412 }
413 SCTP_INP_WUNLOCK(inp);
414 SCTP_INP_INFO_RUNLOCK();
415 return (stcb);
416 }
417 }
418 }
419 SCTP_TCB_UNLOCK(stcb);
420 }
421 } else {
422 SCTP_INP_WLOCK(inp);
423 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
424 goto null_return;
425 }
426 head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(rport,
427 inp->sctp_hashmark)];
428 if (head == NULL) {
429 goto null_return;
430 }
431 LIST_FOREACH(stcb, head, sctp_tcbhash) {
432 if (stcb->rport != rport) {
433 /* remote port does not match */
434 continue;
435 }
436 /* now look at the list of remote addresses */
437 SCTP_TCB_LOCK(stcb);
438 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
439#ifdef INVARIANTS
440 if (net == (TAILQ_NEXT(net, sctp_next))) {
441 panic("Corrupt net list");
442 }
443#endif
444 if (net->ro._l_addr.sa.sa_family !=
445 remote->sa_family) {
446 /* not the same family */
447 continue;
448 }
449 if (remote->sa_family == AF_INET) {
450 struct sockaddr_in *sin, *rsin;
451
452 sin = (struct sockaddr_in *)
453 &net->ro._l_addr;
454 rsin = (struct sockaddr_in *)remote;
455 if (sin->sin_addr.s_addr ==
456 rsin->sin_addr.s_addr) {
457 /* found it */
458 if (netp != NULL) {
459 *netp = net;
460 }
461 if (locked_tcb == NULL) {
462 SCTP_INP_DECR_REF(inp);
463 } else if (locked_tcb != stcb) {
464 SCTP_TCB_LOCK(locked_tcb);
465 }
466 SCTP_INP_WUNLOCK(inp);
467 SCTP_INP_INFO_RUNLOCK();
468 return (stcb);
469 }
470 } else if (remote->sa_family == AF_INET6) {
471 struct sockaddr_in6 *sin6, *rsin6;
472
473 sin6 = (struct sockaddr_in6 *)
474 &net->ro._l_addr;
475 rsin6 = (struct sockaddr_in6 *)remote;
476 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
477 &rsin6->sin6_addr)) {
478 /* found it */
479 if (netp != NULL) {
480 *netp = net;
481 }
482 if (locked_tcb == NULL) {
483 SCTP_INP_DECR_REF(inp);
484 } else if (locked_tcb != stcb) {
485 SCTP_TCB_LOCK(locked_tcb);
486 }
487 SCTP_INP_WUNLOCK(inp);
488 SCTP_INP_INFO_RUNLOCK();
489 return (stcb);
490 }
491 }
492 }
493 SCTP_TCB_UNLOCK(stcb);
494 }
495 }
496null_return:
497 /* clean up for returning null */
498 if (locked_tcb) {
499 SCTP_TCB_LOCK(locked_tcb);
500 }
501 SCTP_INP_WUNLOCK(inp);
502 SCTP_INP_INFO_RUNLOCK();
503 /* not found */
504 return (NULL);
505}
506
507/*
508 * Find an association for a specific endpoint using the association id given
509 * out in the COMM_UP notification
510 */
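A minimal usage sketch for turning the assoc id delivered in a COMM_UP notification back into its TCB; the wrapper name is hypothetical:

/*
 * Hypothetical illustration: resolve a COMM_UP assoc id to its TCB,
 * asking sctp_findassociation_ep_asocid() to return it locked.
 */
static void
sctp_handle_comm_up_sketch(struct sctp_inpcb *inp, sctp_assoc_t asoc_id)
{
	struct sctp_tcb *stcb;

	stcb = sctp_findassociation_ep_asocid(inp, asoc_id, 1 /* want_lock */);
	if (stcb == NULL)
		return;		/* the id did not map to an association on this ep */
	/* ... use the association under its lock ... */
	SCTP_TCB_UNLOCK(stcb);
}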
511
512struct sctp_tcb *
513sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
514{
515 /*
 516 * Use the assoc_id to find an endpoint
517 */
518 struct sctpasochead *head;
519 struct sctp_tcb *stcb;
520 uint32_t id;
521
522 if (asoc_id == 0 || inp == NULL) {
523 return (NULL);
524 }
525 SCTP_INP_INFO_RLOCK();
526 id = (uint32_t) asoc_id;
527 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(id,
528 sctppcbinfo.hashasocmark)];
529 if (head == NULL) {
530 /* invalid id TSNH */
531 SCTP_INP_INFO_RUNLOCK();
532 return (NULL);
533 }
534 LIST_FOREACH(stcb, head, sctp_asocs) {
535 SCTP_INP_RLOCK(stcb->sctp_ep);
536 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
537 SCTP_INP_RUNLOCK(stcb->sctp_ep);
538 SCTP_INP_INFO_RUNLOCK();
539 return (NULL);
540 }
541 if (stcb->asoc.assoc_id == id) {
542 /* candidate */
543 if (inp != stcb->sctp_ep) {
544 /*
545 * some other guy has the same id active (id
546 * collision ??).
547 */
548 SCTP_INP_RUNLOCK(stcb->sctp_ep);
549 continue;
550 }
551 if (want_lock) {
552 SCTP_TCB_LOCK(stcb);
553 }
554 SCTP_INP_RUNLOCK(stcb->sctp_ep);
555 SCTP_INP_INFO_RUNLOCK();
556 return (stcb);
557 }
558 SCTP_INP_RUNLOCK(stcb->sctp_ep);
559 }
560 /* Ok if we missed here, lets try the restart hash */
561 head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(id, sctppcbinfo.hashrestartmark)];
562 if (head == NULL) {
563 /* invalid id TSNH */
564 SCTP_INP_INFO_RUNLOCK();
565 return (NULL);
566 }
567 LIST_FOREACH(stcb, head, sctp_tcbrestarhash) {
568 SCTP_INP_RLOCK(stcb->sctp_ep);
569 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
570 SCTP_INP_RUNLOCK(stcb->sctp_ep);
571 SCTP_INP_INFO_RUNLOCK();
572 return (NULL);
573 }
574 SCTP_TCB_LOCK(stcb);
575 SCTP_INP_RUNLOCK(stcb->sctp_ep);
576 if (stcb->asoc.assoc_id == id) {
577 /* candidate */
578 if (inp != stcb->sctp_ep) {
579 /*
580 * some other guy has the same id active (id
581 * collision ??).
582 */
583 SCTP_TCB_UNLOCK(stcb);
584 continue;
585 }
586 SCTP_INP_INFO_RUNLOCK();
587 return (stcb);
588 }
589 SCTP_TCB_UNLOCK(stcb);
590 }
591 SCTP_INP_INFO_RUNLOCK();
592 return (NULL);
593}
594
595
596static struct sctp_inpcb *
597sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
491
492 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
493 &intf_addr6->sin6_addr)) {
494 match = 1;
495 break;
496 }
497 }
498 }
499 }
500 if (match == 0) {
501 /* This endpoint does not have this address */
502 SCTP_INP_RUNLOCK(inp);
503 continue;
504 }
505 }
506 /*
507 * Ok if we hit here the ep has the address, does it hold
508 * the tcb?
509 */
510
511 stcb = LIST_FIRST(&inp->sctp_asoc_list);
512 if (stcb == NULL) {
513 SCTP_INP_RUNLOCK(inp);
514 continue;
515 }
516 SCTP_TCB_LOCK(stcb);
517 if (stcb->rport != rport) {
518 /* remote port does not match. */
519 SCTP_TCB_UNLOCK(stcb);
520 SCTP_INP_RUNLOCK(inp);
521 continue;
522 }
523 /* Does this TCB have a matching address? */
524 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
525
526 if (net->ro._l_addr.sa.sa_family != from->sa_family) {
527 /* not the same family, can't be a match */
528 continue;
529 }
530 if (from->sa_family == AF_INET) {
531 struct sockaddr_in *sin, *rsin;
532
533 sin = (struct sockaddr_in *)&net->ro._l_addr;
534 rsin = (struct sockaddr_in *)from;
535 if (sin->sin_addr.s_addr ==
536 rsin->sin_addr.s_addr) {
537 /* found it */
538 if (netp != NULL) {
539 *netp = net;
540 }
541 /* Update the endpoint pointer */
542 *inp_p = inp;
543 SCTP_INP_RUNLOCK(inp);
544 return (stcb);
545 }
546 } else {
547 struct sockaddr_in6 *sin6, *rsin6;
548
549 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
550 rsin6 = (struct sockaddr_in6 *)from;
551 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
552 &rsin6->sin6_addr)) {
553 /* found it */
554 if (netp != NULL) {
555 *netp = net;
556 }
557 /* Update the endpoint pointer */
558 *inp_p = inp;
559 SCTP_INP_RUNLOCK(inp);
560 return (stcb);
561 }
562 }
563 }
564 SCTP_TCB_UNLOCK(stcb);
565 SCTP_INP_RUNLOCK(inp);
566 }
567 return (NULL);
568}
569
570/*
571 * rules for use
572 *
573 * 1) If I return a NULL you must decrement any INP ref cnt. 2) If I find an
574 * stcb, both will be locked (locked_tcb and stcb) but decrement will be done
575 * (if locked == NULL). 3) Decrement happens on return ONLY if locked ==
576 * NULL.
577 */
578
579struct sctp_tcb *
580sctp_findassociation_ep_addr(struct sctp_inpcb **inp_p, struct sockaddr *remote,
581 struct sctp_nets **netp, struct sockaddr *local, struct sctp_tcb *locked_tcb)
582{
583 struct sctpasochead *head;
584 struct sctp_inpcb *inp;
585 struct sctp_tcb *stcb;
586 struct sctp_nets *net;
587 uint16_t rport;
588
589 inp = *inp_p;
590 if (remote->sa_family == AF_INET) {
591 rport = (((struct sockaddr_in *)remote)->sin_port);
592 } else if (remote->sa_family == AF_INET6) {
593 rport = (((struct sockaddr_in6 *)remote)->sin6_port);
594 } else {
595 return (NULL);
596 }
597 if (locked_tcb) {
598 /*
599 * UN-lock so we can do proper locking here this occurs when
600 * called from load_addresses_from_init.
601 */
602 SCTP_TCB_UNLOCK(locked_tcb);
603 }
604 SCTP_INP_INFO_RLOCK();
605 if (inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) {
606 /*
607 * Now either this guy is our listener or it's the
608 * connector. If it is the one that issued the connect, then
 609 * its only chance is to be the first TCB in the list. If
610 * it is the acceptor, then do the special_lookup to hash
611 * and find the real inp.
612 */
613 if ((inp->sctp_socket) && (inp->sctp_socket->so_qlimit)) {
614 /* to is peer addr, from is my addr */
615 stcb = sctp_tcb_special_locate(inp_p, remote, local,
616 netp);
617 if ((stcb != NULL) && (locked_tcb == NULL)) {
618 /* we have a locked tcb, lower refcount */
619 SCTP_INP_WLOCK(inp);
620 SCTP_INP_DECR_REF(inp);
621 SCTP_INP_WUNLOCK(inp);
622 }
623 if ((locked_tcb != NULL) && (locked_tcb != stcb)) {
624 SCTP_INP_RLOCK(locked_tcb->sctp_ep);
625 SCTP_TCB_LOCK(locked_tcb);
626 SCTP_INP_RUNLOCK(locked_tcb->sctp_ep);
627 }
628 SCTP_INP_INFO_RUNLOCK();
629 return (stcb);
630 } else {
631 SCTP_INP_WLOCK(inp);
632 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
633 goto null_return;
634 }
635 stcb = LIST_FIRST(&inp->sctp_asoc_list);
636 if (stcb == NULL) {
637 goto null_return;
638 }
639 SCTP_TCB_LOCK(stcb);
640 if (stcb->rport != rport) {
641 /* remote port does not match. */
642 SCTP_TCB_UNLOCK(stcb);
643 goto null_return;
644 }
645 /* now look at the list of remote addresses */
646 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
647#ifdef INVARIANTS
648 if (net == (TAILQ_NEXT(net, sctp_next))) {
649 panic("Corrupt net list");
650 }
651#endif
652 if (net->ro._l_addr.sa.sa_family !=
653 remote->sa_family) {
654 /* not the same family */
655 continue;
656 }
657 if (remote->sa_family == AF_INET) {
658 struct sockaddr_in *sin, *rsin;
659
660 sin = (struct sockaddr_in *)
661 &net->ro._l_addr;
662 rsin = (struct sockaddr_in *)remote;
663 if (sin->sin_addr.s_addr ==
664 rsin->sin_addr.s_addr) {
665 /* found it */
666 if (netp != NULL) {
667 *netp = net;
668 }
669 if (locked_tcb == NULL) {
670 SCTP_INP_DECR_REF(inp);
671 } else if (locked_tcb != stcb) {
672 SCTP_TCB_LOCK(locked_tcb);
673 }
674 SCTP_INP_WUNLOCK(inp);
675 SCTP_INP_INFO_RUNLOCK();
676 return (stcb);
677 }
678 } else if (remote->sa_family == AF_INET6) {
679 struct sockaddr_in6 *sin6, *rsin6;
680
681 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
682 rsin6 = (struct sockaddr_in6 *)remote;
683 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
684 &rsin6->sin6_addr)) {
685 /* found it */
686 if (netp != NULL) {
687 *netp = net;
688 }
689 if (locked_tcb == NULL) {
690 SCTP_INP_DECR_REF(inp);
691 } else if (locked_tcb != stcb) {
692 SCTP_TCB_LOCK(locked_tcb);
693 }
694 SCTP_INP_WUNLOCK(inp);
695 SCTP_INP_INFO_RUNLOCK();
696 return (stcb);
697 }
698 }
699 }
700 SCTP_TCB_UNLOCK(stcb);
701 }
702 } else {
703 SCTP_INP_WLOCK(inp);
704 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
705 goto null_return;
706 }
707 head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(rport,
708 inp->sctp_hashmark)];
709 if (head == NULL) {
710 goto null_return;
711 }
712 LIST_FOREACH(stcb, head, sctp_tcbhash) {
713 if (stcb->rport != rport) {
714 /* remote port does not match */
715 continue;
716 }
717 /* now look at the list of remote addresses */
718 SCTP_TCB_LOCK(stcb);
719 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
720#ifdef INVARIANTS
721 if (net == (TAILQ_NEXT(net, sctp_next))) {
722 panic("Corrupt net list");
723 }
724#endif
725 if (net->ro._l_addr.sa.sa_family !=
726 remote->sa_family) {
727 /* not the same family */
728 continue;
729 }
730 if (remote->sa_family == AF_INET) {
731 struct sockaddr_in *sin, *rsin;
732
733 sin = (struct sockaddr_in *)
734 &net->ro._l_addr;
735 rsin = (struct sockaddr_in *)remote;
736 if (sin->sin_addr.s_addr ==
737 rsin->sin_addr.s_addr) {
738 /* found it */
739 if (netp != NULL) {
740 *netp = net;
741 }
742 if (locked_tcb == NULL) {
743 SCTP_INP_DECR_REF(inp);
744 } else if (locked_tcb != stcb) {
745 SCTP_TCB_LOCK(locked_tcb);
746 }
747 SCTP_INP_WUNLOCK(inp);
748 SCTP_INP_INFO_RUNLOCK();
749 return (stcb);
750 }
751 } else if (remote->sa_family == AF_INET6) {
752 struct sockaddr_in6 *sin6, *rsin6;
753
754 sin6 = (struct sockaddr_in6 *)
755 &net->ro._l_addr;
756 rsin6 = (struct sockaddr_in6 *)remote;
757 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
758 &rsin6->sin6_addr)) {
759 /* found it */
760 if (netp != NULL) {
761 *netp = net;
762 }
763 if (locked_tcb == NULL) {
764 SCTP_INP_DECR_REF(inp);
765 } else if (locked_tcb != stcb) {
766 SCTP_TCB_LOCK(locked_tcb);
767 }
768 SCTP_INP_WUNLOCK(inp);
769 SCTP_INP_INFO_RUNLOCK();
770 return (stcb);
771 }
772 }
773 }
774 SCTP_TCB_UNLOCK(stcb);
775 }
776 }
777null_return:
778 /* clean up for returning null */
779 if (locked_tcb) {
780 SCTP_TCB_LOCK(locked_tcb);
781 }
782 SCTP_INP_WUNLOCK(inp);
783 SCTP_INP_INFO_RUNLOCK();
784 /* not found */
785 return (NULL);
786}
787
788/*
789 * Find an association for a specific endpoint using the association id given
790 * out in the COMM_UP notification
791 */
792
793struct sctp_tcb *
794sctp_findassociation_ep_asocid(struct sctp_inpcb *inp, sctp_assoc_t asoc_id, int want_lock)
795{
796 /*
 797 * Use the assoc_id to find an endpoint
798 */
799 struct sctpasochead *head;
800 struct sctp_tcb *stcb;
801 uint32_t id;
802
803 if (asoc_id == 0 || inp == NULL) {
804 return (NULL);
805 }
806 SCTP_INP_INFO_RLOCK();
807 id = (uint32_t) asoc_id;
808 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(id,
809 sctppcbinfo.hashasocmark)];
810 if (head == NULL) {
811 /* invalid id TSNH */
812 SCTP_INP_INFO_RUNLOCK();
813 return (NULL);
814 }
815 LIST_FOREACH(stcb, head, sctp_asocs) {
816 SCTP_INP_RLOCK(stcb->sctp_ep);
817 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
818 SCTP_INP_RUNLOCK(stcb->sctp_ep);
819 SCTP_INP_INFO_RUNLOCK();
820 return (NULL);
821 }
822 if (stcb->asoc.assoc_id == id) {
823 /* candidate */
824 if (inp != stcb->sctp_ep) {
825 /*
826 * some other guy has the same id active (id
827 * collision ??).
828 */
829 SCTP_INP_RUNLOCK(stcb->sctp_ep);
830 continue;
831 }
832 if (want_lock) {
833 SCTP_TCB_LOCK(stcb);
834 }
835 SCTP_INP_RUNLOCK(stcb->sctp_ep);
836 SCTP_INP_INFO_RUNLOCK();
837 return (stcb);
838 }
839 SCTP_INP_RUNLOCK(stcb->sctp_ep);
840 }
841 /* Ok if we missed here, lets try the restart hash */
842 head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(id, sctppcbinfo.hashrestartmark)];
843 if (head == NULL) {
844 /* invalid id TSNH */
845 SCTP_INP_INFO_RUNLOCK();
846 return (NULL);
847 }
848 LIST_FOREACH(stcb, head, sctp_tcbrestarhash) {
849 SCTP_INP_RLOCK(stcb->sctp_ep);
850 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
851 SCTP_INP_RUNLOCK(stcb->sctp_ep);
852 SCTP_INP_INFO_RUNLOCK();
853 return (NULL);
854 }
855 SCTP_TCB_LOCK(stcb);
856 SCTP_INP_RUNLOCK(stcb->sctp_ep);
857 if (stcb->asoc.assoc_id == id) {
858 /* candidate */
859 if (inp != stcb->sctp_ep) {
860 /*
861 * some other guy has the same id active (id
862 * collision ??).
863 */
864 SCTP_TCB_UNLOCK(stcb);
865 continue;
866 }
867 SCTP_INP_INFO_RUNLOCK();
868 return (stcb);
869 }
870 SCTP_TCB_UNLOCK(stcb);
871 }
872 SCTP_INP_INFO_RUNLOCK();
873 return (NULL);
874}
875
876
877static struct sctp_inpcb *
878sctp_endpoint_probe(struct sockaddr *nam, struct sctppcbhead *head,
598 uint16_t lport)
879 uint16_t lport, uint32_t vrf_id)
599{
600 struct sctp_inpcb *inp;
601 struct sockaddr_in *sin;
602 struct sockaddr_in6 *sin6;
603 struct sctp_laddr *laddr;
880{
881 struct sctp_inpcb *inp;
882 struct sockaddr_in *sin;
883 struct sockaddr_in6 *sin6;
884 struct sctp_laddr *laddr;
885 int fnd;
604
605 /*
 606 * Endpoint probe expects that the INP_INFO is locked.
607 */
608 if (nam->sa_family == AF_INET) {
609 sin = (struct sockaddr_in *)nam;
610 sin6 = NULL;
611 } else if (nam->sa_family == AF_INET6) {
612 sin6 = (struct sockaddr_in6 *)nam;
613 sin = NULL;
614 } else {
615 /* unsupported family */
616 return (NULL);
617 }
618 if (head == NULL)
619 return (NULL);
620 LIST_FOREACH(inp, head, sctp_hash) {
621 SCTP_INP_RLOCK(inp);
622 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
623 SCTP_INP_RUNLOCK(inp);
624 continue;
625 }
626 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) &&
627 (inp->sctp_lport == lport)) {
628 /* got it */
629 if ((nam->sa_family == AF_INET) &&
630 (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
631 SCTP_IPV6_V6ONLY(inp)) {
632 /* IPv4 on a IPv6 socket with ONLY IPv6 set */
633 SCTP_INP_RUNLOCK(inp);
634 continue;
635 }
636 /* A V6 address and the endpoint is NOT bound V6 */
637 if (nam->sa_family == AF_INET6 &&
638 (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
639 SCTP_INP_RUNLOCK(inp);
640 continue;
641 }
886
887 /*
 888 * Endpoint probe expects that the INP_INFO is locked.
889 */
890 if (nam->sa_family == AF_INET) {
891 sin = (struct sockaddr_in *)nam;
892 sin6 = NULL;
893 } else if (nam->sa_family == AF_INET6) {
894 sin6 = (struct sockaddr_in6 *)nam;
895 sin = NULL;
896 } else {
897 /* unsupported family */
898 return (NULL);
899 }
900 if (head == NULL)
901 return (NULL);
902 LIST_FOREACH(inp, head, sctp_hash) {
903 SCTP_INP_RLOCK(inp);
904 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
905 SCTP_INP_RUNLOCK(inp);
906 continue;
907 }
908 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) &&
909 (inp->sctp_lport == lport)) {
910 /* got it */
911 if ((nam->sa_family == AF_INET) &&
912 (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
913 SCTP_IPV6_V6ONLY(inp)) {
914 /* IPv4 on a IPv6 socket with ONLY IPv6 set */
915 SCTP_INP_RUNLOCK(inp);
916 continue;
917 }
918 /* A V6 address and the endpoint is NOT bound V6 */
919 if (nam->sa_family == AF_INET6 &&
920 (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
921 SCTP_INP_RUNLOCK(inp);
922 continue;
923 }
924 /* does a VRF id match? */
925 fnd = 0;
926 if (inp->def_vrf_id == vrf_id)
927 fnd = 1;
928
642 SCTP_INP_RUNLOCK(inp);
929 SCTP_INP_RUNLOCK(inp);
930 if (!fnd)
931 continue;
643 return (inp);
644 }
645 SCTP_INP_RUNLOCK(inp);
646 }
647
648 if ((nam->sa_family == AF_INET) &&
649 (sin->sin_addr.s_addr == INADDR_ANY)) {
650 /* Can't hunt for one that has no address specified */
651 return (NULL);
652 } else if ((nam->sa_family == AF_INET6) &&
653 (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) {
654 /* Can't hunt for one that has no address specified */
655 return (NULL);
656 }
657 /*
658 * ok, not bound to all so see if we can find a EP bound to this
659 * address.
660 */
661 LIST_FOREACH(inp, head, sctp_hash) {
662 SCTP_INP_RLOCK(inp);
663 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
664 SCTP_INP_RUNLOCK(inp);
665 continue;
666 }
667 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) {
668 SCTP_INP_RUNLOCK(inp);
669 continue;
670 }
671 /*
672 * Ok this could be a likely candidate, look at all of its
673 * addresses
674 */
675 if (inp->sctp_lport != lport) {
676 SCTP_INP_RUNLOCK(inp);
677 continue;
678 }
932 return (inp);
933 }
934 SCTP_INP_RUNLOCK(inp);
935 }
936
937 if ((nam->sa_family == AF_INET) &&
938 (sin->sin_addr.s_addr == INADDR_ANY)) {
939 /* Can't hunt for one that has no address specified */
940 return (NULL);
941 } else if ((nam->sa_family == AF_INET6) &&
942 (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) {
943 /* Can't hunt for one that has no address specified */
944 return (NULL);
945 }
946 /*
947 * ok, not bound to all so see if we can find a EP bound to this
948 * address.
949 */
950 LIST_FOREACH(inp, head, sctp_hash) {
951 SCTP_INP_RLOCK(inp);
952 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
953 SCTP_INP_RUNLOCK(inp);
954 continue;
955 }
956 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL)) {
957 SCTP_INP_RUNLOCK(inp);
958 continue;
959 }
960 /*
961 * Ok this could be a likely candidate, look at all of its
962 * addresses
963 */
964 if (inp->sctp_lport != lport) {
965 SCTP_INP_RUNLOCK(inp);
966 continue;
967 }
968 /* does a VRF id match? */
969 fnd = 0;
970 if (inp->def_vrf_id == vrf_id)
971 fnd = 1;
972
973 if (!fnd) {
974 SCTP_INP_RUNLOCK(inp);
975 continue;
976 }
679 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
680 if (laddr->ifa == NULL) {
681#ifdef SCTP_DEBUG
682 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
683 printf("An ounce of prevention is worth a pound of cure\n");
684 }
685#endif
686 continue;
687 }
688#ifdef SCTP_DEBUG
689 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
690 printf("Ok laddr->ifa:%p is possible, ",
691 laddr->ifa);
692 }
693#endif
977 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
978 if (laddr->ifa == NULL) {
979#ifdef SCTP_DEBUG
980 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
981 printf("An ounce of prevention is worth a pound of cure\n");
982 }
983#endif
984 continue;
985 }
986#ifdef SCTP_DEBUG
987 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
988 printf("Ok laddr->ifa:%p is possible, ",
989 laddr->ifa);
990 }
991#endif
 694 if (laddr->ifa->ifa_addr == NULL) {
 695#ifdef SCTP_DEBUG
 696 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
 697 printf("Huh IFA as an ifa_addr=NULL, ");
 698 }
 699#endif
 700 continue;
 701 }
 992 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
 993#ifdef SCTP_DEBUG
 994 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
 995 printf("Huh IFA being deleted\n");
 996 }
 997#endif
 998 continue;
 999 }
702 if (laddr->ifa->ifa_addr->sa_family == nam->sa_family) {
1000 if (laddr->ifa->address.sa.sa_family == nam->sa_family) {
703 /* possible, see if it matches */
704 struct sockaddr_in *intf_addr;
705
1001 /* possible, see if it matches */
1002 struct sockaddr_in *intf_addr;
1003
706 intf_addr = (struct sockaddr_in *)
707 laddr->ifa->ifa_addr;
1004 intf_addr = &laddr->ifa->address.sin;
708 if (nam->sa_family == AF_INET) {
709 if (sin->sin_addr.s_addr ==
710 intf_addr->sin_addr.s_addr) {
711 SCTP_INP_RUNLOCK(inp);
712 return (inp);
713 }
714 } else if (nam->sa_family == AF_INET6) {
715 struct sockaddr_in6 *intf_addr6;
716
1005 if (nam->sa_family == AF_INET) {
1006 if (sin->sin_addr.s_addr ==
1007 intf_addr->sin_addr.s_addr) {
1008 SCTP_INP_RUNLOCK(inp);
1009 return (inp);
1010 }
1011 } else if (nam->sa_family == AF_INET6) {
1012 struct sockaddr_in6 *intf_addr6;
1013
717 intf_addr6 = (struct sockaddr_in6 *)
718 laddr->ifa->ifa_addr;
1014 intf_addr6 = &laddr->ifa->address.sin6;
719 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
720 &intf_addr6->sin6_addr)) {
721 SCTP_INP_RUNLOCK(inp);
722 return (inp);
723 }
724 }
725 }
726 }
727 SCTP_INP_RUNLOCK(inp);
728 }
729 return (NULL);
730}
731
732
733struct sctp_inpcb *
1015 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
1016 &intf_addr6->sin6_addr)) {
1017 SCTP_INP_RUNLOCK(inp);
1018 return (inp);
1019 }
1020 }
1021 }
1022 }
1023 SCTP_INP_RUNLOCK(inp);
1024 }
1025 return (NULL);
1026}
1027
1028
1029struct sctp_inpcb *
734sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock)
1030sctp_pcb_findep(struct sockaddr *nam, int find_tcp_pool, int have_lock, uint32_t vrf_id)
735{
736 /*
737 * First we check the hash table to see if someone has this port
738 * bound with just the port.
739 */
740 struct sctp_inpcb *inp;
741 struct sctppcbhead *head;
742 struct sockaddr_in *sin;
743 struct sockaddr_in6 *sin6;
744 int lport;
745
746 if (nam->sa_family == AF_INET) {
747 sin = (struct sockaddr_in *)nam;
748 lport = ((struct sockaddr_in *)nam)->sin_port;
749 } else if (nam->sa_family == AF_INET6) {
750 sin6 = (struct sockaddr_in6 *)nam;
751 lport = ((struct sockaddr_in6 *)nam)->sin6_port;
752 } else {
753 /* unsupported family */
754 return (NULL);
755 }
756 /*
757 * I could cheat here and just cast to one of the types but we will
758 * do it right. It also provides the check against an Unsupported
759 * type too.
760 */
761 /* Find the head of the ALLADDR chain */
762 if (have_lock == 0) {
763 SCTP_INP_INFO_RLOCK();
764
765 }
766 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
767 sctppcbinfo.hashmark)];
1031{
1032 /*
1033 * First we check the hash table to see if someone has this port
1034 * bound with just the port.
1035 */
1036 struct sctp_inpcb *inp;
1037 struct sctppcbhead *head;
1038 struct sockaddr_in *sin;
1039 struct sockaddr_in6 *sin6;
1040 int lport;
1041
1042 if (nam->sa_family == AF_INET) {
1043 sin = (struct sockaddr_in *)nam;
1044 lport = ((struct sockaddr_in *)nam)->sin_port;
1045 } else if (nam->sa_family == AF_INET6) {
1046 sin6 = (struct sockaddr_in6 *)nam;
1047 lport = ((struct sockaddr_in6 *)nam)->sin6_port;
1048 } else {
1049 /* unsupported family */
1050 return (NULL);
1051 }
1052 /*
1053 * I could cheat here and just cast to one of the types but we will
1054 * do it right. It also provides the check against an Unsupported
1055 * type too.
1056 */
1057 /* Find the head of the ALLADDR chain */
1058 if (have_lock == 0) {
1059 SCTP_INP_INFO_RLOCK();
1060
1061 }
1062 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
1063 sctppcbinfo.hashmark)];
768 inp = sctp_endpoint_probe(nam, head, lport);
1064 inp = sctp_endpoint_probe(nam, head, lport, vrf_id);
769
770 /*
771 * If the TCP model exists it could be that the main listening
772 * endpoint is gone but there exists a connected socket for this guy
773 * yet. If so we can return the first one that we find. This may NOT
774 * be the correct one but the sctp_findassociation_ep_addr has
775 * further code to look at all TCP models.
776 */
777 if (inp == NULL && find_tcp_pool) {
778 unsigned int i;
779
780 for (i = 0; i < sctppcbinfo.hashtblsize; i++) {
781 /*
782 * This is real gross, but we do NOT have a remote
783 * port at this point depending on who is calling.
784 * We must therefore look for ANY one that matches
785 * our local port :/
786 */
787 head = &sctppcbinfo.sctp_tcpephash[i];
788 if (LIST_FIRST(head)) {
1065
1066 /*
1067 * If the TCP model exists it could be that the main listening
1068 * endpoint is gone but there exists a connected socket for this guy
1069 * yet. If so we can return the first one that we find. This may NOT
1070 * be the correct one but the sctp_findassociation_ep_addr has
1071 * further code to look at all TCP models.
1072 */
1073 if (inp == NULL && find_tcp_pool) {
1074 unsigned int i;
1075
1076 for (i = 0; i < sctppcbinfo.hashtblsize; i++) {
1077 /*
1078 * This is real gross, but we do NOT have a remote
1079 * port at this point depending on who is calling.
1080 * We must therefore look for ANY one that matches
1081 * our local port :/
1082 */
1083 head = &sctppcbinfo.sctp_tcpephash[i];
1084 if (LIST_FIRST(head)) {
789 inp = sctp_endpoint_probe(nam, head, lport);
1085 inp = sctp_endpoint_probe(nam, head, lport, vrf_id);
790 if (inp) {
791 /* Found one */
792 break;
793 }
794 }
795 }
796 }
797 if (inp) {
798 SCTP_INP_INCR_REF(inp);
799 }
800 if (have_lock == 0) {
801 SCTP_INP_INFO_RUNLOCK();
802 }
803 return (inp);
804}
805
806/*
807 * Find an association for an endpoint with the pointer to whom you want to
808 * send to and the endpoint pointer. The address can be IPv4 or IPv6. We may
809 * need to change the *to to some other struct like a mbuf...
810 */
811struct sctp_tcb *
812sctp_findassociation_addr_sa(struct sockaddr *to, struct sockaddr *from,
1086 if (inp) {
1087 /* Found one */
1088 break;
1089 }
1090 }
1091 }
1092 }
1093 if (inp) {
1094 SCTP_INP_INCR_REF(inp);
1095 }
1096 if (have_lock == 0) {
1097 SCTP_INP_INFO_RUNLOCK();
1098 }
1099 return (inp);
1100}
1101
1102/*
1103 * Find an association for an endpoint with the pointer to whom you want to
1104 * send to and the endpoint pointer. The address can be IPv4 or IPv6. We may
1105 * need to change the *to to some other struct like a mbuf...
1106 */
1107struct sctp_tcb *
1108sctp_findassociation_addr_sa(struct sockaddr *to, struct sockaddr *from,
813 struct sctp_inpcb **inp_p, struct sctp_nets **netp, int find_tcp_pool)
1109 struct sctp_inpcb **inp_p, struct sctp_nets **netp, int find_tcp_pool, uint32_t vrf_id)
814{
815 struct sctp_inpcb *inp;
816 struct sctp_tcb *retval;
817
818 SCTP_INP_INFO_RLOCK();
819 if (find_tcp_pool) {
820 if (inp_p != NULL) {
821 retval = sctp_tcb_special_locate(inp_p, from, to, netp);
822 } else {
823 retval = sctp_tcb_special_locate(&inp, from, to, netp);
824 }
825 if (retval != NULL) {
826 SCTP_INP_INFO_RUNLOCK();
827 return (retval);
828 }
829 }
1110{
1111 struct sctp_inpcb *inp;
1112 struct sctp_tcb *retval;
1113
1114 SCTP_INP_INFO_RLOCK();
1115 if (find_tcp_pool) {
1116 if (inp_p != NULL) {
1117 retval = sctp_tcb_special_locate(inp_p, from, to, netp);
1118 } else {
1119 retval = sctp_tcb_special_locate(&inp, from, to, netp);
1120 }
1121 if (retval != NULL) {
1122 SCTP_INP_INFO_RUNLOCK();
1123 return (retval);
1124 }
1125 }
830 inp = sctp_pcb_findep(to, 0, 1);
1126 inp = sctp_pcb_findep(to, 0, 1, vrf_id);
831 if (inp_p != NULL) {
832 *inp_p = inp;
833 }
834 SCTP_INP_INFO_RUNLOCK();
835
836 if (inp == NULL) {
837 return (NULL);
838 }
839 /*
840 * ok, we have an endpoint, now lets find the assoc for it (if any)
841 * we now place the source address or from in the to of the find
842 * endpoint call. Since in reality this chain is used from the
843 * inbound packet side.
844 */
845 if (inp_p != NULL) {
846 retval = sctp_findassociation_ep_addr(inp_p, from, netp, to, NULL);
847 } else {
848 retval = sctp_findassociation_ep_addr(&inp, from, netp, to, NULL);
849 }
850 return retval;
851}
852
853
854/*
 855 * This routine will grub through the mbuf that is an INIT or INIT-ACK and
 856 * find all addresses that the sender has specified in any address list. Each
 857 * address will be used to look up the TCB and see if one exists.
858 */
859static struct sctp_tcb *
860sctp_findassociation_special_addr(struct mbuf *m, int iphlen, int offset,
861 struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp,
862 struct sockaddr *dest)
863{
864 struct sockaddr_in sin4;
865 struct sockaddr_in6 sin6;
866 struct sctp_paramhdr *phdr, parm_buf;
867 struct sctp_tcb *retval;
868 uint32_t ptype, plen;
869
870 memset(&sin4, 0, sizeof(sin4));
871 memset(&sin6, 0, sizeof(sin6));
872 sin4.sin_len = sizeof(sin4);
873 sin4.sin_family = AF_INET;
874 sin4.sin_port = sh->src_port;
875 sin6.sin6_len = sizeof(sin6);
876 sin6.sin6_family = AF_INET6;
877 sin6.sin6_port = sh->src_port;
878
879 retval = NULL;
880 offset += sizeof(struct sctp_init_chunk);
881
882 phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
883 while (phdr != NULL) {
884 /* now we must see if we want the parameter */
885 ptype = ntohs(phdr->param_type);
886 plen = ntohs(phdr->param_length);
887 if (plen == 0) {
888 break;
889 }
890 if (ptype == SCTP_IPV4_ADDRESS &&
891 plen == sizeof(struct sctp_ipv4addr_param)) {
892 /* Get the rest of the address */
893 struct sctp_ipv4addr_param ip4_parm, *p4;
894
895 phdr = sctp_get_next_param(m, offset,
896 (struct sctp_paramhdr *)&ip4_parm, plen);
897 if (phdr == NULL) {
898 return (NULL);
899 }
900 p4 = (struct sctp_ipv4addr_param *)phdr;
901 memcpy(&sin4.sin_addr, &p4->addr, sizeof(p4->addr));
902 /* look it up */
903 retval = sctp_findassociation_ep_addr(inp_p,
904 (struct sockaddr *)&sin4, netp, dest, NULL);
905 if (retval != NULL) {
906 return (retval);
907 }
908 } else if (ptype == SCTP_IPV6_ADDRESS &&
909 plen == sizeof(struct sctp_ipv6addr_param)) {
910 /* Get the rest of the address */
911 struct sctp_ipv6addr_param ip6_parm, *p6;
912
913 phdr = sctp_get_next_param(m, offset,
914 (struct sctp_paramhdr *)&ip6_parm, plen);
915 if (phdr == NULL) {
916 return (NULL);
917 }
918 p6 = (struct sctp_ipv6addr_param *)phdr;
919 memcpy(&sin6.sin6_addr, &p6->addr, sizeof(p6->addr));
920 /* look it up */
921 retval = sctp_findassociation_ep_addr(inp_p,
922 (struct sockaddr *)&sin6, netp, dest, NULL);
923 if (retval != NULL) {
924 return (retval);
925 }
926 }
927 offset += SCTP_SIZE32(plen);
928 phdr = sctp_get_next_param(m, offset, &parm_buf,
929 sizeof(parm_buf));
930 }
931 return (NULL);
932}
933
934
935static struct sctp_tcb *
936sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag,
937 struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport,
938 uint16_t lport, int skip_src_check)
939{
940 /*
941 * Use my vtag to hash. If we find it we then verify the source addr
 942	 * is in the assoc. If all goes well we save a bit on receipt of a
943 * packet.
944 */
945 struct sctpasochead *head;
946 struct sctp_nets *net;
947 struct sctp_tcb *stcb;
948
949 *netp = NULL;
950 *inp_p = NULL;
951 SCTP_INP_INFO_RLOCK();
952 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(vtag,
953 sctppcbinfo.hashasocmark)];
954 if (head == NULL) {
955 /* invalid vtag */
956 SCTP_INP_INFO_RUNLOCK();
957 return (NULL);
958 }
959 LIST_FOREACH(stcb, head, sctp_asocs) {
960 SCTP_INP_RLOCK(stcb->sctp_ep);
961 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
962 SCTP_INP_RUNLOCK(stcb->sctp_ep);
963 SCTP_INP_INFO_RUNLOCK();
964 return (NULL);
965 }
966 SCTP_TCB_LOCK(stcb);
967 SCTP_INP_RUNLOCK(stcb->sctp_ep);
968 if (stcb->asoc.my_vtag == vtag) {
969 /* candidate */
970 if (stcb->rport != rport) {
971 /*
972 * we could remove this if vtags are unique
973 * across the system.
974 */
975 SCTP_TCB_UNLOCK(stcb);
976 continue;
977 }
978 if (stcb->sctp_ep->sctp_lport != lport) {
979 /*
980 * we could remove this if vtags are unique
981 * across the system.
982 */
983 SCTP_TCB_UNLOCK(stcb);
984 continue;
985 }
986 if (skip_src_check) {
987 *netp = NULL; /* unknown */
988 *inp_p = stcb->sctp_ep;
989 SCTP_INP_INFO_RUNLOCK();
990 return (stcb);
991 }
992 net = sctp_findnet(stcb, from);
993 if (net) {
994 /* yep its him. */
995 *netp = net;
996 SCTP_STAT_INCR(sctps_vtagexpress);
997 *inp_p = stcb->sctp_ep;
998 SCTP_INP_INFO_RUNLOCK();
999 return (stcb);
1000 } else {
1001 /*
1002 * not him, this should only happen in rare
1003 * cases so I peg it.
1004 */
1005 SCTP_STAT_INCR(sctps_vtagbogus);
1006 }
1007 }
1008 SCTP_TCB_UNLOCK(stcb);
1009 }
1010 SCTP_INP_INFO_RUNLOCK();
1011 return (NULL);
1012}
1013
1014/*
1015 * Find an association with the pointer to the inbound IP packet. This can be
 1016 * an IPv4 or IPv6 packet.
1017 */
1018struct sctp_tcb *
1019sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset,
1020 struct sctphdr *sh, struct sctp_chunkhdr *ch,
1021 struct sctp_inpcb **inp_p, struct sctp_nets **netp)
1022{
1023 int find_tcp_pool;
1024 struct ip *iph;
1025 struct sctp_tcb *retval;
1026 struct sockaddr_storage to_store, from_store;
1027 struct sockaddr *to = (struct sockaddr *)&to_store;
1028 struct sockaddr *from = (struct sockaddr *)&from_store;
1029 struct sctp_inpcb *inp;
1127 if (inp_p != NULL) {
1128 *inp_p = inp;
1129 }
1130 SCTP_INP_INFO_RUNLOCK();
1131
1132 if (inp == NULL) {
1133 return (NULL);
1134 }
1135 /*
 1136	 * ok, we have an endpoint, now let's find the assoc for it (if any).
 1137	 * We now place the source address, or from, in the to of the find
 1138	 * endpoint call, since in reality this chain is used from the
 1139	 * inbound packet side.
1140 */
1141 if (inp_p != NULL) {
1142 retval = sctp_findassociation_ep_addr(inp_p, from, netp, to, NULL);
1143 } else {
1144 retval = sctp_findassociation_ep_addr(&inp, from, netp, to, NULL);
1145 }
1146 return retval;
1147}
1148
1149
1150/*
 1151 * This routine will grub through the mbuf that is an INIT or INIT-ACK and
 1152 * find all addresses that the sender has specified in any address list. Each
 1153 * address will be used to look up the TCB and see if one exists.
1154 */
1155static struct sctp_tcb *
1156sctp_findassociation_special_addr(struct mbuf *m, int iphlen, int offset,
1157 struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp,
1158 struct sockaddr *dest)
1159{
1160 struct sockaddr_in sin4;
1161 struct sockaddr_in6 sin6;
1162 struct sctp_paramhdr *phdr, parm_buf;
1163 struct sctp_tcb *retval;
1164 uint32_t ptype, plen;
1165
1166 memset(&sin4, 0, sizeof(sin4));
1167 memset(&sin6, 0, sizeof(sin6));
1168 sin4.sin_len = sizeof(sin4);
1169 sin4.sin_family = AF_INET;
1170 sin4.sin_port = sh->src_port;
1171 sin6.sin6_len = sizeof(sin6);
1172 sin6.sin6_family = AF_INET6;
1173 sin6.sin6_port = sh->src_port;
1174
1175 retval = NULL;
1176 offset += sizeof(struct sctp_init_chunk);
1177
1178 phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
1179 while (phdr != NULL) {
1180 /* now we must see if we want the parameter */
1181 ptype = ntohs(phdr->param_type);
1182 plen = ntohs(phdr->param_length);
1183 if (plen == 0) {
1184 break;
1185 }
1186 if (ptype == SCTP_IPV4_ADDRESS &&
1187 plen == sizeof(struct sctp_ipv4addr_param)) {
1188 /* Get the rest of the address */
1189 struct sctp_ipv4addr_param ip4_parm, *p4;
1190
1191 phdr = sctp_get_next_param(m, offset,
1192 (struct sctp_paramhdr *)&ip4_parm, plen);
1193 if (phdr == NULL) {
1194 return (NULL);
1195 }
1196 p4 = (struct sctp_ipv4addr_param *)phdr;
1197 memcpy(&sin4.sin_addr, &p4->addr, sizeof(p4->addr));
1198 /* look it up */
1199 retval = sctp_findassociation_ep_addr(inp_p,
1200 (struct sockaddr *)&sin4, netp, dest, NULL);
1201 if (retval != NULL) {
1202 return (retval);
1203 }
1204 } else if (ptype == SCTP_IPV6_ADDRESS &&
1205 plen == sizeof(struct sctp_ipv6addr_param)) {
1206 /* Get the rest of the address */
1207 struct sctp_ipv6addr_param ip6_parm, *p6;
1208
1209 phdr = sctp_get_next_param(m, offset,
1210 (struct sctp_paramhdr *)&ip6_parm, plen);
1211 if (phdr == NULL) {
1212 return (NULL);
1213 }
1214 p6 = (struct sctp_ipv6addr_param *)phdr;
1215 memcpy(&sin6.sin6_addr, &p6->addr, sizeof(p6->addr));
1216 /* look it up */
1217 retval = sctp_findassociation_ep_addr(inp_p,
1218 (struct sockaddr *)&sin6, netp, dest, NULL);
1219 if (retval != NULL) {
1220 return (retval);
1221 }
1222 }
1223 offset += SCTP_SIZE32(plen);
1224 phdr = sctp_get_next_param(m, offset, &parm_buf,
1225 sizeof(parm_buf));
1226 }
1227 return (NULL);
1228}
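
/*
 * Illustrative sketch only (not part of the diff above): the parameter walk
 * that sctp_findassociation_special_addr performs on an INIT/INIT-ACK, i.e.
 * read a (type, length) header, act on the parameter, then advance by the
 * length rounded up to a 4-byte boundary as SCTP_SIZE32() does.  The buffer
 * contents and the param_walk() helper are assumptions for the example.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

struct param_hdr {			/* mirrors struct sctp_paramhdr */
	uint16_t param_type;		/* network byte order */
	uint16_t param_length;		/* network byte order, includes header */
};

static void
param_walk(const uint8_t *buf, size_t len)
{
	size_t offset = 0;

	while (offset + sizeof(struct param_hdr) <= len) {
		struct param_hdr ph;
		uint16_t ptype, plen;

		memcpy(&ph, buf + offset, sizeof(ph));
		ptype = ntohs(ph.param_type);
		plen = ntohs(ph.param_length);
		if (plen < sizeof(ph) || offset + plen > len)
			break;		/* malformed, stop like plen == 0 does */
		printf("param type %u, length %u\n", ptype, plen);
		/* advance to the next parameter, 4-byte aligned */
		offset += (plen + 3) & ~(size_t)3;
	}
}

int
main(void)
{
	/* an IPv4-address param (type 5, len 8) plus a made-up 6-byte param */
	uint8_t buf[16] = {
		0x00, 0x05, 0x00, 0x08, 192, 0, 2, 1,
		0xc0, 0x04, 0x00, 0x06, 0xaa, 0xbb, 0x00, 0x00
	};

	param_walk(buf, sizeof(buf));
	return (0);
}
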
1229
1230
1231static struct sctp_tcb *
1232sctp_findassoc_by_vtag(struct sockaddr *from, uint32_t vtag,
1233 struct sctp_inpcb **inp_p, struct sctp_nets **netp, uint16_t rport,
1234 uint16_t lport, int skip_src_check)
1235{
1236 /*
1237 * Use my vtag to hash. If we find it we then verify the source addr
 1238	 * is in the assoc. If all goes well we save a bit on receipt of a
1239 * packet.
1240 */
1241 struct sctpasochead *head;
1242 struct sctp_nets *net;
1243 struct sctp_tcb *stcb;
1244
1245 *netp = NULL;
1246 *inp_p = NULL;
1247 SCTP_INP_INFO_RLOCK();
1248 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(vtag,
1249 sctppcbinfo.hashasocmark)];
1250 if (head == NULL) {
1251 /* invalid vtag */
1252 SCTP_INP_INFO_RUNLOCK();
1253 return (NULL);
1254 }
1255 LIST_FOREACH(stcb, head, sctp_asocs) {
1256 SCTP_INP_RLOCK(stcb->sctp_ep);
1257 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
1258 SCTP_INP_RUNLOCK(stcb->sctp_ep);
1259 SCTP_INP_INFO_RUNLOCK();
1260 return (NULL);
1261 }
1262 SCTP_TCB_LOCK(stcb);
1263 SCTP_INP_RUNLOCK(stcb->sctp_ep);
1264 if (stcb->asoc.my_vtag == vtag) {
1265 /* candidate */
1266 if (stcb->rport != rport) {
1267 /*
1268 * we could remove this if vtags are unique
1269 * across the system.
1270 */
1271 SCTP_TCB_UNLOCK(stcb);
1272 continue;
1273 }
1274 if (stcb->sctp_ep->sctp_lport != lport) {
1275 /*
1276 * we could remove this if vtags are unique
1277 * across the system.
1278 */
1279 SCTP_TCB_UNLOCK(stcb);
1280 continue;
1281 }
1282 if (skip_src_check) {
1283 *netp = NULL; /* unknown */
1284 *inp_p = stcb->sctp_ep;
1285 SCTP_INP_INFO_RUNLOCK();
1286 return (stcb);
1287 }
1288 net = sctp_findnet(stcb, from);
1289 if (net) {
1290 /* yep its him. */
1291 *netp = net;
1292 SCTP_STAT_INCR(sctps_vtagexpress);
1293 *inp_p = stcb->sctp_ep;
1294 SCTP_INP_INFO_RUNLOCK();
1295 return (stcb);
1296 } else {
1297 /*
1298 * not him, this should only happen in rare
1299 * cases so I peg it.
1300 */
1301 SCTP_STAT_INCR(sctps_vtagbogus);
1302 }
1303 }
1304 SCTP_TCB_UNLOCK(stcb);
1305 }
1306 SCTP_INP_INFO_RUNLOCK();
1307 return (NULL);
1308}
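
/*
 * Illustrative sketch only (not part of the diff above): the lookup idea
 * behind sctp_findassoc_by_vtag.  The verification tag is masked into a
 * bucket index and the short chain in that bucket is scanned for a matching
 * vtag/port pair.  The table and struct assoc are invented for the example;
 * the real code hashes with SCTP_PCBHASH_ASOC() and holds the PCB-info and
 * TCB locks, then still verifies the source address via sctp_findnet().
 */
#include <stdint.h>
#include <stdio.h>

#define	VTAG_HASH_SIZE	16		/* power of two, so (size - 1) is a mask */

struct assoc {
	uint32_t my_vtag;		/* our verification tag */
	uint16_t lport, rport;		/* local and remote SCTP ports */
	struct assoc *next;		/* bucket chain */
};

static struct assoc *vtag_hash[VTAG_HASH_SIZE];

static struct assoc *
vtag_lookup(uint32_t vtag, uint16_t lport, uint16_t rport)
{
	struct assoc *a;

	for (a = vtag_hash[vtag & (VTAG_HASH_SIZE - 1)]; a != NULL; a = a->next) {
		if (a->my_vtag == vtag && a->lport == lport && a->rport == rport)
			return (a);	/* candidate found */
	}
	return (NULL);
}

int
main(void)
{
	static struct assoc a = { 0x12345687, 5001, 5002, NULL };

	vtag_hash[a.my_vtag & (VTAG_HASH_SIZE - 1)] = &a;
	printf("hit:  %p\n", (void *)vtag_lookup(0x12345687, 5001, 5002));
	printf("miss: %p\n", (void *)vtag_lookup(0x12345687, 5001, 9999));
	return (0);
}
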
1309
1310/*
1311 * Find an association with the pointer to the inbound IP packet. This can be
 1312 * an IPv4 or IPv6 packet.
1313 */
1314struct sctp_tcb *
1315sctp_findassociation_addr(struct mbuf *m, int iphlen, int offset,
1316 struct sctphdr *sh, struct sctp_chunkhdr *ch,
1317 struct sctp_inpcb **inp_p, struct sctp_nets **netp)
1318{
1319 int find_tcp_pool;
1320 struct ip *iph;
1321 struct sctp_tcb *retval;
1322 struct sockaddr_storage to_store, from_store;
1323 struct sockaddr *to = (struct sockaddr *)&to_store;
1324 struct sockaddr *from = (struct sockaddr *)&from_store;
1325 struct sctp_inpcb *inp;
1326 uint32_t vrf_id;
1030
1327
1031
1328 vrf_id = SCTP_DEFAULT_VRFID;
1032 iph = mtod(m, struct ip *);
1033 if (iph->ip_v == IPVERSION) {
1034 /* its IPv4 */
1035 struct sockaddr_in *from4;
1036
1037 from4 = (struct sockaddr_in *)&from_store;
1038 bzero(from4, sizeof(*from4));
1039 from4->sin_family = AF_INET;
1040 from4->sin_len = sizeof(struct sockaddr_in);
1041 from4->sin_addr.s_addr = iph->ip_src.s_addr;
1042 from4->sin_port = sh->src_port;
1043 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
1044 /* its IPv6 */
1045 struct ip6_hdr *ip6;
1046 struct sockaddr_in6 *from6;
1047
1048 ip6 = mtod(m, struct ip6_hdr *);
1049 from6 = (struct sockaddr_in6 *)&from_store;
1050 bzero(from6, sizeof(*from6));
1051 from6->sin6_family = AF_INET6;
1052 from6->sin6_len = sizeof(struct sockaddr_in6);
1053 from6->sin6_addr = ip6->ip6_src;
1054 from6->sin6_port = sh->src_port;
 1055		/* Get the scopes set properly in the sin6 addrs */
1056 /* we probably don't need these operations */
1057 (void)sa6_recoverscope(from6);
1058 sa6_embedscope(from6, ip6_use_defzone);
1059 } else {
1060 /* Currently not supported. */
1061 return (NULL);
1062 }
1063 if (sh->v_tag) {
1064 /* we only go down this path if vtag is non-zero */
1065 retval = sctp_findassoc_by_vtag(from, ntohl(sh->v_tag),
1066 inp_p, netp, sh->src_port, sh->dest_port, 0);
1067 if (retval) {
1068 return (retval);
1069 }
1070 }
1071 if (iph->ip_v == IPVERSION) {
1072 /* its IPv4 */
1073 struct sockaddr_in *to4;
1074
1075 to4 = (struct sockaddr_in *)&to_store;
1076 bzero(to4, sizeof(*to4));
1077 to4->sin_family = AF_INET;
1078 to4->sin_len = sizeof(struct sockaddr_in);
1079 to4->sin_addr.s_addr = iph->ip_dst.s_addr;
1080 to4->sin_port = sh->dest_port;
1081 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
1082 /* its IPv6 */
1083 struct ip6_hdr *ip6;
1084 struct sockaddr_in6 *to6;
1085
1086 ip6 = mtod(m, struct ip6_hdr *);
1087 to6 = (struct sockaddr_in6 *)&to_store;
1088 bzero(to6, sizeof(*to6));
1089 to6->sin6_family = AF_INET6;
1090 to6->sin6_len = sizeof(struct sockaddr_in6);
1091 to6->sin6_addr = ip6->ip6_dst;
1092 to6->sin6_port = sh->dest_port;
 1093		/* Get the scopes set properly in the sin6 addrs */
1094 /* we probably don't need these operations */
1095 (void)sa6_recoverscope(to6);
1096 sa6_embedscope(to6, ip6_use_defzone);
1097 }
1098 find_tcp_pool = 0;
1099 /*
 1100	 * FIX FIX?, I think we only need to look in the TCP pool if it's an
 1101	 * INIT or COOKIE-ECHO. We really don't need to find it that way if
 1102	 * it's an INIT-ACK or COOKIE_ACK since these, in both one-2-one and
 1103	 * one-2-N, would be in the main pool anyway.
1104 */
1105 if ((ch->chunk_type != SCTP_INITIATION) &&
1106 (ch->chunk_type != SCTP_INITIATION_ACK) &&
1107 (ch->chunk_type != SCTP_COOKIE_ACK) &&
1108 (ch->chunk_type != SCTP_COOKIE_ECHO)) {
1109 /* Other chunk types go to the tcp pool. */
1110 find_tcp_pool = 1;
1111 }
1112 if (inp_p) {
1113 retval = sctp_findassociation_addr_sa(to, from, inp_p, netp,
1329 iph = mtod(m, struct ip *);
1330 if (iph->ip_v == IPVERSION) {
1331 /* its IPv4 */
1332 struct sockaddr_in *from4;
1333
1334 from4 = (struct sockaddr_in *)&from_store;
1335 bzero(from4, sizeof(*from4));
1336 from4->sin_family = AF_INET;
1337 from4->sin_len = sizeof(struct sockaddr_in);
1338 from4->sin_addr.s_addr = iph->ip_src.s_addr;
1339 from4->sin_port = sh->src_port;
1340 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
1341 /* its IPv6 */
1342 struct ip6_hdr *ip6;
1343 struct sockaddr_in6 *from6;
1344
1345 ip6 = mtod(m, struct ip6_hdr *);
1346 from6 = (struct sockaddr_in6 *)&from_store;
1347 bzero(from6, sizeof(*from6));
1348 from6->sin6_family = AF_INET6;
1349 from6->sin6_len = sizeof(struct sockaddr_in6);
1350 from6->sin6_addr = ip6->ip6_src;
1351 from6->sin6_port = sh->src_port;
 1352		/* Get the scopes set properly in the sin6 addrs */
1353 /* we probably don't need these operations */
1354 (void)sa6_recoverscope(from6);
1355 sa6_embedscope(from6, ip6_use_defzone);
1356 } else {
1357 /* Currently not supported. */
1358 return (NULL);
1359 }
1360 if (sh->v_tag) {
1361 /* we only go down this path if vtag is non-zero */
1362 retval = sctp_findassoc_by_vtag(from, ntohl(sh->v_tag),
1363 inp_p, netp, sh->src_port, sh->dest_port, 0);
1364 if (retval) {
1365 return (retval);
1366 }
1367 }
1368 if (iph->ip_v == IPVERSION) {
1369 /* its IPv4 */
1370 struct sockaddr_in *to4;
1371
1372 to4 = (struct sockaddr_in *)&to_store;
1373 bzero(to4, sizeof(*to4));
1374 to4->sin_family = AF_INET;
1375 to4->sin_len = sizeof(struct sockaddr_in);
1376 to4->sin_addr.s_addr = iph->ip_dst.s_addr;
1377 to4->sin_port = sh->dest_port;
1378 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
1379 /* its IPv6 */
1380 struct ip6_hdr *ip6;
1381 struct sockaddr_in6 *to6;
1382
1383 ip6 = mtod(m, struct ip6_hdr *);
1384 to6 = (struct sockaddr_in6 *)&to_store;
1385 bzero(to6, sizeof(*to6));
1386 to6->sin6_family = AF_INET6;
1387 to6->sin6_len = sizeof(struct sockaddr_in6);
1388 to6->sin6_addr = ip6->ip6_dst;
1389 to6->sin6_port = sh->dest_port;
 1391		/* Get the scopes set properly in the sin6 addrs */
1391 /* we probably don't need these operations */
1392 (void)sa6_recoverscope(to6);
1393 sa6_embedscope(to6, ip6_use_defzone);
1394 }
1395 find_tcp_pool = 0;
1396 /*
 1397	 * FIX FIX?, I think we only need to look in the TCP pool if it's an
 1398	 * INIT or COOKIE-ECHO. We really don't need to find it that way if
 1399	 * it's an INIT-ACK or COOKIE_ACK since these, in both one-2-one and
 1400	 * one-2-N, would be in the main pool anyway.
1401 */
1402 if ((ch->chunk_type != SCTP_INITIATION) &&
1403 (ch->chunk_type != SCTP_INITIATION_ACK) &&
1404 (ch->chunk_type != SCTP_COOKIE_ACK) &&
1405 (ch->chunk_type != SCTP_COOKIE_ECHO)) {
1406 /* Other chunk types go to the tcp pool. */
1407 find_tcp_pool = 1;
1408 }
1409 if (inp_p) {
1410 retval = sctp_findassociation_addr_sa(to, from, inp_p, netp,
1114 find_tcp_pool);
1411 find_tcp_pool, vrf_id);
1115 inp = *inp_p;
1116 } else {
1117 retval = sctp_findassociation_addr_sa(to, from, &inp, netp,
1412 inp = *inp_p;
1413 } else {
1414 retval = sctp_findassociation_addr_sa(to, from, &inp, netp,
1118 find_tcp_pool);
1415 find_tcp_pool, vrf_id);
1119 }
1120#ifdef SCTP_DEBUG
1121 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1122 printf("retval:%p inp:%p\n", retval, inp);
1123 }
1124#endif
1125 if (retval == NULL && inp) {
 1126	/* Found an EP but not this address */
1127 if ((ch->chunk_type == SCTP_INITIATION) ||
1128 (ch->chunk_type == SCTP_INITIATION_ACK)) {
1129 /*
1130 * special hook, we do NOT return linp or an
1131 * association that is linked to an existing
1132 * association that is under the TCP pool (i.e. no
1133 * listener exists). The endpoint finding routine
 1134			 * will always find a listener before examining the
1135 * TCP pool.
1136 */
1137 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
1138 if (inp_p) {
1139 *inp_p = NULL;
1140 }
1141 return (NULL);
1142 }
1143 retval = sctp_findassociation_special_addr(m, iphlen,
1144 offset, sh, &inp, netp, to);
1145 if (inp_p != NULL) {
1146 *inp_p = inp;
1147 }
1148 }
1149 }
1150#ifdef SCTP_DEBUG
1151 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1152 printf("retval is %p\n", retval);
1153 }
1154#endif
1155 return (retval);
1156}
1157
1158/*
1159 * lookup an association by an ASCONF lookup address.
1160 * if the lookup address is 0.0.0.0 or ::0, use the vtag to do the lookup
1161 */
1162struct sctp_tcb *
1163sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
1164 struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp)
1165{
1166 struct sctp_tcb *stcb;
1167 struct sockaddr_in *sin;
1168 struct sockaddr_in6 *sin6;
1169 struct sockaddr_storage local_store, remote_store;
1170 struct ip *iph;
1171 struct sctp_paramhdr parm_buf, *phdr;
1172 int ptype;
1173 int zero_address = 0;
1174
1175
1176 memset(&local_store, 0, sizeof(local_store));
1177 memset(&remote_store, 0, sizeof(remote_store));
1178
1179 /* First get the destination address setup too. */
1180 iph = mtod(m, struct ip *);
1181 if (iph->ip_v == IPVERSION) {
1182 /* its IPv4 */
1183 sin = (struct sockaddr_in *)&local_store;
1184 sin->sin_family = AF_INET;
1185 sin->sin_len = sizeof(*sin);
1186 sin->sin_port = sh->dest_port;
1187 sin->sin_addr.s_addr = iph->ip_dst.s_addr;
1188 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
1189 /* its IPv6 */
1190 struct ip6_hdr *ip6;
1191
1192 ip6 = mtod(m, struct ip6_hdr *);
1193 sin6 = (struct sockaddr_in6 *)&local_store;
1194 sin6->sin6_family = AF_INET6;
1195 sin6->sin6_len = sizeof(*sin6);
1196 sin6->sin6_port = sh->dest_port;
1197 sin6->sin6_addr = ip6->ip6_dst;
1198 } else {
1199 return NULL;
1200 }
1201
1202 phdr = sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk),
1203 &parm_buf, sizeof(struct sctp_paramhdr));
1204 if (phdr == NULL) {
1205#ifdef SCTP_DEBUG
1206 if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
1207 printf("findassociation_ep_asconf: failed to get asconf lookup addr\n");
1208 }
1209#endif /* SCTP_DEBUG */
1210 return NULL;
1211 }
1212 ptype = (int)((uint32_t) ntohs(phdr->param_type));
1213 /* get the correlation address */
1214 if (ptype == SCTP_IPV6_ADDRESS) {
1215 /* ipv6 address param */
1216 struct sctp_ipv6addr_param *p6, p6_buf;
1217
1218 if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv6addr_param)) {
1219 return NULL;
1220 }
1221 p6 = (struct sctp_ipv6addr_param *)sctp_get_next_param(m,
1222 offset + sizeof(struct sctp_asconf_chunk),
1223 &p6_buf.ph, sizeof(*p6));
1224 if (p6 == NULL) {
1225#ifdef SCTP_DEBUG
1226 if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
1227 printf("findassociation_ep_asconf: failed to get asconf v6 lookup addr\n");
1228 }
1229#endif /* SCTP_DEBUG */
1230 return (NULL);
1231 }
1232 sin6 = (struct sockaddr_in6 *)&remote_store;
1233 sin6->sin6_family = AF_INET6;
1234 sin6->sin6_len = sizeof(*sin6);
1235 sin6->sin6_port = sh->src_port;
1236 memcpy(&sin6->sin6_addr, &p6->addr, sizeof(struct in6_addr));
1237 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
1238 zero_address = 1;
1239 } else if (ptype == SCTP_IPV4_ADDRESS) {
1240 /* ipv4 address param */
1241 struct sctp_ipv4addr_param *p4, p4_buf;
1242
1243 if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv4addr_param)) {
1244 return NULL;
1245 }
1246 p4 = (struct sctp_ipv4addr_param *)sctp_get_next_param(m,
1247 offset + sizeof(struct sctp_asconf_chunk),
1248 &p4_buf.ph, sizeof(*p4));
1249 if (p4 == NULL) {
1250#ifdef SCTP_DEBUG
1251 if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
1252 printf("findassociation_ep_asconf: failed to get asconf v4 lookup addr\n");
1253 }
1254#endif /* SCTP_DEBUG */
1255 return (NULL);
1256 }
1257 sin = (struct sockaddr_in *)&remote_store;
1258 sin->sin_family = AF_INET;
1259 sin->sin_len = sizeof(*sin);
1260 sin->sin_port = sh->src_port;
1261 memcpy(&sin->sin_addr, &p4->addr, sizeof(struct in_addr));
1262 if (sin->sin_addr.s_addr == INADDR_ANY)
1263 zero_address = 1;
1264 } else {
1265 /* invalid address param type */
1266 return NULL;
1267 }
1268
1269 if (zero_address) {
1270 stcb = sctp_findassoc_by_vtag(NULL, ntohl(sh->v_tag), inp_p,
1271 netp, sh->src_port, sh->dest_port, 1);
1272 /*
1273 * printf("findassociation_ep_asconf: zero lookup address
1274 * finds stcb 0x%x\n", (uint32_t)stcb);
1275 */
1276 } else {
1277 stcb = sctp_findassociation_ep_addr(inp_p,
1278 (struct sockaddr *)&remote_store, netp,
1279 (struct sockaddr *)&local_store, NULL);
1280 }
1281 return (stcb);
1282}
1283
1284
1416 }
1417#ifdef SCTP_DEBUG
1418 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1419 printf("retval:%p inp:%p\n", retval, inp);
1420 }
1421#endif
1422 if (retval == NULL && inp) {
 1423	/* Found an EP but not this address */
1424 if ((ch->chunk_type == SCTP_INITIATION) ||
1425 (ch->chunk_type == SCTP_INITIATION_ACK)) {
1426 /*
1427 * special hook, we do NOT return linp or an
1428 * association that is linked to an existing
1429 * association that is under the TCP pool (i.e. no
1430 * listener exists). The endpoint finding routine
 1431			 * will always find a listener before examining the
1432 * TCP pool.
1433 */
1434 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
1435 if (inp_p) {
1436 *inp_p = NULL;
1437 }
1438 return (NULL);
1439 }
1440 retval = sctp_findassociation_special_addr(m, iphlen,
1441 offset, sh, &inp, netp, to);
1442 if (inp_p != NULL) {
1443 *inp_p = inp;
1444 }
1445 }
1446 }
1447#ifdef SCTP_DEBUG
1448 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1449 printf("retval is %p\n", retval);
1450 }
1451#endif
1452 return (retval);
1453}
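
/*
 * Illustrative sketch only (not part of the diff above): the chunk-type test
 * that sctp_findassociation_addr uses to decide whether the TCP (one-to-one)
 * pool must also be searched.  Chunk type values are from RFC 4960; the
 * helper name is an assumption.
 */
#include <stdio.h>

#define	CHUNK_INIT		1
#define	CHUNK_INIT_ACK		2
#define	CHUNK_COOKIE_ECHO	10
#define	CHUNK_COOKIE_ACK	11

static int
needs_tcp_pool_search(int chunk_type)
{
	/* handshake chunks are always reachable through the main hash */
	if (chunk_type == CHUNK_INIT || chunk_type == CHUNK_INIT_ACK ||
	    chunk_type == CHUNK_COOKIE_ECHO || chunk_type == CHUNK_COOKIE_ACK)
		return (0);
	return (1);		/* anything else may only match in the TCP pool */
}

int
main(void)
{
	printf("INIT -> %d, DATA -> %d\n",
	    needs_tcp_pool_search(CHUNK_INIT), needs_tcp_pool_search(0));
	return (0);
}
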
1454
1455/*
1456 * lookup an association by an ASCONF lookup address.
1457 * if the lookup address is 0.0.0.0 or ::0, use the vtag to do the lookup
1458 */
1459struct sctp_tcb *
1460sctp_findassociation_ep_asconf(struct mbuf *m, int iphlen, int offset,
1461 struct sctphdr *sh, struct sctp_inpcb **inp_p, struct sctp_nets **netp)
1462{
1463 struct sctp_tcb *stcb;
1464 struct sockaddr_in *sin;
1465 struct sockaddr_in6 *sin6;
1466 struct sockaddr_storage local_store, remote_store;
1467 struct ip *iph;
1468 struct sctp_paramhdr parm_buf, *phdr;
1469 int ptype;
1470 int zero_address = 0;
1471
1472
1473 memset(&local_store, 0, sizeof(local_store));
1474 memset(&remote_store, 0, sizeof(remote_store));
1475
1476 /* First get the destination address setup too. */
1477 iph = mtod(m, struct ip *);
1478 if (iph->ip_v == IPVERSION) {
1479 /* its IPv4 */
1480 sin = (struct sockaddr_in *)&local_store;
1481 sin->sin_family = AF_INET;
1482 sin->sin_len = sizeof(*sin);
1483 sin->sin_port = sh->dest_port;
1484 sin->sin_addr.s_addr = iph->ip_dst.s_addr;
1485 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
1486 /* its IPv6 */
1487 struct ip6_hdr *ip6;
1488
1489 ip6 = mtod(m, struct ip6_hdr *);
1490 sin6 = (struct sockaddr_in6 *)&local_store;
1491 sin6->sin6_family = AF_INET6;
1492 sin6->sin6_len = sizeof(*sin6);
1493 sin6->sin6_port = sh->dest_port;
1494 sin6->sin6_addr = ip6->ip6_dst;
1495 } else {
1496 return NULL;
1497 }
1498
1499 phdr = sctp_get_next_param(m, offset + sizeof(struct sctp_asconf_chunk),
1500 &parm_buf, sizeof(struct sctp_paramhdr));
1501 if (phdr == NULL) {
1502#ifdef SCTP_DEBUG
1503 if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
1504 printf("findassociation_ep_asconf: failed to get asconf lookup addr\n");
1505 }
1506#endif /* SCTP_DEBUG */
1507 return NULL;
1508 }
1509 ptype = (int)((uint32_t) ntohs(phdr->param_type));
1510 /* get the correlation address */
1511 if (ptype == SCTP_IPV6_ADDRESS) {
1512 /* ipv6 address param */
1513 struct sctp_ipv6addr_param *p6, p6_buf;
1514
1515 if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv6addr_param)) {
1516 return NULL;
1517 }
1518 p6 = (struct sctp_ipv6addr_param *)sctp_get_next_param(m,
1519 offset + sizeof(struct sctp_asconf_chunk),
1520 &p6_buf.ph, sizeof(*p6));
1521 if (p6 == NULL) {
1522#ifdef SCTP_DEBUG
1523 if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
1524 printf("findassociation_ep_asconf: failed to get asconf v6 lookup addr\n");
1525 }
1526#endif /* SCTP_DEBUG */
1527 return (NULL);
1528 }
1529 sin6 = (struct sockaddr_in6 *)&remote_store;
1530 sin6->sin6_family = AF_INET6;
1531 sin6->sin6_len = sizeof(*sin6);
1532 sin6->sin6_port = sh->src_port;
1533 memcpy(&sin6->sin6_addr, &p6->addr, sizeof(struct in6_addr));
1534 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))
1535 zero_address = 1;
1536 } else if (ptype == SCTP_IPV4_ADDRESS) {
1537 /* ipv4 address param */
1538 struct sctp_ipv4addr_param *p4, p4_buf;
1539
1540 if (ntohs(phdr->param_length) != sizeof(struct sctp_ipv4addr_param)) {
1541 return NULL;
1542 }
1543 p4 = (struct sctp_ipv4addr_param *)sctp_get_next_param(m,
1544 offset + sizeof(struct sctp_asconf_chunk),
1545 &p4_buf.ph, sizeof(*p4));
1546 if (p4 == NULL) {
1547#ifdef SCTP_DEBUG
1548 if (sctp_debug_on & SCTP_DEBUG_INPUT3) {
1549 printf("findassociation_ep_asconf: failed to get asconf v4 lookup addr\n");
1550 }
1551#endif /* SCTP_DEBUG */
1552 return (NULL);
1553 }
1554 sin = (struct sockaddr_in *)&remote_store;
1555 sin->sin_family = AF_INET;
1556 sin->sin_len = sizeof(*sin);
1557 sin->sin_port = sh->src_port;
1558 memcpy(&sin->sin_addr, &p4->addr, sizeof(struct in_addr));
1559 if (sin->sin_addr.s_addr == INADDR_ANY)
1560 zero_address = 1;
1561 } else {
1562 /* invalid address param type */
1563 return NULL;
1564 }
1565
1566 if (zero_address) {
1567 stcb = sctp_findassoc_by_vtag(NULL, ntohl(sh->v_tag), inp_p,
1568 netp, sh->src_port, sh->dest_port, 1);
1569 /*
1570 * printf("findassociation_ep_asconf: zero lookup address
1571 * finds stcb 0x%x\n", (uint32_t)stcb);
1572 */
1573 } else {
1574 stcb = sctp_findassociation_ep_addr(inp_p,
1575 (struct sockaddr *)&remote_store, netp,
1576 (struct sockaddr *)&local_store, NULL);
1577 }
1578 return (stcb);
1579}
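
/*
 * Illustrative sketch only (not part of the diff above): the "zero address"
 * test sctp_findassociation_ep_asconf applies to the ASCONF lookup address.
 * If that address is the IPv4 wildcard 0.0.0.0 or the IPv6 unspecified
 * address ::, the association has to be found by verification tag instead of
 * by address.  The helper name is an assumption.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int
is_zero_lookup_address(const struct sockaddr *sa)
{
	if (sa->sa_family == AF_INET) {
		const struct sockaddr_in *sin = (const struct sockaddr_in *)sa;

		return (sin->sin_addr.s_addr == INADDR_ANY);
	}
	if (sa->sa_family == AF_INET6) {
		const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sa;

		return (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr));
	}
	return (0);
}

int
main(void)
{
	struct sockaddr_in sin;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = INADDR_ANY;
	printf("zero address: %d\n", is_zero_lookup_address((struct sockaddr *)&sin));
	return (0);
}
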
1580
1581
1285extern int sctp_max_burst_default;
1286
1287extern unsigned int sctp_delayed_sack_time_default;
1288extern unsigned int sctp_heartbeat_interval_default;
1289extern unsigned int sctp_pmtu_raise_time_default;
1290extern unsigned int sctp_shutdown_guard_time_default;
1291extern unsigned int sctp_secret_lifetime_default;
1292
1293extern unsigned int sctp_rto_max_default;
1294extern unsigned int sctp_rto_min_default;
1295extern unsigned int sctp_rto_initial_default;
1296extern unsigned int sctp_init_rto_max_default;
1297extern unsigned int sctp_valid_cookie_life_default;
1298extern unsigned int sctp_init_rtx_max_default;
1299extern unsigned int sctp_assoc_rtx_max_default;
1300extern unsigned int sctp_path_rtx_max_default;
1301extern unsigned int sctp_nr_outgoing_streams_default;
1302
1303/*
1304 * allocate a sctp_inpcb and setup a temporary binding to a port/all
 1305 * addresses. This way if we don't get a bind we by default pick an ephemeral
1306 * port with all addresses bound.
1307 */
1308int
1309sctp_inpcb_alloc(struct socket *so)
1310{
1311 /*
1312 * we get called when a new endpoint starts up. We need to allocate
1313 * the sctp_inpcb structure from the zone and init it. Mark it as
1314 * unbound and find a port that we can use as an ephemeral with
1315 * INADDR_ANY. If the user binds later no problem we can then add in
1316 * the specific addresses. And setup the default parameters for the
1317 * EP.
1318 */
1319 int i, error;
1320 struct sctp_inpcb *inp;
1321
1322 struct sctp_pcb *m;
1323 struct timeval time;
1324 sctp_sharedkey_t *null_key;
1325
1326 error = 0;
1327
1328 SCTP_INP_INFO_WLOCK();
1329 inp = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_ep, struct sctp_inpcb);
1330 if (inp == NULL) {
1331 printf("Out of SCTP-INPCB structures - no resources\n");
1332 SCTP_INP_INFO_WUNLOCK();
1333 return (ENOBUFS);
1334 }
1335 /* zap it */
1336 bzero(inp, sizeof(*inp));
1337
1338 /* bump generations */
1339 /* setup socket pointers */
1340 inp->sctp_socket = so;
1341 inp->ip_inp.inp.inp_socket = so;
1342
1343 inp->partial_delivery_point = so->so_rcv.sb_hiwat >> SCTP_PARTIAL_DELIVERY_SHIFT;
1344 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
1345
1346#ifdef IPSEC
1347 {
1348 struct inpcbpolicy *pcb_sp = NULL;
1349
1350 error = ipsec_init_pcbpolicy(so, &pcb_sp);
1351 /* Arrange to share the policy */
1352 inp->ip_inp.inp.inp_sp = pcb_sp;
1353 ((struct in6pcb *)(&inp->ip_inp.inp))->in6p_sp = pcb_sp;
1354 }
1355 if (error != 0) {
1356 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
1357 SCTP_INP_INFO_WUNLOCK();
1358 return error;
1359 }
1360#endif /* IPSEC */
1361 SCTP_INCR_EP_COUNT();
1362 inp->ip_inp.inp.inp_ip_ttl = ip_defttl;
1363 SCTP_INP_INFO_WUNLOCK();
1364
1365 so->so_pcb = (caddr_t)inp;
1366
1582/*
1583 * allocate a sctp_inpcb and setup a temporary binding to a port/all
 1584 * addresses. This way if we don't get a bind we by default pick an ephemeral
1585 * port with all addresses bound.
1586 */
1587int
1588sctp_inpcb_alloc(struct socket *so)
1589{
1590 /*
1591 * we get called when a new endpoint starts up. We need to allocate
1592 * the sctp_inpcb structure from the zone and init it. Mark it as
1593 * unbound and find a port that we can use as an ephemeral with
1594 * INADDR_ANY. If the user binds later no problem we can then add in
1595 * the specific addresses. And setup the default parameters for the
1596 * EP.
1597 */
1598 int i, error;
1599 struct sctp_inpcb *inp;
1600
1601 struct sctp_pcb *m;
1602 struct timeval time;
1603 sctp_sharedkey_t *null_key;
1604
1605 error = 0;
1606
1607 SCTP_INP_INFO_WLOCK();
1608 inp = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_ep, struct sctp_inpcb);
1609 if (inp == NULL) {
1610 printf("Out of SCTP-INPCB structures - no resources\n");
1611 SCTP_INP_INFO_WUNLOCK();
1612 return (ENOBUFS);
1613 }
1614 /* zap it */
1615 bzero(inp, sizeof(*inp));
1616
1617 /* bump generations */
1618 /* setup socket pointers */
1619 inp->sctp_socket = so;
1620 inp->ip_inp.inp.inp_socket = so;
1621
1622 inp->partial_delivery_point = so->so_rcv.sb_hiwat >> SCTP_PARTIAL_DELIVERY_SHIFT;
1623 inp->sctp_frag_point = SCTP_DEFAULT_MAXSEGMENT;
1624
1625#ifdef IPSEC
1626 {
1627 struct inpcbpolicy *pcb_sp = NULL;
1628
1629 error = ipsec_init_pcbpolicy(so, &pcb_sp);
1630 /* Arrange to share the policy */
1631 inp->ip_inp.inp.inp_sp = pcb_sp;
1632 ((struct in6pcb *)(&inp->ip_inp.inp))->in6p_sp = pcb_sp;
1633 }
1634 if (error != 0) {
1635 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
1636 SCTP_INP_INFO_WUNLOCK();
1637 return error;
1638 }
1639#endif /* IPSEC */
1640 SCTP_INCR_EP_COUNT();
1641 inp->ip_inp.inp.inp_ip_ttl = ip_defttl;
1642 SCTP_INP_INFO_WUNLOCK();
1643
1644 so->so_pcb = (caddr_t)inp;
1645
1367 if ((so->so_type == SOCK_DGRAM) ||
1368 (so->so_type == SOCK_SEQPACKET)) {
1646 if ((SCTP_SO_TYPE(so) == SOCK_DGRAM) ||
1647 (SCTP_SO_TYPE(so) == SOCK_SEQPACKET)) {
1369 /* UDP style socket */
1370 inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
1371 SCTP_PCB_FLAGS_UNBOUND);
1372 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
1373 /* Be sure it is NON-BLOCKING IO for UDP */
1648 /* UDP style socket */
1649 inp->sctp_flags = (SCTP_PCB_FLAGS_UDPTYPE |
1650 SCTP_PCB_FLAGS_UNBOUND);
1651 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
1652 /* Be sure it is NON-BLOCKING IO for UDP */
1374 /* so->so_state |= SS_NBIO; */
1375 } else if (so->so_type == SOCK_STREAM) {
1653 /* SCTP_SET_SO_NBIO(so); */
1654 } else if (SCTP_SO_TYPE(so) == SOCK_STREAM) {
1376 /* TCP style socket */
1377 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
1378 SCTP_PCB_FLAGS_UNBOUND);
1379 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
1380 /* Be sure we have blocking IO by default */
1655 /* TCP style socket */
1656 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE |
1657 SCTP_PCB_FLAGS_UNBOUND);
1658 sctp_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT);
1659 /* Be sure we have blocking IO by default */
1381 so->so_state &= ~SS_NBIO;
1660 SCTP_CLEAR_SO_NBIO(so);
1382 } else {
1383 /*
1384 * unsupported socket type (RAW, etc)- in case we missed it
1385 * in protosw
1386 */
1387 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
1388 return (EOPNOTSUPP);
1389 }
1390 inp->sctp_tcbhash = SCTP_HASH_INIT(sctp_pcbtblsize,
1391 &inp->sctp_hashmark);
1392 if (inp->sctp_tcbhash == NULL) {
1393 printf("Out of SCTP-INPCB->hashinit - no resources\n");
1394 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
1395 return (ENOBUFS);
1396 }
1661 } else {
1662 /*
1663 * unsupported socket type (RAW, etc)- in case we missed it
1664 * in protosw
1665 */
1666 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
1667 return (EOPNOTSUPP);
1668 }
1669 inp->sctp_tcbhash = SCTP_HASH_INIT(sctp_pcbtblsize,
1670 &inp->sctp_hashmark);
1671 if (inp->sctp_tcbhash == NULL) {
1672 printf("Out of SCTP-INPCB->hashinit - no resources\n");
1673 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
1674 return (ENOBUFS);
1675 }
1676 inp->def_vrf_id = SCTP_DEFAULT_VRFID;
1677
1397 SCTP_INP_INFO_WLOCK();
1398 SCTP_INP_LOCK_INIT(inp);
1399 SCTP_INP_READ_INIT(inp);
1400 SCTP_ASOC_CREATE_LOCK_INIT(inp);
1401 /* lock the new ep */
1402 SCTP_INP_WLOCK(inp);
1403
1404 /* add it to the info area */
1405 LIST_INSERT_HEAD(&sctppcbinfo.listhead, inp, sctp_list);
1406 SCTP_INP_INFO_WUNLOCK();
1407
1408 TAILQ_INIT(&inp->read_queue);
1409 LIST_INIT(&inp->sctp_addr_list);
1678 SCTP_INP_INFO_WLOCK();
1679 SCTP_INP_LOCK_INIT(inp);
1680 SCTP_INP_READ_INIT(inp);
1681 SCTP_ASOC_CREATE_LOCK_INIT(inp);
1682 /* lock the new ep */
1683 SCTP_INP_WLOCK(inp);
1684
1685 /* add it to the info area */
1686 LIST_INSERT_HEAD(&sctppcbinfo.listhead, inp, sctp_list);
1687 SCTP_INP_INFO_WUNLOCK();
1688
1689 TAILQ_INIT(&inp->read_queue);
1690 LIST_INIT(&inp->sctp_addr_list);
1691
1410 LIST_INIT(&inp->sctp_asoc_list);
1411
1412#ifdef SCTP_TRACK_FREED_ASOCS
1413 /* TEMP CODE */
1414 LIST_INIT(&inp->sctp_asoc_free_list);
1415#endif
1416 /* Init the timer structure for signature change */
1417 SCTP_OS_TIMER_INIT(&inp->sctp_ep.signature_change.timer);
1418 inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NEWCOOKIE;
1419
1420 /* now init the actual endpoint default data */
1421 m = &inp->sctp_ep;
1422
1423 /* setup the base timeout information */
1424 m->sctp_timeoutticks[SCTP_TIMER_SEND] = SEC_TO_TICKS(SCTP_SEND_SEC); /* needed ? */
1425 m->sctp_timeoutticks[SCTP_TIMER_INIT] = SEC_TO_TICKS(SCTP_INIT_SEC); /* needed ? */
1426 m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sctp_delayed_sack_time_default);
1427 m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(sctp_heartbeat_interval_default);
1428 m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(sctp_pmtu_raise_time_default);
1429 m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(sctp_shutdown_guard_time_default);
1430 m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(sctp_secret_lifetime_default);
1431 /* all max/min max are in ms */
1432 m->sctp_maxrto = sctp_rto_max_default;
1433 m->sctp_minrto = sctp_rto_min_default;
1434 m->initial_rto = sctp_rto_initial_default;
1435 m->initial_init_rto_max = sctp_init_rto_max_default;
1692 LIST_INIT(&inp->sctp_asoc_list);
1693
1694#ifdef SCTP_TRACK_FREED_ASOCS
1695 /* TEMP CODE */
1696 LIST_INIT(&inp->sctp_asoc_free_list);
1697#endif
1698 /* Init the timer structure for signature change */
1699 SCTP_OS_TIMER_INIT(&inp->sctp_ep.signature_change.timer);
1700 inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NEWCOOKIE;
1701
1702 /* now init the actual endpoint default data */
1703 m = &inp->sctp_ep;
1704
1705 /* setup the base timeout information */
1706 m->sctp_timeoutticks[SCTP_TIMER_SEND] = SEC_TO_TICKS(SCTP_SEND_SEC); /* needed ? */
1707 m->sctp_timeoutticks[SCTP_TIMER_INIT] = SEC_TO_TICKS(SCTP_INIT_SEC); /* needed ? */
1708 m->sctp_timeoutticks[SCTP_TIMER_RECV] = MSEC_TO_TICKS(sctp_delayed_sack_time_default);
1709 m->sctp_timeoutticks[SCTP_TIMER_HEARTBEAT] = MSEC_TO_TICKS(sctp_heartbeat_interval_default);
1710 m->sctp_timeoutticks[SCTP_TIMER_PMTU] = SEC_TO_TICKS(sctp_pmtu_raise_time_default);
1711 m->sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN] = SEC_TO_TICKS(sctp_shutdown_guard_time_default);
1712 m->sctp_timeoutticks[SCTP_TIMER_SIGNATURE] = SEC_TO_TICKS(sctp_secret_lifetime_default);
1713 /* all max/min max are in ms */
1714 m->sctp_maxrto = sctp_rto_max_default;
1715 m->sctp_minrto = sctp_rto_min_default;
1716 m->initial_rto = sctp_rto_initial_default;
1717 m->initial_init_rto_max = sctp_init_rto_max_default;
1718 m->sctp_sack_freq = sctp_sack_freq_default;
1436
1437 m->max_open_streams_intome = MAX_SCTP_STREAMS;
1438
1439 m->max_init_times = sctp_init_rtx_max_default;
1440 m->max_send_times = sctp_assoc_rtx_max_default;
1441 m->def_net_failure = sctp_path_rtx_max_default;
1442 m->sctp_sws_sender = SCTP_SWS_SENDER_DEF;
1443 m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF;
1444 m->max_burst = sctp_max_burst_default;
 1445	/* number of streams to pre-open on an association */
1446 m->pre_open_stream_count = sctp_nr_outgoing_streams_default;
1447
1448 /* Add adaptation cookie */
1449 m->adaptation_layer_indicator = 0x504C5253;
1450
1451 /* seed random number generator */
1452 m->random_counter = 1;
1453 m->store_at = SCTP_SIGNATURE_SIZE;
1454 SCTP_READ_RANDOM(m->random_numbers, sizeof(m->random_numbers));
1455 sctp_fill_random_store(m);
1456
1457 /* Minimum cookie size */
1458 m->size_of_a_cookie = (sizeof(struct sctp_init_msg) * 2) +
1459 sizeof(struct sctp_state_cookie);
1460 m->size_of_a_cookie += SCTP_SIGNATURE_SIZE;
1461
1462 /* Setup the initial secret */
1463 SCTP_GETTIME_TIMEVAL(&time);
1464 m->time_of_secret_change = time.tv_sec;
1465
1466 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1467 m->secret_key[0][i] = sctp_select_initial_TSN(m);
1468 }
1469 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
1470
1471 /* How long is a cookie good for ? */
1472 m->def_cookie_life = sctp_valid_cookie_life_default;
1719
1720 m->max_open_streams_intome = MAX_SCTP_STREAMS;
1721
1722 m->max_init_times = sctp_init_rtx_max_default;
1723 m->max_send_times = sctp_assoc_rtx_max_default;
1724 m->def_net_failure = sctp_path_rtx_max_default;
1725 m->sctp_sws_sender = SCTP_SWS_SENDER_DEF;
1726 m->sctp_sws_receiver = SCTP_SWS_RECEIVER_DEF;
1727 m->max_burst = sctp_max_burst_default;
 1728	/* number of streams to pre-open on an association */
1729 m->pre_open_stream_count = sctp_nr_outgoing_streams_default;
1730
1731 /* Add adaptation cookie */
1732 m->adaptation_layer_indicator = 0x504C5253;
1733
1734 /* seed random number generator */
1735 m->random_counter = 1;
1736 m->store_at = SCTP_SIGNATURE_SIZE;
1737 SCTP_READ_RANDOM(m->random_numbers, sizeof(m->random_numbers));
1738 sctp_fill_random_store(m);
1739
1740 /* Minimum cookie size */
1741 m->size_of_a_cookie = (sizeof(struct sctp_init_msg) * 2) +
1742 sizeof(struct sctp_state_cookie);
1743 m->size_of_a_cookie += SCTP_SIGNATURE_SIZE;
1744
1745 /* Setup the initial secret */
1746 SCTP_GETTIME_TIMEVAL(&time);
1747 m->time_of_secret_change = time.tv_sec;
1748
1749 for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
1750 m->secret_key[0][i] = sctp_select_initial_TSN(m);
1751 }
1752 sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL);
1753
1754 /* How long is a cookie good for ? */
1755 m->def_cookie_life = sctp_valid_cookie_life_default;
1473
1474 /*
1475 * Initialize authentication parameters
1476 */
1477 m->local_hmacs = sctp_default_supported_hmaclist();
1478 m->local_auth_chunks = sctp_alloc_chunklist();
1479 sctp_auth_set_default_chunks(m->local_auth_chunks);
1480 LIST_INIT(&m->shared_keys);
1481 /* add default NULL key as key id 0 */
1482 null_key = sctp_alloc_sharedkey();
1483 sctp_insert_sharedkey(&m->shared_keys, null_key);
1484 SCTP_INP_WUNLOCK(inp);
1485#ifdef SCTP_LOG_CLOSING
1486 sctp_log_closing(inp, NULL, 12);
1487#endif
1488 return (error);
1489}
1490
1491
1492void
1493sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp,
1494 struct sctp_tcb *stcb)
1495{
1496 struct sctp_nets *net;
1497 uint16_t lport, rport;
1498 struct sctppcbhead *head;
1499 struct sctp_laddr *laddr, *oladdr;
1500
1501 SCTP_TCB_UNLOCK(stcb);
1502 SCTP_INP_INFO_WLOCK();
1503 SCTP_INP_WLOCK(old_inp);
1504 SCTP_INP_WLOCK(new_inp);
1505 SCTP_TCB_LOCK(stcb);
1506
1507 new_inp->sctp_ep.time_of_secret_change =
1508 old_inp->sctp_ep.time_of_secret_change;
1509 memcpy(new_inp->sctp_ep.secret_key, old_inp->sctp_ep.secret_key,
1510 sizeof(old_inp->sctp_ep.secret_key));
1511 new_inp->sctp_ep.current_secret_number =
1512 old_inp->sctp_ep.current_secret_number;
1513 new_inp->sctp_ep.last_secret_number =
1514 old_inp->sctp_ep.last_secret_number;
1515 new_inp->sctp_ep.size_of_a_cookie = old_inp->sctp_ep.size_of_a_cookie;
1516
1517 /* make it so new data pours into the new socket */
1518 stcb->sctp_socket = new_inp->sctp_socket;
1519 stcb->sctp_ep = new_inp;
1520
1521 /* Copy the port across */
1522 lport = new_inp->sctp_lport = old_inp->sctp_lport;
1523 rport = stcb->rport;
1524 /* Pull the tcb from the old association */
1525 LIST_REMOVE(stcb, sctp_tcbhash);
1526 LIST_REMOVE(stcb, sctp_tcblist);
1527
1528 /* Now insert the new_inp into the TCP connected hash */
1529 head = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR((lport + rport),
1530 sctppcbinfo.hashtcpmark)];
1531
1532 LIST_INSERT_HEAD(head, new_inp, sctp_hash);
1533 /* Its safe to access */
1534 new_inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND;
1535
1536 /* Now move the tcb into the endpoint list */
1537 LIST_INSERT_HEAD(&new_inp->sctp_asoc_list, stcb, sctp_tcblist);
1538 /*
1539 * Question, do we even need to worry about the ep-hash since we
1540 * only have one connection? Probably not :> so lets get rid of it
1541 * and not suck up any kernel memory in that.
1542 */
1543
1544 /* Ok. Let's restart timer. */
1545 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1546 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, new_inp,
1547 stcb, net);
1548 }
1549
1550 SCTP_INP_INFO_WUNLOCK();
1551 if (new_inp->sctp_tcbhash != NULL) {
1552 SCTP_HASH_FREE(new_inp->sctp_tcbhash, new_inp->sctp_hashmark);
1553 new_inp->sctp_tcbhash = NULL;
1554 }
1555 if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
1556 /* Subset bound, so copy in the laddr list from the old_inp */
1557 LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) {
1558 laddr = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
1559 if (laddr == NULL) {
1560 /*
1561 * Gak, what can we do? This assoc is really
1562 * HOSED. We probably should send an abort
1563 * here.
1564 */
1565#ifdef SCTP_DEBUG
1566 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1567 printf("Association hosed in TCP model, out of laddr memory\n");
1568 }
1569#endif /* SCTP_DEBUG */
1570 continue;
1571 }
1572 SCTP_INCR_LADDR_COUNT();
1573 bzero(laddr, sizeof(*laddr));
1574 laddr->ifa = oladdr->ifa;
1575 LIST_INSERT_HEAD(&new_inp->sctp_addr_list, laddr,
1576 sctp_nxt_addr);
1577 new_inp->laddr_count++;
1578 }
1579 }
1580 /*
 1581	 * Now any running timers need to be adjusted. Since we really don't
 1582	 * care whether they are running or not, just blast the new_inp into
 1583	 * all of them.
1584 */
1585
1586 stcb->asoc.hb_timer.ep = (void *)new_inp;
1587 stcb->asoc.dack_timer.ep = (void *)new_inp;
1588 stcb->asoc.asconf_timer.ep = (void *)new_inp;
1589 stcb->asoc.strreset_timer.ep = (void *)new_inp;
1590 stcb->asoc.shut_guard_timer.ep = (void *)new_inp;
1591 stcb->asoc.autoclose_timer.ep = (void *)new_inp;
1592 stcb->asoc.delayed_event_timer.ep = (void *)new_inp;
1593 /* now what about the nets? */
1594 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1595 net->pmtu_timer.ep = (void *)new_inp;
1596 net->rxt_timer.ep = (void *)new_inp;
1597 net->fr_timer.ep = (void *)new_inp;
1598 }
1599 SCTP_INP_WUNLOCK(new_inp);
1600 SCTP_INP_WUNLOCK(old_inp);
1601}
1602
1603static int
1756 /*
1757 * Initialize authentication parameters
1758 */
1759 m->local_hmacs = sctp_default_supported_hmaclist();
1760 m->local_auth_chunks = sctp_alloc_chunklist();
1761 sctp_auth_set_default_chunks(m->local_auth_chunks);
1762 LIST_INIT(&m->shared_keys);
1763 /* add default NULL key as key id 0 */
1764 null_key = sctp_alloc_sharedkey();
1765 sctp_insert_sharedkey(&m->shared_keys, null_key);
1766 SCTP_INP_WUNLOCK(inp);
1767#ifdef SCTP_LOG_CLOSING
1768 sctp_log_closing(inp, NULL, 12);
1769#endif
1770 return (error);
1771}
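
/*
 * Illustrative sketch only (not part of the diff above): how the millisecond
 * and second defaults copied into sctp_timeoutticks[] above become timer
 * ticks.  SEC_TO_TICKS()/MSEC_TO_TICKS() are just scaling by the clock rate;
 * hz is assumed to be 1000 here, and the sample values are only roughly the
 * stack defaults.
 */
#include <stdio.h>

#define	HZ			1000	/* assumed ticks per second */
#define	SEC_TO_TICKS(s)		((s) * HZ)
#define	MSEC_TO_TICKS(ms)	(((ms) * HZ) / 1000)

int
main(void)
{
	unsigned int delayed_sack_ms = 200;	/* delayed-SACK default, ms */
	unsigned int pmtu_raise_sec = 600;	/* PMTU raise default, seconds */

	printf("delayed SACK: %u ticks\n", MSEC_TO_TICKS(delayed_sack_ms));
	printf("PMTU raise:   %u ticks\n", SEC_TO_TICKS(pmtu_raise_sec));
	return (0);
}
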
1772
1773
1774void
1775sctp_move_pcb_and_assoc(struct sctp_inpcb *old_inp, struct sctp_inpcb *new_inp,
1776 struct sctp_tcb *stcb)
1777{
1778 struct sctp_nets *net;
1779 uint16_t lport, rport;
1780 struct sctppcbhead *head;
1781 struct sctp_laddr *laddr, *oladdr;
1782
1783 SCTP_TCB_UNLOCK(stcb);
1784 SCTP_INP_INFO_WLOCK();
1785 SCTP_INP_WLOCK(old_inp);
1786 SCTP_INP_WLOCK(new_inp);
1787 SCTP_TCB_LOCK(stcb);
1788
1789 new_inp->sctp_ep.time_of_secret_change =
1790 old_inp->sctp_ep.time_of_secret_change;
1791 memcpy(new_inp->sctp_ep.secret_key, old_inp->sctp_ep.secret_key,
1792 sizeof(old_inp->sctp_ep.secret_key));
1793 new_inp->sctp_ep.current_secret_number =
1794 old_inp->sctp_ep.current_secret_number;
1795 new_inp->sctp_ep.last_secret_number =
1796 old_inp->sctp_ep.last_secret_number;
1797 new_inp->sctp_ep.size_of_a_cookie = old_inp->sctp_ep.size_of_a_cookie;
1798
1799 /* make it so new data pours into the new socket */
1800 stcb->sctp_socket = new_inp->sctp_socket;
1801 stcb->sctp_ep = new_inp;
1802
1803 /* Copy the port across */
1804 lport = new_inp->sctp_lport = old_inp->sctp_lport;
1805 rport = stcb->rport;
1806 /* Pull the tcb from the old association */
1807 LIST_REMOVE(stcb, sctp_tcbhash);
1808 LIST_REMOVE(stcb, sctp_tcblist);
1809
1810 /* Now insert the new_inp into the TCP connected hash */
1811 head = &sctppcbinfo.sctp_tcpephash[SCTP_PCBHASH_ALLADDR((lport + rport),
1812 sctppcbinfo.hashtcpmark)];
1813
1814 LIST_INSERT_HEAD(head, new_inp, sctp_hash);
1815 /* Its safe to access */
1816 new_inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND;
1817
1818 /* Now move the tcb into the endpoint list */
1819 LIST_INSERT_HEAD(&new_inp->sctp_asoc_list, stcb, sctp_tcblist);
1820 /*
1821 * Question, do we even need to worry about the ep-hash since we
1822 * only have one connection? Probably not :> so lets get rid of it
1823 * and not suck up any kernel memory in that.
1824 */
1825
1826 /* Ok. Let's restart timer. */
1827 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1828 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, new_inp,
1829 stcb, net);
1830 }
1831
1832 SCTP_INP_INFO_WUNLOCK();
1833 if (new_inp->sctp_tcbhash != NULL) {
1834 SCTP_HASH_FREE(new_inp->sctp_tcbhash, new_inp->sctp_hashmark);
1835 new_inp->sctp_tcbhash = NULL;
1836 }
1837 if ((new_inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
1838 /* Subset bound, so copy in the laddr list from the old_inp */
1839 LIST_FOREACH(oladdr, &old_inp->sctp_addr_list, sctp_nxt_addr) {
1840 laddr = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
1841 if (laddr == NULL) {
1842 /*
1843 * Gak, what can we do? This assoc is really
1844 * HOSED. We probably should send an abort
1845 * here.
1846 */
1847#ifdef SCTP_DEBUG
1848 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1849 printf("Association hosed in TCP model, out of laddr memory\n");
1850 }
1851#endif /* SCTP_DEBUG */
1852 continue;
1853 }
1854 SCTP_INCR_LADDR_COUNT();
1855 bzero(laddr, sizeof(*laddr));
1856 laddr->ifa = oladdr->ifa;
1857 LIST_INSERT_HEAD(&new_inp->sctp_addr_list, laddr,
1858 sctp_nxt_addr);
1859 new_inp->laddr_count++;
1860 }
1861 }
1862 /*
 1863	 * Now any running timers need to be adjusted. Since we really don't
 1864	 * care whether they are running or not, just blast the new_inp into
 1865	 * all of them.
1866 */
1867
1868 stcb->asoc.hb_timer.ep = (void *)new_inp;
1869 stcb->asoc.dack_timer.ep = (void *)new_inp;
1870 stcb->asoc.asconf_timer.ep = (void *)new_inp;
1871 stcb->asoc.strreset_timer.ep = (void *)new_inp;
1872 stcb->asoc.shut_guard_timer.ep = (void *)new_inp;
1873 stcb->asoc.autoclose_timer.ep = (void *)new_inp;
1874 stcb->asoc.delayed_event_timer.ep = (void *)new_inp;
1875 /* now what about the nets? */
1876 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1877 net->pmtu_timer.ep = (void *)new_inp;
1878 net->rxt_timer.ep = (void *)new_inp;
1879 net->fr_timer.ep = (void *)new_inp;
1880 }
1881 SCTP_INP_WUNLOCK(new_inp);
1882 SCTP_INP_WUNLOCK(old_inp);
1883}
1884
1885static int
1604sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport)
1886sctp_isport_inuse(struct sctp_inpcb *inp, uint16_t lport, uint32_t vrf_id)
1605{
1606 struct sctppcbhead *head;
1607 struct sctp_inpcb *t_inp;
1887{
1888 struct sctppcbhead *head;
1889 struct sctp_inpcb *t_inp;
1890 int fnd;
1608
1609 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
1610 sctppcbinfo.hashmark)];
1611
1612 LIST_FOREACH(t_inp, head, sctp_hash) {
1613 if (t_inp->sctp_lport != lport) {
1614 continue;
1615 }
1891
1892 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
1893 sctppcbinfo.hashmark)];
1894
1895 LIST_FOREACH(t_inp, head, sctp_hash) {
1896 if (t_inp->sctp_lport != lport) {
1897 continue;
1898 }
1899 /* is it in the VRF in question */
1900 fnd = 0;
1901 if (t_inp->def_vrf_id == vrf_id)
1902 fnd = 1;
1903 if (!fnd)
1904 continue;
1905
1616 /* This one is in use. */
1617 /* check the v6/v4 binding issue */
1618 if ((t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1619 SCTP_IPV6_V6ONLY(t_inp)) {
1620 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1621 /* collision in V6 space */
1622 return (1);
1623 } else {
1624 /* inp is BOUND_V4 no conflict */
1625 continue;
1626 }
1627 } else if (t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1628 /* t_inp is bound v4 and v6, conflict always */
1629 return (1);
1630 } else {
1631 /* t_inp is bound only V4 */
1632 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1633 SCTP_IPV6_V6ONLY(t_inp)) {
1634 /* no conflict */
1635 continue;
1636 }
1637 /* else fall through to conflict */
1638 }
1639 return (1);
1640 }
1641 return (0);
1642}
1643
1644
1645
1646int
1647sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
1648{
1649 /* bind a ep to a socket address */
1650 struct sctppcbhead *head;
1651 struct sctp_inpcb *inp, *inp_tmp;
1652 struct inpcb *ip_inp;
1653 int bindall;
1654 uint16_t lport;
1655 int error;
1906 /* This one is in use. */
1907 /* check the v6/v4 binding issue */
1908 if ((t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1909 SCTP_IPV6_V6ONLY(t_inp)) {
1910 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1911 /* collision in V6 space */
1912 return (1);
1913 } else {
1914 /* inp is BOUND_V4 no conflict */
1915 continue;
1916 }
1917 } else if (t_inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
1918 /* t_inp is bound v4 and v6, conflict always */
1919 return (1);
1920 } else {
1921 /* t_inp is bound only V4 */
1922 if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
1923 SCTP_IPV6_V6ONLY(t_inp)) {
1924 /* no conflict */
1925 continue;
1926 }
1927 /* else fall through to conflict */
1928 }
1929 return (1);
1930 }
1931 return (0);
1932}
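
/*
 * Illustrative sketch only (not part of the diff above): the v4/v6 collision
 * rules that the comments in sctp_isport_inuse describe for two endpoints
 * wanting the same local port.  "v6only" mirrors IPV6_V6ONLY; an endpoint
 * bound v6 without v6only also covers v4-mapped addresses.  The struct and
 * helper are invented for the example and only capture the intent of the
 * comments, not the exact kernel code.
 */
#include <stdio.h>

struct ep_binding {
	int bound_v6;			/* bound through an AF_INET6 socket */
	int v6only;			/* IPV6_V6ONLY set: no v4-mapped addrs */
};

static int
ports_conflict(const struct ep_binding *existing, const struct ep_binding *newep)
{
	if (existing->bound_v6 && existing->v6only)
		return (newep->bound_v6);	/* collides only inside v6 space */
	if (existing->bound_v6)
		return (1);			/* v4+v6 bound: always collides */
	/* existing is v4 only: a v6-only newcomer can share the port */
	return (!(newep->bound_v6 && newep->v6only));
}

int
main(void)
{
	struct ep_binding v4 = { 0, 0 }, v6only = { 1, 1 }, dual = { 1, 0 };

	printf("v4 vs v6only: %d\n", ports_conflict(&v4, &v6only));	/* 0 */
	printf("v4 vs dual:   %d\n", ports_conflict(&v4, &dual));	/* 1 */
	printf("dual vs v4:   %d\n", ports_conflict(&dual, &v4));	/* 1 */
	return (0);
}
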
1933
1934
1935
1936int
1937sctp_inpcb_bind(struct socket *so, struct sockaddr *addr, struct thread *p)
1938{
1939 /* bind a ep to a socket address */
1940 struct sctppcbhead *head;
1941 struct sctp_inpcb *inp, *inp_tmp;
1942 struct inpcb *ip_inp;
1943 int bindall;
1944 uint16_t lport;
1945 int error;
1946 uint32_t vrf_id;
1656
1657 lport = 0;
1658 error = 0;
1659 bindall = 1;
1660 inp = (struct sctp_inpcb *)so->so_pcb;
1661 ip_inp = (struct inpcb *)so->so_pcb;
1662#ifdef SCTP_DEBUG
1663 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1664 if (addr) {
1665 printf("Bind called port:%d\n",
1666 ntohs(((struct sockaddr_in *)addr)->sin_port));
1667 printf("Addr :");
1668 sctp_print_address(addr);
1669 }
1670 }
1671#endif /* SCTP_DEBUG */
1672 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
1673 /* already did a bind, subsequent binds NOT allowed ! */
1674 return (EINVAL);
1675 }
1676 if (addr != NULL) {
1677 if (addr->sa_family == AF_INET) {
1678 struct sockaddr_in *sin;
1679
1680 /* IPV6_V6ONLY socket? */
1681 if (SCTP_IPV6_V6ONLY(ip_inp)) {
1682 return (EINVAL);
1683 }
1684 if (addr->sa_len != sizeof(*sin))
1685 return (EINVAL);
1686
1687 sin = (struct sockaddr_in *)addr;
1688 lport = sin->sin_port;
1689
1690 if (sin->sin_addr.s_addr != INADDR_ANY) {
1691 bindall = 0;
1692 }
1693 } else if (addr->sa_family == AF_INET6) {
1694 /* Only for pure IPv6 Address. (No IPv4 Mapped!) */
1695 struct sockaddr_in6 *sin6;
1696
1697 sin6 = (struct sockaddr_in6 *)addr;
1698
1699 if (addr->sa_len != sizeof(*sin6))
1700 return (EINVAL);
1701
1702 lport = sin6->sin6_port;
1703 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1704 bindall = 0;
1705 /* KAME hack: embed scopeid */
1706 if (sa6_embedscope(sin6, ip6_use_defzone) != 0)
1707 return (EINVAL);
1708 }
1709 /* this must be cleared for ifa_ifwithaddr() */
1710 sin6->sin6_scope_id = 0;
1711 } else {
1712 return (EAFNOSUPPORT);
1713 }
1714 }
1947
1948 lport = 0;
1949 error = 0;
1950 bindall = 1;
1951 inp = (struct sctp_inpcb *)so->so_pcb;
1952 ip_inp = (struct inpcb *)so->so_pcb;
1953#ifdef SCTP_DEBUG
1954 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1955 if (addr) {
1956 printf("Bind called port:%d\n",
1957 ntohs(((struct sockaddr_in *)addr)->sin_port));
1958 printf("Addr :");
1959 sctp_print_address(addr);
1960 }
1961 }
1962#endif /* SCTP_DEBUG */
1963 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) == 0) {
1964 /* already did a bind, subsequent binds NOT allowed ! */
1965 return (EINVAL);
1966 }
1967 if (addr != NULL) {
1968 if (addr->sa_family == AF_INET) {
1969 struct sockaddr_in *sin;
1970
1971 /* IPV6_V6ONLY socket? */
1972 if (SCTP_IPV6_V6ONLY(ip_inp)) {
1973 return (EINVAL);
1974 }
1975 if (addr->sa_len != sizeof(*sin))
1976 return (EINVAL);
1977
1978 sin = (struct sockaddr_in *)addr;
1979 lport = sin->sin_port;
1980
1981 if (sin->sin_addr.s_addr != INADDR_ANY) {
1982 bindall = 0;
1983 }
1984 } else if (addr->sa_family == AF_INET6) {
1985 /* Only for pure IPv6 Address. (No IPv4 Mapped!) */
1986 struct sockaddr_in6 *sin6;
1987
1988 sin6 = (struct sockaddr_in6 *)addr;
1989
1990 if (addr->sa_len != sizeof(*sin6))
1991 return (EINVAL);
1992
1993 lport = sin6->sin6_port;
1994 if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
1995 bindall = 0;
1996 /* KAME hack: embed scopeid */
1997 if (sa6_embedscope(sin6, ip6_use_defzone) != 0)
1998 return (EINVAL);
1999 }
2000 /* this must be cleared for ifa_ifwithaddr() */
2001 sin6->sin6_scope_id = 0;
2002 } else {
2003 return (EAFNOSUPPORT);
2004 }
2005 }
2006 /*
2007 * Setup a vrf_id to be the default for the non-bind-all case.
2008 */
2009 vrf_id = inp->def_vrf_id;
2010
1715 SCTP_INP_INFO_WLOCK();
1716 SCTP_INP_WLOCK(inp);
1717 /* increase our count due to the unlock we do */
1718 SCTP_INP_INCR_REF(inp);
1719 if (lport) {
1720 /*
 1721		 * Did the caller specify a port? If so we must see if an ep
1722 * already has this one bound.
1723 */
1724 /* got to be root to get at low ports */
1725 if (ntohs(lport) < IPPORT_RESERVED) {
1726 if (p && (error =
2011 SCTP_INP_INFO_WLOCK();
2012 SCTP_INP_WLOCK(inp);
2013 /* increase our count due to the unlock we do */
2014 SCTP_INP_INCR_REF(inp);
2015 if (lport) {
2016 /*
 2017		 * Did the caller specify a port? If so we must see if an ep
2018 * already has this one bound.
2019 */
2020 /* got to be root to get at low ports */
2021 if (ntohs(lport) < IPPORT_RESERVED) {
2022 if (p && (error =
1727 priv_check(p,
1728 PRIV_NETINET_RESERVEDPORT)
2023 priv_check_cred(p->td_ucred,
2024 PRIV_NETINET_RESERVEDPORT,
2025 SUSER_ALLOWJAIL
2026 )
1729 )) {
1730 SCTP_INP_DECR_REF(inp);
1731 SCTP_INP_WUNLOCK(inp);
1732 SCTP_INP_INFO_WUNLOCK();
1733 return (error);
1734 }
1735 }
1736 if (p == NULL) {
1737 SCTP_INP_DECR_REF(inp);
1738 SCTP_INP_WUNLOCK(inp);
1739 SCTP_INP_INFO_WUNLOCK();
1740 return (error);
1741 }
1742 SCTP_INP_WUNLOCK(inp);
2027 )) {
2028 SCTP_INP_DECR_REF(inp);
2029 SCTP_INP_WUNLOCK(inp);
2030 SCTP_INP_INFO_WUNLOCK();
2031 return (error);
2032 }
2033 }
2034 if (p == NULL) {
2035 SCTP_INP_DECR_REF(inp);
2036 SCTP_INP_WUNLOCK(inp);
2037 SCTP_INP_INFO_WUNLOCK();
2038 return (error);
2039 }
2040 SCTP_INP_WUNLOCK(inp);
1743 inp_tmp = sctp_pcb_findep(addr, 0, 1);
1744 if (inp_tmp != NULL) {
1745 /*
1746 * the locked lookup returned an ep, so lower the counts. Note that we
1747 * are not bound, so inp_tmp should NEVER be inp; and
1748 * it is that inp (inp_tmp) that got the reference
1749 * bump, so we must lower it.
1750 */
1751 SCTP_INP_DECR_REF(inp_tmp);
1752 SCTP_INP_DECR_REF(inp);
1753 /* unlock info */
1754 SCTP_INP_INFO_WUNLOCK();
1755 return (EADDRNOTAVAIL);
2041 if (bindall) {
2042 vrf_id = inp->def_vrf_id;
2043 inp_tmp = sctp_pcb_findep(addr, 0, 1, vrf_id);
2044 if (inp_tmp != NULL) {
2045 /*
2046 * the locked lookup returned an ep, so lower
2047 * the counts. Note that we are not bound, so
2048 * inp_tmp should NEVER be inp; and it is that
2049 * inp (inp_tmp) that got the reference bump,
2050 * so we must lower it.
2051 */
2052 SCTP_INP_DECR_REF(inp_tmp);
2053 SCTP_INP_DECR_REF(inp);
2054 /* unlock info */
2055 SCTP_INP_INFO_WUNLOCK();
2056 return (EADDRNOTAVAIL);
2057 }
2058 } else {
2059 inp_tmp = sctp_pcb_findep(addr, 0, 1, vrf_id);
2060 if (inp_tmp != NULL) {
2061 /*
2062 * the locked lookup returned an ep, so lower
2063 * the counts. Note that we are not bound, so
2064 * inp_tmp should NEVER be inp; and it is that
2065 * inp (inp_tmp) that got the reference bump,
2066 * so we must lower it.
2067 */
2068 SCTP_INP_DECR_REF(inp_tmp);
2069 SCTP_INP_DECR_REF(inp);
2070 /* unlock info */
2071 SCTP_INP_INFO_WUNLOCK();
2072 return (EADDRNOTAVAIL);
2073 }
1756 }
1757 SCTP_INP_WLOCK(inp);
1758 if (bindall) {
1759 /* verify that the lport is not used by a singleton */
2074 }
2075 SCTP_INP_WLOCK(inp);
2076 if (bindall) {
2077 /* verify that the lport is not used by a singleton */
1760 if (sctp_isport_inuse(inp, lport)) {
2078 if (sctp_isport_inuse(inp, lport, vrf_id)) {
1761 /* Sorry someone already has this one bound */
1762 SCTP_INP_DECR_REF(inp);
1763 SCTP_INP_WUNLOCK(inp);
1764 SCTP_INP_INFO_WUNLOCK();
1765 return (EADDRNOTAVAIL);
1766 }
1767 }
1768 } else {
1769 /*
1770 * get any port but let's make sure no one has any address
1771 * with this port bound
1772 */
1773
1774 /*
1775 * setup the inp to the top (I could use the union but this
1776 * is just as easy)
1777 */
1778 uint32_t port_guess;
1779 uint16_t port_attempt;
1780 int not_done = 1;
2079 /* Sorry someone already has this one bound */
2080 SCTP_INP_DECR_REF(inp);
2081 SCTP_INP_WUNLOCK(inp);
2082 SCTP_INP_INFO_WUNLOCK();
2083 return (EADDRNOTAVAIL);
2084 }
2085 }
2086 } else {
2087 /*
2088 * get any port but let's make sure no one has any address
2089 * with this port bound
2090 */
2091
2092 /*
2093 * setup the inp to the top (I could use the union but this
2094 * is just as easy)
2095 */
2096 uint32_t port_guess;
2097 uint16_t port_attempt;
2098 int not_done = 1;
2099 int not_found = 1;
1781
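 /*
  * Ephemeral port selection: draw a 32-bit random value from
  * sctp_select_initial_TSN(), try its low 16 bits, then its high 16
  * bits, then the sum of the two halves; any candidate below
  * IPPORT_RESERVED is pushed up into the unprivileged range, and the
  * loop repeats with a fresh random value until an unused port is found.
  */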
1782 while (not_done) {
1783 port_guess = sctp_select_initial_TSN(&inp->sctp_ep);
1784 port_attempt = (port_guess & 0x0000ffff);
1785 if (port_attempt == 0) {
1786 goto next_half;
1787 }
1788 if (port_attempt < IPPORT_RESERVED) {
1789 port_attempt += IPPORT_RESERVED;
1790 }
2100
2101 while (not_done) {
2102 port_guess = sctp_select_initial_TSN(&inp->sctp_ep);
2103 port_attempt = (port_guess & 0x0000ffff);
2104 if (port_attempt == 0) {
2105 goto next_half;
2106 }
2107 if (port_attempt < IPPORT_RESERVED) {
2108 port_attempt += IPPORT_RESERVED;
2109 }
1791 if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
2110 vrf_id = inp->def_vrf_id;
2111 if (sctp_isport_inuse(inp, htons(port_attempt), vrf_id) == 1) {
1792 /* got a port we can use */
2112 /* got a port we can use */
2113 not_found = 0;
2114 break;
2115 }
2116 if (not_found == 1) {
2117 /* We can use this port */
1793 not_done = 0;
1794 continue;
1795 }
1796 /* try upper half */
1797 next_half:
1798 port_attempt = ((port_guess >> 16) & 0x0000ffff);
1799 if (port_attempt == 0) {
1800 goto last_try;
1801 }
1802 if (port_attempt < IPPORT_RESERVED) {
1803 port_attempt += IPPORT_RESERVED;
1804 }
2118 not_done = 0;
2119 continue;
2120 }
2121 /* try upper half */
2122 next_half:
2123 port_attempt = ((port_guess >> 16) & 0x0000ffff);
2124 if (port_attempt == 0) {
2125 goto last_try;
2126 }
2127 if (port_attempt < IPPORT_RESERVED) {
2128 port_attempt += IPPORT_RESERVED;
2129 }
1805 if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
2130 vrf_id = inp->def_vrf_id;
2131 if (sctp_isport_inuse(inp, htons(port_attempt), vrf_id) == 1) {
1806 /* got a port we can use */
2132 /* got a port we can use */
2133 not_found = 0;
2134 break;
2135 }
2136 if (not_found == 1) {
2137 /* We can use this port */
1807 not_done = 0;
1808 continue;
1809 }
1810 /* try the two halves added together */
1811 last_try:
1812 port_attempt = (((port_guess >> 16) & 0x0000ffff) +
1813 (port_guess & 0x0000ffff));
1814 if (port_attempt == 0) {
1815 /* get a new random number */
1816 continue;
1817 }
1818 if (port_attempt < IPPORT_RESERVED) {
1819 port_attempt += IPPORT_RESERVED;
1820 }
2138 not_done = 0;
2139 continue;
2140 }
2141 /* try the two halves added together */
2142 last_try:
2143 port_attempt = (((port_guess >> 16) & 0x0000ffff) +
2144 (port_guess & 0x0000ffff));
2145 if (port_attempt == 0) {
2146 /* get a new random number */
2147 continue;
2148 }
2149 if (port_attempt < IPPORT_RESERVED) {
2150 port_attempt += IPPORT_RESERVED;
2151 }
1821 if (sctp_isport_inuse(inp, htons(port_attempt)) == 0) {
2152 vrf_id = inp->def_vrf_id;
2153 if (sctp_isport_inuse(inp, htons(port_attempt), vrf_id) == 1) {
1822 /* got a port we can use */
2154 /* got a port we can use */
2155 not_found = 0;
2156 break;
2157 }
2158 if (not_found == 1) {
2159 /* We can use this port */
1823 not_done = 0;
1824 continue;
1825 }
1826 }
1827 /* we don't get out of the loop until we have a port */
1828 lport = htons(port_attempt);
1829 }
1830 SCTP_INP_DECR_REF(inp);
1831 if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE |
1832 SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
1833 /*
1834 * this really should not happen. The guy did a non-blocking
1835 * bind and then did a close at the same time.
1836 */
1837 SCTP_INP_WUNLOCK(inp);
1838 SCTP_INP_INFO_WUNLOCK();
1839 return (EINVAL);
1840 }
1841 /* ok, we look clear to give out this port, so let's set up the binding */
1842 if (bindall) {
1843 /* binding to all addresses, so just set in the proper flags */
1844 inp->sctp_flags |= SCTP_PCB_FLAGS_BOUNDALL;
1845 sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
1846 /* set the automatic addr changes from kernel flag */
1847 if (sctp_auto_asconf == 0) {
1848 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
1849 } else {
1850 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
1851 }
1852 } else {
1853 /*
1854 * bind specific; make sure the bound-all flag is off and add a new
1855 * address structure to the sctp_addr_list inside the ep
1856 * structure.
1857 *
1858 * We will need to allocate one and insert it at the head. The
1859 * socketopt call can just insert new addresses in there as
1860 * well. It will also have to do the embed scope kame hack
1861 * too (before adding).
1862 */
2160 not_done = 0;
2161 continue;
2162 }
2163 }
2164 /* we don't get out of the loop until we have a port */
2165 lport = htons(port_attempt);
2166 }
2167 SCTP_INP_DECR_REF(inp);
2168 if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE |
2169 SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
2170 /*
2171 * this really should not happen. The guy did a non-blocking
2172 * bind and then did a close at the same time.
2173 */
2174 SCTP_INP_WUNLOCK(inp);
2175 SCTP_INP_INFO_WUNLOCK();
2176 return (EINVAL);
2177 }
2178 /* ok, we look clear to give out this port, so let's set up the binding */
2179 if (bindall) {
2180 /* binding to all addresses, so just set in the proper flags */
2181 inp->sctp_flags |= SCTP_PCB_FLAGS_BOUNDALL;
2182 sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
2183 /* set the automatic addr changes from kernel flag */
2184 if (sctp_auto_asconf == 0) {
2185 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
2186 } else {
2187 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
2188 }
2189 } else {
2190 /*
2191 * bind specific; make sure the bound-all flag is off and add a new
2192 * address structure to the sctp_addr_list inside the ep
2193 * structure.
2194 *
2195 * We will need to allocate one and insert it at the head. The
2196 * socketopt call can just insert new addresses in there as
2197 * well. It will also have to do the embed scope kame hack
2198 * too (before adding).
2199 */
1863 struct ifaddr *ifa;
2200 struct sctp_ifa *ifa;
1864 struct sockaddr_storage store_sa;
1865
1866 memset(&store_sa, 0, sizeof(store_sa));
1867 if (addr->sa_family == AF_INET) {
1868 struct sockaddr_in *sin;
1869
1870 sin = (struct sockaddr_in *)&store_sa;
1871 memcpy(sin, addr, sizeof(struct sockaddr_in));
1872 sin->sin_port = 0;
1873 } else if (addr->sa_family == AF_INET6) {
1874 struct sockaddr_in6 *sin6;
1875
1876 sin6 = (struct sockaddr_in6 *)&store_sa;
1877 memcpy(sin6, addr, sizeof(struct sockaddr_in6));
1878 sin6->sin6_port = 0;
1879 }
1880 /*
1881 * first find the interface with the bound address; we need to
1882 * zero out the port to find the address (yuck!) and can't do
1883 * this earlier since sctp_pcb_findep() needs the port
1884 */
2201 struct sockaddr_storage store_sa;
2202
2203 memset(&store_sa, 0, sizeof(store_sa));
2204 if (addr->sa_family == AF_INET) {
2205 struct sockaddr_in *sin;
2206
2207 sin = (struct sockaddr_in *)&store_sa;
2208 memcpy(sin, addr, sizeof(struct sockaddr_in));
2209 sin->sin_port = 0;
2210 } else if (addr->sa_family == AF_INET6) {
2211 struct sockaddr_in6 *sin6;
2212
2213 sin6 = (struct sockaddr_in6 *)&store_sa;
2214 memcpy(sin6, addr, sizeof(struct sockaddr_in6));
2215 sin6->sin6_port = 0;
2216 }
2217 /*
2218 * first find the interface with the bound address; we need to
2219 * zero out the port to find the address (yuck!) and can't do
2220 * this earlier since sctp_pcb_findep() needs the port
2221 */
1885 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&store_sa);
2222 ifa = sctp_find_ifa_by_addr((struct sockaddr *)&store_sa, vrf_id, 0);
1886 if (ifa == NULL) {
1887 /* Can't find an interface with that address */
1888 SCTP_INP_WUNLOCK(inp);
1889 SCTP_INP_INFO_WUNLOCK();
1890 return (EADDRNOTAVAIL);
1891 }
1892 if (addr->sa_family == AF_INET6) {
2223 if (ifa == NULL) {
2224 /* Can't find an interface with that address */
2225 SCTP_INP_WUNLOCK(inp);
2226 SCTP_INP_INFO_WUNLOCK();
2227 return (EADDRNOTAVAIL);
2228 }
2229 if (addr->sa_family == AF_INET6) {
1893 struct in6_ifaddr *ifa6;
1894
1895 ifa6 = (struct in6_ifaddr *)ifa;
1896 /*
1897 * allow binding of deprecated addresses as per RFC
1898 * 2462 and ipng discussion
1899 */
1900 if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
1901 IN6_IFF_ANYCAST |
1902 IN6_IFF_NOTREADY)) {
2230 /* GAK, more FIXME IFA lock? */
2231 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
1903 /* Can't bind a non-existent addr. */
1904 SCTP_INP_WUNLOCK(inp);
1905 SCTP_INP_INFO_WUNLOCK();
1906 return (EINVAL);
1907 }
1908 }
1909 /* we're not bound all */
1910 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUNDALL;
1911 /* set the automatic addr changes from kernel flag */
2232 /* Can't bind a non-existent addr. */
2233 SCTP_INP_WUNLOCK(inp);
2234 SCTP_INP_INFO_WUNLOCK();
2235 return (EINVAL);
2236 }
2237 }
2238 /* we're not bound all */
2239 inp->sctp_flags &= ~SCTP_PCB_FLAGS_BOUNDALL;
2240 /* set the automatic addr changes from kernel flag */
2241 sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
1912 if (sctp_auto_asconf == 0) {
1913 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
1914 } else {
2242 if (sctp_auto_asconf == 0) {
2243 sctp_feature_off(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
2244 } else {
2245 /*
2246 * allow bindx() to send ASCONF's for binding
2247 * changes
2248 */
1915 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
1916 }
2249 sctp_feature_on(inp, SCTP_PCB_FLAGS_AUTO_ASCONF);
2250 }
1917 /* allow bindx() to send ASCONF's for binding changes */
1918 sctp_feature_on(inp, SCTP_PCB_FLAGS_DO_ASCONF);
2251
1919 /* add this address to the endpoint list */
2252 /* add this address to the endpoint list */
1920 error = sctp_insert_laddr(&inp->sctp_addr_list, ifa);
2253 error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, 0);
1921 if (error != 0) {
1922 SCTP_INP_WUNLOCK(inp);
1923 SCTP_INP_INFO_WUNLOCK();
1924 return (error);
1925 }
1926 inp->laddr_count++;
1927 }
1928 /* find the bucket */
1929 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
1930 sctppcbinfo.hashmark)];
1931 /* put it in the bucket */
1932 LIST_INSERT_HEAD(head, inp, sctp_hash);
1933#ifdef SCTP_DEBUG
1934 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
1935 printf("Main hash to bind at head:%p, bound port:%d\n", head, ntohs(lport));
1936 }
1937#endif
1938 /* set in the port */
1939 inp->sctp_lport = lport;
1940
1941 /* turn off just the unbound flag */
1942 inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND;
1943 SCTP_INP_WUNLOCK(inp);
1944 SCTP_INP_INFO_WUNLOCK();
1945 return (0);
1946}
1947
1948
1949static void
1950sctp_iterator_inp_being_freed(struct sctp_inpcb *inp, struct sctp_inpcb *inp_next)
1951{
1952 struct sctp_iterator *it;
1953
1954 /*
1955 * We enter with only the ITERATOR_LOCK in place and a write
1956 * lock on the inp_info stuff.
1957 */
1958
1959 /*
1960 * Go through all iterators, we must do this since it is possible
1961 * that some iterator does NOT have the lock, but is waiting for it.
1962 * And the one that had the lock has either moved in the last
1963 * iteration or we just cleared it above. We need to find all of
1964 * those guys. The list of iterators should never be very big
1965 * though.
1966 */
2254 if (error != 0) {
2255 SCTP_INP_WUNLOCK(inp);
2256 SCTP_INP_INFO_WUNLOCK();
2257 return (error);
2258 }
2259 inp->laddr_count++;
2260 }
2261 /* find the bucket */
2262 head = &sctppcbinfo.sctp_ephash[SCTP_PCBHASH_ALLADDR(lport,
2263 sctppcbinfo.hashmark)];
2264 /* put it in the bucket */
2265 LIST_INSERT_HEAD(head, inp, sctp_hash);
2266#ifdef SCTP_DEBUG
2267 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
2268 printf("Main hash to bind at head:%p, bound port:%d\n", head, ntohs(lport));
2269 }
2270#endif
2271 /* set in the port */
2272 inp->sctp_lport = lport;
2273
2274 /* turn off just the unbound flag */
2275 inp->sctp_flags &= ~SCTP_PCB_FLAGS_UNBOUND;
2276 SCTP_INP_WUNLOCK(inp);
2277 SCTP_INP_INFO_WUNLOCK();
2278 return (0);
2279}
2280
2281
2282static void
2283sctp_iterator_inp_being_freed(struct sctp_inpcb *inp, struct sctp_inpcb *inp_next)
2284{
2285 struct sctp_iterator *it;
2286
2287 /*
2288 * We enter with only the ITERATOR_LOCK in place and a write
2289 * lock on the inp_info stuff.
2290 */
2291
2292 /*
2293 * Go through all iterators, we must do this since it is possible
2294 * that some iterator does NOT have the lock, but is waiting for it.
2295 * And the one that had the lock has either moved in the last
2296 * iteration or we just cleared it above. We need to find all of
2297 * those guys. The list of iterators should never be very big
2298 * though.
2299 */
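 /*
  * Any iterator currently pointing at the inp being freed is redirected:
  * to NULL for a single-inp iteration, otherwise to the next inp in the
  * list, so it can continue safely. The iterator that holds this inp as
  * its starting point is handled separately after the loop.
  */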
1967 LIST_FOREACH(it, &sctppcbinfo.iteratorhead, sctp_nxt_itr) {
2300 TAILQ_FOREACH(it, &sctppcbinfo.iteratorhead, sctp_nxt_itr) {
1968 if (it == inp->inp_starting_point_for_iterator)
1969 /* skip this guy, he's special */
1970 continue;
1971 if (it->inp == inp) {
1972 /*
1973 * This is tricky and we DON'T lock the iterator.
1974 * Reason is he's running but waiting for me since
1975 * inp->inp_starting_point_for_iterator has the lock
1976 * on me (the guy above we skipped). This tells us
1977 * it is not running but waiting for
1978 * inp->inp_starting_point_for_iterator to be
1979 * released by the guy that does have our INP in a
1980 * lock.
1981 */
1982 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1983 it->inp = NULL;
1984 it->stcb = NULL;
1985 } else {
1986 /* set him up to do the next guy not me */
1987 it->inp = inp_next;
1988 it->stcb = NULL;
1989 }
1990 }
1991 }
1992 it = inp->inp_starting_point_for_iterator;
1993 if (it) {
1994 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1995 it->inp = NULL;
1996 } else {
1997 it->inp = inp_next;
1998 }
1999 it->stcb = NULL;
2000 }
2001}
2002
2003/* release the sctp_inpcb and unbind the port */
2004void
2005sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
2006{
2007 /*
2008 * Here we free an endpoint. We must find it (if it is in the Hash
2009 * table) and remove it from there. Then we must also find it in the
2010 * overall list and remove it from there. After all removals are
2011 * complete then any timer has to be stopped. Then start the actual
2012 * freeing. a) Any local lists. b) Any associations. c) The hash of
2013 * all associations. d) finally the ep itself.
2014 */
2015 struct sctp_pcb *m;
2016 struct sctp_inpcb *inp_save;
2017 struct sctp_tcb *asoc, *nasoc;
2018 struct sctp_laddr *laddr, *nladdr;
2019 struct inpcb *ip_pcb;
2020 struct socket *so;
2021
2022 struct sctp_queued_to_read *sq;
2023
2024 int cnt;
2025 sctp_sharedkey_t *shared_key;
2026
2027
2028#ifdef SCTP_LOG_CLOSING
2029 sctp_log_closing(inp, NULL, 0);
2030#endif
2031
2032 SCTP_ITERATOR_LOCK();
2033 so = inp->sctp_socket;
2034 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
2035 /* been here before.. eeks.. get out of here */
2036 printf("This conflict in free SHOULD not be happening!\n");
2037 SCTP_ITERATOR_UNLOCK();
2038#ifdef SCTP_LOG_CLOSING
2039 sctp_log_closing(inp, NULL, 1);
2040#endif
2041 return;
2042 }
2043 SCTP_ASOC_CREATE_LOCK(inp);
2044 SCTP_INP_INFO_WLOCK();
2045
2046 SCTP_INP_WLOCK(inp);
2047 /*
2048 * First time through we have the socket lock, after that no more.
2049 */
2050 if (from == 1) {
2051 /*
2052 * Once we are in we can remove the flag; from == 1 is only
2053 * passed from the actual closing routines that are called
2054 * via the sockets layer.
2055 */
2056 inp->sctp_flags &= ~SCTP_PCB_FLAGS_CLOSE_IP;
2057 }
2058 sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL,
2059 SCTP_FROM_SCTP_PCB + SCTP_LOC_1);
2060
2061 if (inp->control) {
2062 sctp_m_freem(inp->control);
2063 inp->control = NULL;
2064 }
2065 if (inp->pkt) {
2066 sctp_m_freem(inp->pkt);
2067 inp->pkt = NULL;
2068 }
2069 m = &inp->sctp_ep;
2070 ip_pcb = &inp->ip_inp.inp; /* we could just cast the main pointer
2071 * here but I will be nice :> (i.e.
2072 * ip_pcb = ep;) */
2073 if (immediate == 0) {
2074 int cnt_in_sd;
2075
2076 cnt_in_sd = 0;
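 /*
  * Graceful (non-immediate) close: walk every association on this
  * endpoint. Handshake-stage associations with nothing queued are simply
  * freed; ones with unread or undelivered data are aborted; ones whose
  * send queues are empty get a SHUTDOWN; everything else is marked
  * SHUTDOWN_PENDING and counted so the endpoint free is deferred.
  */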
2077 for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL;
2078 asoc = nasoc) {
2079 nasoc = LIST_NEXT(asoc, sctp_tcblist);
2080 if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
2081 /* Skip guys being freed */
2082 asoc->sctp_socket = NULL;
2083 cnt_in_sd++;
2084 continue;
2085 }
2086 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2087 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2088 /* Just abandon things in the front states */
2089 if (asoc->asoc.total_output_queue_size == 0) {
2090 sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_2);
2091 continue;
2092 }
2093 }
2094 SCTP_TCB_LOCK(asoc);
2095 /* Disconnect the socket please */
2096 asoc->sctp_socket = NULL;
2097 asoc->asoc.state |= SCTP_STATE_CLOSED_SOCKET;
2098 if ((asoc->asoc.size_on_reasm_queue > 0) ||
2099 (asoc->asoc.control_pdapi) ||
2100 (asoc->asoc.size_on_all_streams > 0) ||
2101 (so && (so->so_rcv.sb_cc > 0))
2102 ) {
2103 /* Left with Data unread */
2104 struct mbuf *op_err;
2105
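 /*
  * Build the operation-error cause carried in the ABORT: a parameter
  * header tagged SCTP_CAUSE_USER_INITIATED_ABT followed by a 32-bit
  * code recording where in the PCB code the abort originated.
  */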
2106 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
2107 0, M_DONTWAIT, 1, MT_DATA);
2108 if (op_err) {
2109 /* Fill in the user initiated abort */
2110 struct sctp_paramhdr *ph;
2111 uint32_t *ippp;
2112
2113 SCTP_BUF_LEN(op_err) =
2114 sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
2115 ph = mtod(op_err,
2116 struct sctp_paramhdr *);
2117 ph->param_type = htons(
2118 SCTP_CAUSE_USER_INITIATED_ABT);
2119 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2120 ippp = (uint32_t *) (ph + 1);
2121 *ippp = htonl(SCTP_FROM_SCTP_PCB + SCTP_LOC_3);
2122 }
2123 asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_3;
2124 sctp_send_abort_tcb(asoc, op_err);
2125 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
2126 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
2127 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
2128 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
2129 }
2130 sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_4);
2131 continue;
2132 } else if (TAILQ_EMPTY(&asoc->asoc.send_queue) &&
2133 TAILQ_EMPTY(&asoc->asoc.sent_queue) &&
2134 (asoc->asoc.stream_queue_cnt == 0)
2135 ) {
2136 if (asoc->asoc.locked_on_sending) {
2137 goto abort_anyway;
2138 }
2139 if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
2140 (SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
2141 /*
2142 * there is nothing queued to send,
2143 * so I send shutdown
2144 */
2145 sctp_send_shutdown(asoc, asoc->asoc.primary_destination);
2146 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
2147 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
2148 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
2149 }
2150 asoc->asoc.state = SCTP_STATE_SHUTDOWN_SENT;
2151 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, asoc->sctp_ep, asoc,
2152 asoc->asoc.primary_destination);
2153 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc,
2154 asoc->asoc.primary_destination);
2155 sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_SHUT_TMR);
2156 }
2157 } else {
2158 /* mark into shutdown pending */
2159 struct sctp_stream_queue_pending *sp;
2160
2161 asoc->asoc.state |= SCTP_STATE_SHUTDOWN_PENDING;
2162 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc,
2163 asoc->asoc.primary_destination);
2164 if (asoc->asoc.locked_on_sending) {
2165 sp = TAILQ_LAST(&((asoc->asoc.locked_on_sending)->outqueue),
2166 sctp_streamhead);
2167 if (sp == NULL) {
2168 printf("Error, sp is NULL, locked on sending is %p strm:%d\n",
2169 asoc->asoc.locked_on_sending,
2170 asoc->asoc.locked_on_sending->stream_no);
2171 } else {
2172 if ((sp->length == 0) && (sp->msg_is_complete == 0))
2173 asoc->asoc.state |= SCTP_STATE_PARTIAL_MSG_LEFT;
2174 }
2175 }
2176 if (TAILQ_EMPTY(&asoc->asoc.send_queue) &&
2177 TAILQ_EMPTY(&asoc->asoc.sent_queue) &&
2178 (asoc->asoc.state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
2179 struct mbuf *op_err;
2180
2181 abort_anyway:
2182 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
2183 0, M_DONTWAIT, 1, MT_DATA);
2184 if (op_err) {
2185 /*
2186 * Fill in the user
2187 * initiated abort
2188 */
2189 struct sctp_paramhdr *ph;
2190 uint32_t *ippp;
2191
2192 SCTP_BUF_LEN(op_err) =
2193 (sizeof(struct sctp_paramhdr) +
2194 sizeof(uint32_t));
2195 ph = mtod(op_err,
2196 struct sctp_paramhdr *);
2197 ph->param_type = htons(
2198 SCTP_CAUSE_USER_INITIATED_ABT);
2199 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2200 ippp = (uint32_t *) (ph + 1);
2201 *ippp = htonl(SCTP_FROM_SCTP_PCB + SCTP_LOC_5);
2202 }
2203 asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_5;
2204 sctp_send_abort_tcb(asoc, op_err);
2205 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
2206 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
2207 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
2208 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
2209 }
2210 sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_6);
2211 continue;
2212 }
2213 }
2214 cnt_in_sd++;
2215 SCTP_TCB_UNLOCK(asoc);
2216 }
2217 /* now is there some left in our SHUTDOWN state? */
2218 if (cnt_in_sd) {
2219 SCTP_INP_WUNLOCK(inp);
2220 SCTP_ASOC_CREATE_UNLOCK(inp);
2221 SCTP_INP_INFO_WUNLOCK();
2222 SCTP_ITERATOR_UNLOCK();
2223#ifdef SCTP_LOG_CLOSING
2224 sctp_log_closing(inp, NULL, 2);
2225#endif
2226 return;
2227 }
2228 }
2229 inp->sctp_socket = NULL;
2230 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) !=
2231 SCTP_PCB_FLAGS_UNBOUND) {
2232 /*
2233 * ok, this guy has been bound. Its port is somewhere in
2234 * the sctppcbinfo hash table. Remove it!
2235 */
2236 LIST_REMOVE(inp, sctp_hash);
2237 inp->sctp_flags |= SCTP_PCB_FLAGS_UNBOUND;
2238 }
2239 /*
2240 * If there is a timer running to kill us, forget it, since it may
2241 * be contending for the INP lock, which would cause us to die ...
2242 */
2243 cnt = 0;
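 /*
  * Second pass: force-abort and free any associations that remain,
  * except those already marked ABOUT_TO_BE_FREED, which are only
  * counted here and will finish tearing themselves down.
  */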
2244 for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL;
2245 asoc = nasoc) {
2246 nasoc = LIST_NEXT(asoc, sctp_tcblist);
2247 if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
2248 cnt++;
2249 continue;
2250 }
2251 /* Free associations that are NOT killing us */
2252 SCTP_TCB_LOCK(asoc);
2253 if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_COOKIE_WAIT) &&
2254 ((asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) {
2255 struct mbuf *op_err;
2256 uint32_t *ippp;
2257
2258 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
2259 0, M_DONTWAIT, 1, MT_DATA);
2260 if (op_err) {
2261 /* Fill in the user initiated abort */
2262 struct sctp_paramhdr *ph;
2263
2264 SCTP_BUF_LEN(op_err) = (sizeof(struct sctp_paramhdr) +
2265 sizeof(uint32_t));
2266 ph = mtod(op_err, struct sctp_paramhdr *);
2267 ph->param_type = htons(
2268 SCTP_CAUSE_USER_INITIATED_ABT);
2269 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2270 ippp = (uint32_t *) (ph + 1);
2271 *ippp = htonl(SCTP_FROM_SCTP_PCB + SCTP_LOC_7);
2272
2273 }
2274 asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_7;
2275 sctp_send_abort_tcb(asoc, op_err);
2276 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
2277 } else if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
2278 cnt++;
2279 SCTP_TCB_UNLOCK(asoc);
2280 continue;
2281 }
2282 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
2283 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
2284 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
2285 }
2286 sctp_free_assoc(inp, asoc, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_8);
2287 }
2288 if (cnt) {
2289 /* Ok we have someone out there that will kill us */
2290 SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
2291 SCTP_INP_WUNLOCK(inp);
2292 SCTP_ASOC_CREATE_UNLOCK(inp);
2293 SCTP_INP_INFO_WUNLOCK();
2294 SCTP_ITERATOR_UNLOCK();
2295#ifdef SCTP_LOG_CLOSING
2296 sctp_log_closing(inp, NULL, 3);
2297#endif
2298 return;
2299 }
2300 if ((inp->refcount) || (inp->sctp_flags & SCTP_PCB_FLAGS_CLOSE_IP)) {
2301 SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
2302 sctp_timer_start(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
2303 SCTP_INP_WUNLOCK(inp);
2304 SCTP_ASOC_CREATE_UNLOCK(inp);
2305 SCTP_INP_INFO_WUNLOCK();
2306 SCTP_ITERATOR_UNLOCK();
2307#ifdef SCTP_LOG_CLOSING
2308 sctp_log_closing(inp, NULL, 4);
2309#endif
2310 return;
2311 }
2312 SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
2313 inp->sctp_ep.signature_change.type = 0;
2314 inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_ALLGONE;
2315
2316#ifdef SCTP_LOG_CLOSING
2317 sctp_log_closing(inp, NULL, 5);
2318#endif
2319
2320 SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
2321 inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NONE;
2322 /* Clear the read queue */
2323 while ((sq = TAILQ_FIRST(&inp->read_queue)) != NULL) {
2324 TAILQ_REMOVE(&inp->read_queue, sq, next);
2325 sctp_free_remote_addr(sq->whoFrom);
2326 if (so)
2327 so->so_rcv.sb_cc -= sq->length;
2328 if (sq->data) {
2329 sctp_m_freem(sq->data);
2330 sq->data = NULL;
2331 }
2332 /*
2333 * no need to free the net count, since at this point all
2334 * assoc's are gone.
2335 */
2336 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, sq);
2337 SCTP_DECR_READQ_COUNT();
2338 }
2339 /* Now the sctp_pcb things */
2340 /*
2341 * free each asoc if it is not already closed/free. we can't use the
2342 * macro here since le_next will get freed as part of the
2343 * sctp_free_assoc() call.
2344 */
2345 cnt = 0;
2346 if (so) {
2347#ifdef IPSEC
2348 ipsec4_delete_pcbpolicy(ip_pcb);
2349#endif /* IPSEC */
2350
2351 /* Unlocks not needed since the socket is gone now */
2352 }
2353 if (ip_pcb->inp_options) {
2354 (void)sctp_m_free(ip_pcb->inp_options);
2355 ip_pcb->inp_options = 0;
2356 }
2357 if (ip_pcb->inp_moptions) {
2358 ip_freemoptions(ip_pcb->inp_moptions);
2359 ip_pcb->inp_moptions = 0;
2360 }
2361#ifdef INET6
2362 if (ip_pcb->inp_vflag & INP_IPV6) {
2363 struct in6pcb *in6p;
2364
2365 in6p = (struct in6pcb *)inp;
2366 ip6_freepcbopts(in6p->in6p_outputopts);
2367 }
2368#endif /* INET6 */
2369 ip_pcb->inp_vflag = 0;
2370 /* free up authentication fields */
2371 if (inp->sctp_ep.local_auth_chunks != NULL)
2372 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2373 if (inp->sctp_ep.local_hmacs != NULL)
2374 sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2375
2376 shared_key = LIST_FIRST(&inp->sctp_ep.shared_keys);
2377 while (shared_key) {
2378 LIST_REMOVE(shared_key, next);
2379 sctp_free_sharedkey(shared_key);
2380 shared_key = LIST_FIRST(&inp->sctp_ep.shared_keys);
2381 }
2382
2383 inp_save = LIST_NEXT(inp, sctp_list);
2384 LIST_REMOVE(inp, sctp_list);
2385
2386 /* fix any iterators only after we are off the list */
2387 sctp_iterator_inp_being_freed(inp, inp_save);
2388 /*
2389 * if we have an address list the following will free the list of
2390 * ifaddr's that are set into this ep. Again macro limitations here,
2391 * since the LIST_FOREACH could be a bad idea.
2392 */
2393 for ((laddr = LIST_FIRST(&inp->sctp_addr_list)); laddr != NULL;
2394 laddr = nladdr) {
2395 nladdr = LIST_NEXT(laddr, sctp_nxt_addr);
2301 if (it == inp->inp_starting_point_for_iterator)
2302 /* skip this guy, he's special */
2303 continue;
2304 if (it->inp == inp) {
2305 /*
2306 * This is tricky and we DON'T lock the iterator.
2307 * Reason is he's running but waiting for me since
2308 * inp->inp_starting_point_for_iterator has the lock
2309 * on me (the guy above we skipped). This tells us
2310 * it is not running but waiting for
2311 * inp->inp_starting_point_for_iterator to be
2312 * released by the guy that does have our INP in a
2313 * lock.
2314 */
2315 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
2316 it->inp = NULL;
2317 it->stcb = NULL;
2318 } else {
2319 /* set him up to do the next guy not me */
2320 it->inp = inp_next;
2321 it->stcb = NULL;
2322 }
2323 }
2324 }
2325 it = inp->inp_starting_point_for_iterator;
2326 if (it) {
2327 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
2328 it->inp = NULL;
2329 } else {
2330 it->inp = inp_next;
2331 }
2332 it->stcb = NULL;
2333 }
2334}
2335
2336/* release the sctp_inpcb and unbind the port */
2337void
2338sctp_inpcb_free(struct sctp_inpcb *inp, int immediate, int from)
2339{
2340 /*
2341 * Here we free an endpoint. We must find it (if it is in the Hash
2342 * table) and remove it from there. Then we must also find it in the
2343 * overall list and remove it from there. After all removals are
2344 * complete then any timer has to be stopped. Then start the actual
2345 * freeing. a) Any local lists. b) Any associations. c) The hash of
2346 * all associations. d) finally the ep itself.
2347 */
2348 struct sctp_pcb *m;
2349 struct sctp_inpcb *inp_save;
2350 struct sctp_tcb *asoc, *nasoc;
2351 struct sctp_laddr *laddr, *nladdr;
2352 struct inpcb *ip_pcb;
2353 struct socket *so;
2354
2355 struct sctp_queued_to_read *sq;
2356
2357 int cnt;
2358 sctp_sharedkey_t *shared_key;
2359
2360
2361#ifdef SCTP_LOG_CLOSING
2362 sctp_log_closing(inp, NULL, 0);
2363#endif
2364
2365 SCTP_ITERATOR_LOCK();
2366 so = inp->sctp_socket;
2367 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) {
2368 /* been here before.. eeks.. get out of here */
2369 printf("This conflict in free SHOULD not be happening!\n");
2370 SCTP_ITERATOR_UNLOCK();
2371#ifdef SCTP_LOG_CLOSING
2372 sctp_log_closing(inp, NULL, 1);
2373#endif
2374 return;
2375 }
2376 SCTP_ASOC_CREATE_LOCK(inp);
2377 SCTP_INP_INFO_WLOCK();
2378
2379 SCTP_INP_WLOCK(inp);
2380 /*
2381 * First time through we have the socket lock, after that no more.
2382 */
2383 if (from == 1) {
2384 /*
2385 * Once we are in we can remove the flag; from == 1 is only
2386 * passed from the actual closing routines that are called
2387 * via the sockets layer.
2388 */
2389 inp->sctp_flags &= ~SCTP_PCB_FLAGS_CLOSE_IP;
2390 }
2391 sctp_timer_stop(SCTP_TIMER_TYPE_NEWCOOKIE, inp, NULL, NULL,
2392 SCTP_FROM_SCTP_PCB + SCTP_LOC_1);
2393
2394 if (inp->control) {
2395 sctp_m_freem(inp->control);
2396 inp->control = NULL;
2397 }
2398 if (inp->pkt) {
2399 sctp_m_freem(inp->pkt);
2400 inp->pkt = NULL;
2401 }
2402 m = &inp->sctp_ep;
2403 ip_pcb = &inp->ip_inp.inp; /* we could just cast the main pointer
2404 * here but I will be nice :> (i.e.
2405 * ip_pcb = ep;) */
2406 if (immediate == 0) {
2407 int cnt_in_sd;
2408
2409 cnt_in_sd = 0;
2410 for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL;
2411 asoc = nasoc) {
2412 nasoc = LIST_NEXT(asoc, sctp_tcblist);
2413 if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
2414 /* Skip guys being freed */
2415 asoc->sctp_socket = NULL;
2416 cnt_in_sd++;
2417 continue;
2418 }
2419 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_WAIT) ||
2420 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
2421 /* Just abandon things in the front states */
2422 if (asoc->asoc.total_output_queue_size == 0) {
2423 sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_2);
2424 continue;
2425 }
2426 }
2427 SCTP_TCB_LOCK(asoc);
2428 /* Disconnect the socket please */
2429 asoc->sctp_socket = NULL;
2430 asoc->asoc.state |= SCTP_STATE_CLOSED_SOCKET;
2431 if ((asoc->asoc.size_on_reasm_queue > 0) ||
2432 (asoc->asoc.control_pdapi) ||
2433 (asoc->asoc.size_on_all_streams > 0) ||
2434 (so && (so->so_rcv.sb_cc > 0))
2435 ) {
2436 /* Left with Data unread */
2437 struct mbuf *op_err;
2438
2439 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
2440 0, M_DONTWAIT, 1, MT_DATA);
2441 if (op_err) {
2442 /* Fill in the user initiated abort */
2443 struct sctp_paramhdr *ph;
2444 uint32_t *ippp;
2445
2446 SCTP_BUF_LEN(op_err) =
2447 sizeof(struct sctp_paramhdr) + sizeof(uint32_t);
2448 ph = mtod(op_err,
2449 struct sctp_paramhdr *);
2450 ph->param_type = htons(
2451 SCTP_CAUSE_USER_INITIATED_ABT);
2452 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2453 ippp = (uint32_t *) (ph + 1);
2454 *ippp = htonl(SCTP_FROM_SCTP_PCB + SCTP_LOC_3);
2455 }
2456 asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_3;
2457 sctp_send_abort_tcb(asoc, op_err);
2458 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
2459 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
2460 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
2461 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
2462 }
2463 sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_4);
2464 continue;
2465 } else if (TAILQ_EMPTY(&asoc->asoc.send_queue) &&
2466 TAILQ_EMPTY(&asoc->asoc.sent_queue) &&
2467 (asoc->asoc.stream_queue_cnt == 0)
2468 ) {
2469 if (asoc->asoc.locked_on_sending) {
2470 goto abort_anyway;
2471 }
2472 if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
2473 (SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
2474 /*
2475 * there is nothing queued to send,
2476 * so I send shutdown
2477 */
2478 sctp_send_shutdown(asoc, asoc->asoc.primary_destination);
2479 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
2480 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
2481 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
2482 }
2483 asoc->asoc.state = SCTP_STATE_SHUTDOWN_SENT;
2484 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, asoc->sctp_ep, asoc,
2485 asoc->asoc.primary_destination);
2486 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc,
2487 asoc->asoc.primary_destination);
2488 sctp_chunk_output(inp, asoc, SCTP_OUTPUT_FROM_SHUT_TMR);
2489 }
2490 } else {
2491 /* mark into shutdown pending */
2492 struct sctp_stream_queue_pending *sp;
2493
2494 asoc->asoc.state |= SCTP_STATE_SHUTDOWN_PENDING;
2495 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, asoc->sctp_ep, asoc,
2496 asoc->asoc.primary_destination);
2497 if (asoc->asoc.locked_on_sending) {
2498 sp = TAILQ_LAST(&((asoc->asoc.locked_on_sending)->outqueue),
2499 sctp_streamhead);
2500 if (sp == NULL) {
2501 printf("Error, sp is NULL, locked on sending is %p strm:%d\n",
2502 asoc->asoc.locked_on_sending,
2503 asoc->asoc.locked_on_sending->stream_no);
2504 } else {
2505 if ((sp->length == 0) && (sp->msg_is_complete == 0))
2506 asoc->asoc.state |= SCTP_STATE_PARTIAL_MSG_LEFT;
2507 }
2508 }
2509 if (TAILQ_EMPTY(&asoc->asoc.send_queue) &&
2510 TAILQ_EMPTY(&asoc->asoc.sent_queue) &&
2511 (asoc->asoc.state & SCTP_STATE_PARTIAL_MSG_LEFT)) {
2512 struct mbuf *op_err;
2513
2514 abort_anyway:
2515 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
2516 0, M_DONTWAIT, 1, MT_DATA);
2517 if (op_err) {
2518 /*
2519 * Fill in the user
2520 * initiated abort
2521 */
2522 struct sctp_paramhdr *ph;
2523 uint32_t *ippp;
2524
2525 SCTP_BUF_LEN(op_err) =
2526 (sizeof(struct sctp_paramhdr) +
2527 sizeof(uint32_t));
2528 ph = mtod(op_err,
2529 struct sctp_paramhdr *);
2530 ph->param_type = htons(
2531 SCTP_CAUSE_USER_INITIATED_ABT);
2532 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2533 ippp = (uint32_t *) (ph + 1);
2534 *ippp = htonl(SCTP_FROM_SCTP_PCB + SCTP_LOC_5);
2535 }
2536 asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_5;
2537 sctp_send_abort_tcb(asoc, op_err);
2538 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
2539 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
2540 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
2541 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
2542 }
2543 sctp_free_assoc(inp, asoc, SCTP_PCBFREE_NOFORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_6);
2544 continue;
2545 }
2546 }
2547 cnt_in_sd++;
2548 SCTP_TCB_UNLOCK(asoc);
2549 }
2550 /* now is there some left in our SHUTDOWN state? */
2551 if (cnt_in_sd) {
2552 SCTP_INP_WUNLOCK(inp);
2553 SCTP_ASOC_CREATE_UNLOCK(inp);
2554 SCTP_INP_INFO_WUNLOCK();
2555 SCTP_ITERATOR_UNLOCK();
2556#ifdef SCTP_LOG_CLOSING
2557 sctp_log_closing(inp, NULL, 2);
2558#endif
2559 return;
2560 }
2561 }
2562 inp->sctp_socket = NULL;
2563 if ((inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) !=
2564 SCTP_PCB_FLAGS_UNBOUND) {
2565 /*
2566 * ok, this guy has been bound. Its port is somewhere in
2567 * the sctppcbinfo hash table. Remove it!
2568 */
2569 LIST_REMOVE(inp, sctp_hash);
2570 inp->sctp_flags |= SCTP_PCB_FLAGS_UNBOUND;
2571 }
2572 /*
2573 * If there is a timer running to kill us, forget it, since it may
2574 * be contending for the INP lock, which would cause us to die ...
2575 */
2576 cnt = 0;
2577 for ((asoc = LIST_FIRST(&inp->sctp_asoc_list)); asoc != NULL;
2578 asoc = nasoc) {
2579 nasoc = LIST_NEXT(asoc, sctp_tcblist);
2580 if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
2581 cnt++;
2582 continue;
2583 }
2584 /* Free associations that are NOT killing us */
2585 SCTP_TCB_LOCK(asoc);
2586 if ((SCTP_GET_STATE(&asoc->asoc) != SCTP_STATE_COOKIE_WAIT) &&
2587 ((asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0)) {
2588 struct mbuf *op_err;
2589 uint32_t *ippp;
2590
2591 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
2592 0, M_DONTWAIT, 1, MT_DATA);
2593 if (op_err) {
2594 /* Fill in the user initiated abort */
2595 struct sctp_paramhdr *ph;
2596
2597 SCTP_BUF_LEN(op_err) = (sizeof(struct sctp_paramhdr) +
2598 sizeof(uint32_t));
2599 ph = mtod(op_err, struct sctp_paramhdr *);
2600 ph->param_type = htons(
2601 SCTP_CAUSE_USER_INITIATED_ABT);
2602 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2603 ippp = (uint32_t *) (ph + 1);
2604 *ippp = htonl(SCTP_FROM_SCTP_PCB + SCTP_LOC_7);
2605
2606 }
2607 asoc->sctp_ep->last_abort_code = SCTP_FROM_SCTP_PCB + SCTP_LOC_7;
2608 sctp_send_abort_tcb(asoc, op_err);
2609 SCTP_STAT_INCR_COUNTER32(sctps_aborted);
2610 } else if (asoc->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
2611 cnt++;
2612 SCTP_TCB_UNLOCK(asoc);
2613 continue;
2614 }
2615 if ((SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_OPEN) ||
2616 (SCTP_GET_STATE(&asoc->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
2617 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
2618 }
2619 sctp_free_assoc(inp, asoc, SCTP_PCBFREE_FORCE, SCTP_FROM_SCTP_PCB + SCTP_LOC_8);
2620 }
2621 if (cnt) {
2622 /* Ok we have someone out there that will kill us */
2623 SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
2624 SCTP_INP_WUNLOCK(inp);
2625 SCTP_ASOC_CREATE_UNLOCK(inp);
2626 SCTP_INP_INFO_WUNLOCK();
2627 SCTP_ITERATOR_UNLOCK();
2628#ifdef SCTP_LOG_CLOSING
2629 sctp_log_closing(inp, NULL, 3);
2630#endif
2631 return;
2632 }
2633 if ((inp->refcount) || (inp->sctp_flags & SCTP_PCB_FLAGS_CLOSE_IP)) {
2634 SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
2635 sctp_timer_start(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL);
2636 SCTP_INP_WUNLOCK(inp);
2637 SCTP_ASOC_CREATE_UNLOCK(inp);
2638 SCTP_INP_INFO_WUNLOCK();
2639 SCTP_ITERATOR_UNLOCK();
2640#ifdef SCTP_LOG_CLOSING
2641 sctp_log_closing(inp, NULL, 4);
2642#endif
2643 return;
2644 }
2645 SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
2646 inp->sctp_ep.signature_change.type = 0;
2647 inp->sctp_flags |= SCTP_PCB_FLAGS_SOCKET_ALLGONE;
2648
2649#ifdef SCTP_LOG_CLOSING
2650 sctp_log_closing(inp, NULL, 5);
2651#endif
2652
2653 SCTP_OS_TIMER_STOP(&inp->sctp_ep.signature_change.timer);
2654 inp->sctp_ep.signature_change.type = SCTP_TIMER_TYPE_NONE;
2655 /* Clear the read queue */
2656 while ((sq = TAILQ_FIRST(&inp->read_queue)) != NULL) {
2657 TAILQ_REMOVE(&inp->read_queue, sq, next);
2658 sctp_free_remote_addr(sq->whoFrom);
2659 if (so)
2660 so->so_rcv.sb_cc -= sq->length;
2661 if (sq->data) {
2662 sctp_m_freem(sq->data);
2663 sq->data = NULL;
2664 }
2665 /*
2666 * no need to free the net count, since at this point all
2667 * assoc's are gone.
2668 */
2669 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, sq);
2670 SCTP_DECR_READQ_COUNT();
2671 }
2672 /* Now the sctp_pcb things */
2673 /*
2674 * free each asoc if it is not already closed/free. we can't use the
2675 * macro here since le_next will get freed as part of the
2676 * sctp_free_assoc() call.
2677 */
2678 cnt = 0;
2679 if (so) {
2680#ifdef IPSEC
2681 ipsec4_delete_pcbpolicy(ip_pcb);
2682#endif /* IPSEC */
2683
2684 /* Unlocks not needed since the socket is gone now */
2685 }
2686 if (ip_pcb->inp_options) {
2687 (void)sctp_m_free(ip_pcb->inp_options);
2688 ip_pcb->inp_options = 0;
2689 }
2690 if (ip_pcb->inp_moptions) {
2691 ip_freemoptions(ip_pcb->inp_moptions);
2692 ip_pcb->inp_moptions = 0;
2693 }
2694#ifdef INET6
2695 if (ip_pcb->inp_vflag & INP_IPV6) {
2696 struct in6pcb *in6p;
2697
2698 in6p = (struct in6pcb *)inp;
2699 ip6_freepcbopts(in6p->in6p_outputopts);
2700 }
2701#endif /* INET6 */
2702 ip_pcb->inp_vflag = 0;
2703 /* free up authentication fields */
2704 if (inp->sctp_ep.local_auth_chunks != NULL)
2705 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks);
2706 if (inp->sctp_ep.local_hmacs != NULL)
2707 sctp_free_hmaclist(inp->sctp_ep.local_hmacs);
2708
2709 shared_key = LIST_FIRST(&inp->sctp_ep.shared_keys);
2710 while (shared_key) {
2711 LIST_REMOVE(shared_key, next);
2712 sctp_free_sharedkey(shared_key);
2713 shared_key = LIST_FIRST(&inp->sctp_ep.shared_keys);
2714 }
2715
2716 inp_save = LIST_NEXT(inp, sctp_list);
2717 LIST_REMOVE(inp, sctp_list);
2718
2719 /* fix any iterators only after we are off the list */
2720 sctp_iterator_inp_being_freed(inp, inp_save);
2721 /*
2722 * if we have an address list the following will free the list of
2723 * ifaddr's that are set into this ep. Again macro limitations here,
2724 * since the LIST_FOREACH could be a bad idea.
2725 */
2726 for ((laddr = LIST_FIRST(&inp->sctp_addr_list)); laddr != NULL;
2727 laddr = nladdr) {
2728 nladdr = LIST_NEXT(laddr, sctp_nxt_addr);
2396 LIST_REMOVE(laddr, sctp_nxt_addr);
2397 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
2398 SCTP_DECR_LADDR_COUNT();
2729 sctp_remove_laddr(laddr);
2399 }
2400
2401#ifdef SCTP_TRACK_FREED_ASOCS
2402 /* TEMP CODE */
2403 for ((asoc = LIST_FIRST(&inp->sctp_asoc_free_list)); asoc != NULL;
2404 asoc = nasoc) {
2405 nasoc = LIST_NEXT(asoc, sctp_tcblist);
2406 LIST_REMOVE(asoc, sctp_tcblist);
2407 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, asoc);
2408 SCTP_DECR_ASOC_COUNT();
2409 }
2410 /* *** END TEMP CODE *** */
2411#endif
2412 /* Now let's see about freeing the EP hash table. */
2413 if (inp->sctp_tcbhash != NULL) {
2414 SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark);
2415 inp->sctp_tcbhash = NULL;
2416 }
2417 /* Now we must put the ep memory back into the zone pool */
2418 SCTP_INP_LOCK_DESTROY(inp);
2419 SCTP_INP_READ_DESTROY(inp);
2420 SCTP_ASOC_CREATE_LOCK_DESTROY(inp);
2421 SCTP_INP_INFO_WUNLOCK();
2422
2423 SCTP_ITERATOR_UNLOCK();
2424
2425 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
2426 SCTP_DECR_EP_COUNT();
2427
2428}
2429
2430
2431struct sctp_nets *
2432sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr)
2433{
2434 struct sctp_nets *net;
2435
2436 /* locate the address */
2437 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2438 if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr))
2439 return (net);
2440 }
2441 return (NULL);
2442}
2443
2444
2445/*
2446 * adds a remote endpoint address; done with the INIT/INIT-ACK as well as
2447 * when an ASCONF arrives that adds it. It will also initialize all the cwnd
2448 * stats for the destination.
2449 */
2450int
2730 }
2731
2732#ifdef SCTP_TRACK_FREED_ASOCS
2733 /* TEMP CODE */
2734 for ((asoc = LIST_FIRST(&inp->sctp_asoc_free_list)); asoc != NULL;
2735 asoc = nasoc) {
2736 nasoc = LIST_NEXT(asoc, sctp_tcblist);
2737 LIST_REMOVE(asoc, sctp_tcblist);
2738 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, asoc);
2739 SCTP_DECR_ASOC_COUNT();
2740 }
2741 /* *** END TEMP CODE *** */
2742#endif
2743 /* Now let's see about freeing the EP hash table. */
2744 if (inp->sctp_tcbhash != NULL) {
2745 SCTP_HASH_FREE(inp->sctp_tcbhash, inp->sctp_hashmark);
2746 inp->sctp_tcbhash = NULL;
2747 }
2748 /* Now we must put the ep memory back into the zone pool */
2749 SCTP_INP_LOCK_DESTROY(inp);
2750 SCTP_INP_READ_DESTROY(inp);
2751 SCTP_ASOC_CREATE_LOCK_DESTROY(inp);
2752 SCTP_INP_INFO_WUNLOCK();
2753
2754 SCTP_ITERATOR_UNLOCK();
2755
2756 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_ep, inp);
2757 SCTP_DECR_EP_COUNT();
2758
2759}
2760
2761
2762struct sctp_nets *
2763sctp_findnet(struct sctp_tcb *stcb, struct sockaddr *addr)
2764{
2765 struct sctp_nets *net;
2766
2767 /* locate the address */
2768 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
2769 if (sctp_cmpaddr(addr, (struct sockaddr *)&net->ro._l_addr))
2770 return (net);
2771 }
2772 return (NULL);
2773}
2774
2775
2776/*
2777 * adds a remote endpoint address; done with the INIT/INIT-ACK as well as
2778 * when an ASCONF arrives that adds it. It will also initialize all the cwnd
2779 * stats for the destination.
2780 */
2781int
2451sctp_is_address_on_local_host(struct sockaddr *addr)
2782sctp_is_address_on_local_host(struct sockaddr *addr, uint32_t vrf_id)
2452{
2783{
2453 struct ifnet *ifn;
2454 struct ifaddr *ifa;
2784 struct sctp_ifa *sctp_ifa;
2455
2785
2456 TAILQ_FOREACH(ifn, &ifnet, if_list) {
2457 TAILQ_FOREACH(ifa, &ifn->if_addrlist, ifa_list) {
2458 if (addr->sa_family == ifa->ifa_addr->sa_family) {
2459 /* same family */
2460 if (addr->sa_family == AF_INET) {
2461 struct sockaddr_in *sin, *sin_c;
2786 sctp_ifa = sctp_find_ifa_by_addr(addr, vrf_id, 0);
2787 if (sctp_ifa) {
2788 return (1);
2789 } else {
2790 return (0);
2791 }
2792}
2462
2793
2463 sin = (struct sockaddr_in *)addr;
2464 sin_c = (struct sockaddr_in *)
2465 ifa->ifa_addr;
2466 if (sin->sin_addr.s_addr ==
2467 sin_c->sin_addr.s_addr) {
2468 /*
2469 * we are on the same
2470 * machine
2471 */
2472 return (1);
2473 }
2474 } else if (addr->sa_family == AF_INET6) {
2475 struct sockaddr_in6 *sin6, *sin_c6;
2476
2477 sin6 = (struct sockaddr_in6 *)addr;
2478 sin_c6 = (struct sockaddr_in6 *)
2479 ifa->ifa_addr;
2480 if (SCTP6_ARE_ADDR_EQUAL(&sin6->sin6_addr,
2481 &sin_c6->sin6_addr)) {
2482 /*
2483 * we are on the same
2484 * machine
2485 */
2486 return (1);
2487 }
2488 }
2489 }
2490 }
2794void
2795sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
2796{
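 /*
  * Standard SCTP initial congestion window: the smaller of 4 MTUs and
  * max(2 * MTU, SCTP_INITIAL_CWND), clamped below at 2 MTUs; the
  * slow-start threshold starts at the peer's advertised receive window.
  */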
2797 net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
2798 /* we always get at LEAST 2 MTU's */
2799 if (net->cwnd < (2 * net->mtu)) {
2800 net->cwnd = 2 * net->mtu;
2491 }
2801 }
2492 return (0);
2802 net->ssthresh = stcb->asoc.peers_rwnd;
2493}
2494
2495int
2496sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
2497 int set_scope, int from)
2498{
2499 /*
2500 * The following is redundant to the same lines in the
2501 * sctp_aloc_assoc() but is needed since others call the add
2502 * address function
2503 */
2504 struct sctp_nets *net, *netfirst;
2505 int addr_inscope;
2506
2507#ifdef SCTP_DEBUG
2508 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
2509 printf("Adding an address (from:%d) to the peer: ", from);
2510 sctp_print_address(newaddr);
2511 }
2512#endif
2513
2514 netfirst = sctp_findnet(stcb, newaddr);
2515 if (netfirst) {
2516 /*
2517 * Lie and return ok, we don't want to make the association
2518 * go away for this behavior. It will happen in the TCP
2519 * model in a connected socket. It does not reach the hash
2520 * table until after the association is built so it can't be
2521 * found. Mark as reachable, since the initial creation will
2522 * have been cleared and the NOT_IN_ASSOC flag will have
2523 * been added... and we don't want to end up removing it
2524 * back out.
2525 */
2526 if (netfirst->dest_state & SCTP_ADDR_UNCONFIRMED) {
2527 netfirst->dest_state = (SCTP_ADDR_REACHABLE |
2528 SCTP_ADDR_UNCONFIRMED);
2529 } else {
2530 netfirst->dest_state = SCTP_ADDR_REACHABLE;
2531 }
2532
2533 return (0);
2534 }
2535 addr_inscope = 1;
2536 if (newaddr->sa_family == AF_INET) {
2537 struct sockaddr_in *sin;
2538
2539 sin = (struct sockaddr_in *)newaddr;
2540 if (sin->sin_addr.s_addr == 0) {
2541 /* Invalid address */
2542 return (-1);
2543 }
2544 /* zero out the bzero area */
2545 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
2546
2547 /* assure len is set */
2548 sin->sin_len = sizeof(struct sockaddr_in);
2549 if (set_scope) {
2550#ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
2551 stcb->ipv4_local_scope = 1;
2552#else
2553 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
2554 stcb->asoc.ipv4_local_scope = 1;
2555 }
2556#endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
2803}
2804
2805int
2806sctp_add_remote_addr(struct sctp_tcb *stcb, struct sockaddr *newaddr,
2807 int set_scope, int from)
2808{
2809 /*
2810 * The following is redundant to the same lines in the
2811 * sctp_aloc_assoc() but is needed since others call the add
2812 * address function
2813 */
2814 struct sctp_nets *net, *netfirst;
2815 int addr_inscope;
2816
2817#ifdef SCTP_DEBUG
2818 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
2819 printf("Adding an address (from:%d) to the peer: ", from);
2820 sctp_print_address(newaddr);
2821 }
2822#endif
2823
2824 netfirst = sctp_findnet(stcb, newaddr);
2825 if (netfirst) {
2826 /*
2827 * Lie and return ok, we don't want to make the association
2828 * go away for this behavior. It will happen in the TCP
2829 * model in a connected socket. It does not reach the hash
2830 * table until after the association is built so it can't be
2831 * found. Mark as reachable, since the initial creation will
2832 * have been cleared and the NOT_IN_ASSOC flag will have
2833 * been added... and we don't want to end up removing it
2834 * back out.
2835 */
2836 if (netfirst->dest_state & SCTP_ADDR_UNCONFIRMED) {
2837 netfirst->dest_state = (SCTP_ADDR_REACHABLE |
2838 SCTP_ADDR_UNCONFIRMED);
2839 } else {
2840 netfirst->dest_state = SCTP_ADDR_REACHABLE;
2841 }
2842
2843 return (0);
2844 }
2845 addr_inscope = 1;
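 /*
  * Decide whether the new peer address falls inside the scopes this
  * association may use (private IPv4, loopback, link-local or
  * site-local IPv6); out-of-scope addresses are still added but marked
  * SCTP_ADDR_OUT_OF_SCOPE below.
  */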
2846 if (newaddr->sa_family == AF_INET) {
2847 struct sockaddr_in *sin;
2848
2849 sin = (struct sockaddr_in *)newaddr;
2850 if (sin->sin_addr.s_addr == 0) {
2851 /* Invalid address */
2852 return (-1);
2853 }
2854 /* zero out the bzero area */
2855 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
2856
2857 /* assure len is set */
2858 sin->sin_len = sizeof(struct sockaddr_in);
2859 if (set_scope) {
2860#ifdef SCTP_DONT_DO_PRIVADDR_SCOPE
2861 stcb->ipv4_local_scope = 1;
2862#else
2863 if (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) {
2864 stcb->asoc.ipv4_local_scope = 1;
2865 }
2866#endif /* SCTP_DONT_DO_PRIVADDR_SCOPE */
2557
2558 if (sctp_is_address_on_local_host(newaddr)) {
2559 stcb->asoc.loopback_scope = 1;
2560 stcb->asoc.ipv4_local_scope = 1;
2561 stcb->asoc.local_scope = 1;
2562 stcb->asoc.site_scope = 1;
2563 }
2564 } else {
2867 } else {
2565 if (from == SCTP_ADDR_IS_CONFIRMED) {
2566 /* From connectx */
2567 if (sctp_is_address_on_local_host(newaddr)) {
2568 stcb->asoc.loopback_scope = 1;
2569 stcb->asoc.ipv4_local_scope = 1;
2570 stcb->asoc.local_scope = 1;
2571 stcb->asoc.site_scope = 1;
2572 }
2573 }
2574 /* Validate the address is in scope */
2575 if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) &&
2576 (stcb->asoc.ipv4_local_scope == 0)) {
2577 addr_inscope = 0;
2578 }
2579 }
2580 } else if (newaddr->sa_family == AF_INET6) {
2581 struct sockaddr_in6 *sin6;
2582
2583 sin6 = (struct sockaddr_in6 *)newaddr;
2584 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
2585 /* Invalid address */
2586 return (-1);
2587 }
2588 /* assure len is set */
2589 sin6->sin6_len = sizeof(struct sockaddr_in6);
2590 if (set_scope) {
2868 /* Validate the address is in scope */
2869 if ((IN4_ISPRIVATE_ADDRESS(&sin->sin_addr)) &&
2870 (stcb->asoc.ipv4_local_scope == 0)) {
2871 addr_inscope = 0;
2872 }
2873 }
2874 } else if (newaddr->sa_family == AF_INET6) {
2875 struct sockaddr_in6 *sin6;
2876
2877 sin6 = (struct sockaddr_in6 *)newaddr;
2878 if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
2879 /* Invalid address */
2880 return (-1);
2881 }
2882 /* assure len is set */
2883 sin6->sin6_len = sizeof(struct sockaddr_in6);
2884 if (set_scope) {
2591 if (sctp_is_address_on_local_host(newaddr)) {
2885 if (sctp_is_address_on_local_host(newaddr, stcb->asoc.vrf_id)) {
2592 stcb->asoc.loopback_scope = 1;
2886 stcb->asoc.loopback_scope = 1;
2593 stcb->asoc.local_scope = 1;
2887 stcb->asoc.local_scope = 0;
2594 stcb->asoc.ipv4_local_scope = 1;
2595 stcb->asoc.site_scope = 1;
2596 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
2597 /*
2598 * If the new destination is a LINK_LOCAL we
2599 * must have common site scope. Don't set
2600 * the local scope since we may not share
2601 * all links, only loopback can do this.
2602 * Links on the local network would also be
2603 * on our private network for v4 too.
2604 */
2605 stcb->asoc.ipv4_local_scope = 1;
2606 stcb->asoc.site_scope = 1;
2607 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
2608 /*
2609 * If the new destination is SITE_LOCAL then
2610 * we must have site scope in common.
2611 */
2612 stcb->asoc.site_scope = 1;
2613 }
2614 } else {
2888 stcb->asoc.ipv4_local_scope = 1;
2889 stcb->asoc.site_scope = 1;
2890 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
2891 /*
2892 * If the new destination is a LINK_LOCAL we
2893 * must have common site scope. Don't set
2894 * the local scope since we may not share
2895 * all links, only loopback can do this.
2896 * Links on the local network would also be
2897 * on our private network for v4 too.
2898 */
2899 stcb->asoc.ipv4_local_scope = 1;
2900 stcb->asoc.site_scope = 1;
2901 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr)) {
2902 /*
2903 * If the new destination is SITE_LOCAL then
2904 * we must have site scope in common.
2905 */
2906 stcb->asoc.site_scope = 1;
2907 }
2908 } else {
2615 if (from == SCTP_ADDR_IS_CONFIRMED) {
2616 /* From connectx so we check for localhost. */
2617 if (sctp_is_address_on_local_host(newaddr)) {
2618 stcb->asoc.loopback_scope = 1;
2619 stcb->asoc.ipv4_local_scope = 1;
2620 stcb->asoc.local_scope = 1;
2621 stcb->asoc.site_scope = 1;
2622 }
2623 }
2624 /* Validate the address is in scope */
2625 if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) &&
2626 (stcb->asoc.loopback_scope == 0)) {
2627 addr_inscope = 0;
2628 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) &&
2629 (stcb->asoc.local_scope == 0)) {
2630 addr_inscope = 0;
2631 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr) &&
2632 (stcb->asoc.site_scope == 0)) {
2633 addr_inscope = 0;
2634 }
2635 }
2636 } else {
2637 /* not supported family type */
2638 return (-1);
2639 }
2640 net = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_net, struct sctp_nets);
2641 if (net == NULL) {
2642 return (-1);
2643 }
2644 SCTP_INCR_RADDR_COUNT();
2645 bzero(net, sizeof(*net));
2646 SCTP_GETTIME_TIMEVAL(&net->start_time);
2647 memcpy(&net->ro._l_addr, newaddr, newaddr->sa_len);
2648 if (newaddr->sa_family == AF_INET) {
2649 ((struct sockaddr_in *)&net->ro._l_addr)->sin_port = stcb->rport;
2650 } else if (newaddr->sa_family == AF_INET6) {
2651 ((struct sockaddr_in6 *)&net->ro._l_addr)->sin6_port = stcb->rport;
2652 }
2909 /* Validate the address is in scope */
2910 if (IN6_IS_ADDR_LOOPBACK(&sin6->sin6_addr) &&
2911 (stcb->asoc.loopback_scope == 0)) {
2912 addr_inscope = 0;
2913 } else if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr) &&
2914 (stcb->asoc.local_scope == 0)) {
2915 addr_inscope = 0;
2916 } else if (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr) &&
2917 (stcb->asoc.site_scope == 0)) {
2918 addr_inscope = 0;
2919 }
2920 }
2921 } else {
2922 /* not supported family type */
2923 return (-1);
2924 }
2925 net = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_net, struct sctp_nets);
2926 if (net == NULL) {
2927 return (-1);
2928 }
2929 SCTP_INCR_RADDR_COUNT();
2930 bzero(net, sizeof(*net));
2931 SCTP_GETTIME_TIMEVAL(&net->start_time);
2932 memcpy(&net->ro._l_addr, newaddr, newaddr->sa_len);
2933 if (newaddr->sa_family == AF_INET) {
2934 ((struct sockaddr_in *)&net->ro._l_addr)->sin_port = stcb->rport;
2935 } else if (newaddr->sa_family == AF_INET6) {
2936 ((struct sockaddr_in6 *)&net->ro._l_addr)->sin6_port = stcb->rport;
2937 }
2653 net->addr_is_local = sctp_is_address_on_local_host(newaddr);
2938 net->addr_is_local = sctp_is_address_on_local_host(newaddr, stcb->asoc.vrf_id);
2939 if (net->addr_is_local && ((set_scope || (from == SCTP_ADDR_IS_CONFIRMED)))) {
2940 stcb->asoc.loopback_scope = 1;
2941 stcb->asoc.ipv4_local_scope = 1;
2942 stcb->asoc.local_scope = 0;
2943 stcb->asoc.site_scope = 1;
2944 addr_inscope = 1;
2945 }
2654 net->failure_threshold = stcb->asoc.def_net_failure;
2655 if (addr_inscope == 0) {
2656 net->dest_state = (SCTP_ADDR_REACHABLE |
2657 SCTP_ADDR_OUT_OF_SCOPE);
2658 } else {
2659 if (from == SCTP_ADDR_IS_CONFIRMED)
2660 /* SCTP_ADDR_IS_CONFIRMED is passed by connect_x */
2661 net->dest_state = SCTP_ADDR_REACHABLE;
2662 else
2663 net->dest_state = SCTP_ADDR_REACHABLE |
2664 SCTP_ADDR_UNCONFIRMED;
2665 }
2666 net->RTO = stcb->asoc.initial_rto;
2667 stcb->asoc.numnets++;
2668 *(&net->ref_count) = 1;
2669 net->tos_flowlabel = 0;
2946 net->failure_threshold = stcb->asoc.def_net_failure;
2947 if (addr_inscope == 0) {
2948 net->dest_state = (SCTP_ADDR_REACHABLE |
2949 SCTP_ADDR_OUT_OF_SCOPE);
2950 } else {
2951 if (from == SCTP_ADDR_IS_CONFIRMED)
2952 /* SCTP_ADDR_IS_CONFIRMED is passed by connect_x */
2953 net->dest_state = SCTP_ADDR_REACHABLE;
2954 else
2955 net->dest_state = SCTP_ADDR_REACHABLE |
2956 SCTP_ADDR_UNCONFIRMED;
2957 }
2958 net->RTO = stcb->asoc.initial_rto;
2959 stcb->asoc.numnets++;
2960 *(&net->ref_count) = 1;
2961 net->tos_flowlabel = 0;
2670#ifdef AF_INET
2962#ifdef INET
2671 if (newaddr->sa_family == AF_INET)
2672 net->tos_flowlabel = stcb->asoc.default_tos;
2673#endif
2963 if (newaddr->sa_family == AF_INET)
2964 net->tos_flowlabel = stcb->asoc.default_tos;
2965#endif
2674#ifdef AF_INET6
2966#ifdef INET6
2675 if (newaddr->sa_family == AF_INET6)
2676 net->tos_flowlabel = stcb->asoc.default_flowlabel;
2677#endif
2678 /* Init the timer structure */
2679 SCTP_OS_TIMER_INIT(&net->rxt_timer.timer);
2680 SCTP_OS_TIMER_INIT(&net->fr_timer.timer);
2681 SCTP_OS_TIMER_INIT(&net->pmtu_timer.timer);
2682
2683 /* Now generate a route for this guy */
2684 /* KAME hack: embed scopeid */
2685 if (newaddr->sa_family == AF_INET6) {
2686 struct sockaddr_in6 *sin6;
2687
2688 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
2689 (void)sa6_embedscope(sin6, ip6_use_defzone);
2690 sin6->sin6_scope_id = 0;
2691 }
2692 rtalloc_ign((struct route *)&net->ro, 0UL);
2693 if (newaddr->sa_family == AF_INET6) {
2694 struct sockaddr_in6 *sin6;
2695
2696 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
2697 (void)sa6_recoverscope(sin6);
2698 }
2699 if ((net->ro.ro_rt) &&
2700 (net->ro.ro_rt->rt_ifp)) {
2701 net->mtu = net->ro.ro_rt->rt_ifp->if_mtu;
2702 if (from == SCTP_ALLOC_ASOC) {
2703 stcb->asoc.smallest_mtu = net->mtu;
2704 }
2705 /* start things off to match mtu of interface please. */
2706 net->ro.ro_rt->rt_rmx.rmx_mtu = net->ro.ro_rt->rt_ifp->if_mtu;
2707 } else {
2708 net->mtu = stcb->asoc.smallest_mtu;
2709 }
2710
2711 if (stcb->asoc.smallest_mtu > net->mtu) {
2712 stcb->asoc.smallest_mtu = net->mtu;
2713 }
2714 /*
2715 * We take the max of the burst limit times a MTU or the
2716 * INITIAL_CWND. We then limit this to 4 MTU's of sending.
2717 */
2967 if (newaddr->sa_family == AF_INET6)
2968 net->tos_flowlabel = stcb->asoc.default_flowlabel;
2969#endif
2970 /* Init the timer structure */
2971 SCTP_OS_TIMER_INIT(&net->rxt_timer.timer);
2972 SCTP_OS_TIMER_INIT(&net->fr_timer.timer);
2973 SCTP_OS_TIMER_INIT(&net->pmtu_timer.timer);
2974
2975 /* Now generate a route for this guy */
2976 /* KAME hack: embed scopeid */
2977 if (newaddr->sa_family == AF_INET6) {
2978 struct sockaddr_in6 *sin6;
2979
2980 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
2981 (void)sa6_embedscope(sin6, ip6_use_defzone);
2982 sin6->sin6_scope_id = 0;
2983 }
2984 rtalloc_ign((struct route *)&net->ro, 0UL);
2985 if (newaddr->sa_family == AF_INET6) {
2986 struct sockaddr_in6 *sin6;
2987
2988 sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;
2989 (void)sa6_recoverscope(sin6);
2990 }
2991 if ((net->ro.ro_rt) &&
2992 (net->ro.ro_rt->rt_ifp)) {
2993 net->mtu = net->ro.ro_rt->rt_ifp->if_mtu;
2994 if (from == SCTP_ALLOC_ASOC) {
2995 stcb->asoc.smallest_mtu = net->mtu;
2996 }
2997 /* start things off to match mtu of interface please. */
2998 net->ro.ro_rt->rt_rmx.rmx_mtu = net->ro.ro_rt->rt_ifp->if_mtu;
2999 } else {
3000 net->mtu = stcb->asoc.smallest_mtu;
3001 }
3002
3003 if (stcb->asoc.smallest_mtu > net->mtu) {
3004 stcb->asoc.smallest_mtu = net->mtu;
3005 }
3006 /*
3007 * We take the max of the burst limit times a MTU or the
3008 * INITIAL_CWND. We then limit this to 4 MTU's of sending.
3009 */
2718 net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
3010 sctp_set_initial_cc_param(stcb, net);
2719
3011
2720 /* we always get at LEAST 2 MTU's */
2721 if (net->cwnd < (2 * net->mtu)) {
2722 net->cwnd = 2 * net->mtu;
2723 }
2724 net->ssthresh = stcb->asoc.peers_rwnd;
2725
2726#if defined(SCTP_CWND_MONITOR) || defined(SCTP_CWND_LOGGING)
2727 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
2728#endif
2729
2730 /*
2731 * CMT: CUC algo - set find_pseudo_cumack to TRUE (1) at beginning
2732 * of assoc (2005/06/27, iyengar@cis.udel.edu)
2733 */
2734 net->find_pseudo_cumack = 1;
2735 net->find_rtx_pseudo_cumack = 1;
2736 net->src_addr_selected = 0;
2737 netfirst = TAILQ_FIRST(&stcb->asoc.nets);
2738 if (net->ro.ro_rt == NULL) {
2739 /* Since we have no route put it at the back */
2740 TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next);
2741 } else if (netfirst == NULL) {
2742 /* We are the first one in the pool. */
2743 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
2744 } else if (netfirst->ro.ro_rt == NULL) {
2745 /*
2746 * First one has NO route. Place this one ahead of the first
2747 * one.
2748 */
2749 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
2750 } else if (net->ro.ro_rt->rt_ifp != netfirst->ro.ro_rt->rt_ifp) {
2751 /*
2752 * This one has a different interface than the one at the
2753 * top of the list. Place it ahead.
2754 */
2755 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
2756 } else {
2757 /*
2758 * Ok we have the same interface as the first one. Move
2759 * forward until we find either a) one with a NULL route...
2760 * insert ahead of that b) one with a different ifp.. insert
2761 * after that. c) end of the list.. insert at the tail.
2762 */
2763 struct sctp_nets *netlook;
2764
2765 do {
2766 netlook = TAILQ_NEXT(netfirst, sctp_next);
2767 if (netlook == NULL) {
2768 /* End of the list */
2769 TAILQ_INSERT_TAIL(&stcb->asoc.nets, net,
2770 sctp_next);
2771 break;
2772 } else if (netlook->ro.ro_rt == NULL) {
2773 /* next one has NO route */
2774 TAILQ_INSERT_BEFORE(netfirst, net, sctp_next);
2775 break;
2776 } else if (netlook->ro.ro_rt->rt_ifp !=
2777 net->ro.ro_rt->rt_ifp) {
2778 TAILQ_INSERT_AFTER(&stcb->asoc.nets, netlook,
2779 net, sctp_next);
2780 break;
2781 }
2782 /* Shift forward */
2783 netfirst = netlook;
2784 } while (netlook != NULL);
2785 }
2786
2787 /* got to have a primary set */
2788 if (stcb->asoc.primary_destination == 0) {
2789 stcb->asoc.primary_destination = net;
2790 } else if ((stcb->asoc.primary_destination->ro.ro_rt == NULL) &&
2791 (net->ro.ro_rt) &&
2792 ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) {
2793 /* No route to current primary adopt new primary */
2794 stcb->asoc.primary_destination = net;
2795 }
2796 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb,
2797 net);
2798 /* Validate primary is first */
2799 net = TAILQ_FIRST(&stcb->asoc.nets);
2800 if ((net != stcb->asoc.primary_destination) &&
2801 (stcb->asoc.primary_destination)) {
2802 /*
2803	 * first one on the list is NOT the primary; sctp_cmpaddr()
2804	 * is much more efficient if the primary is the first on the
2805 * list, make it so.
2806 */
2807 TAILQ_REMOVE(&stcb->asoc.nets,
2808 stcb->asoc.primary_destination, sctp_next);
2809 TAILQ_INSERT_HEAD(&stcb->asoc.nets,
2810 stcb->asoc.primary_destination, sctp_next);
2811 }
2812 return (0);
2813}
2814
2815
2816/*
2817 * allocate an association and add it to the endpoint. The caller must be
2818 * careful to add all additional addresses once they are known right away or
2819 * else the assoc may experience a blackout scenario.
2820 */
2821struct sctp_tcb *
2822sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
3012
3013#if defined(SCTP_CWND_MONITOR) || defined(SCTP_CWND_LOGGING)
3014 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
3015#endif
3016
3017 /*
3018 * CMT: CUC algo - set find_pseudo_cumack to TRUE (1) at beginning
3019 * of assoc (2005/06/27, iyengar@cis.udel.edu)
3020 */
3021 net->find_pseudo_cumack = 1;
3022 net->find_rtx_pseudo_cumack = 1;
3023 net->src_addr_selected = 0;
3024 netfirst = TAILQ_FIRST(&stcb->asoc.nets);
3025 if (net->ro.ro_rt == NULL) {
3026 /* Since we have no route put it at the back */
3027 TAILQ_INSERT_TAIL(&stcb->asoc.nets, net, sctp_next);
3028 } else if (netfirst == NULL) {
3029 /* We are the first one in the pool. */
3030 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
3031 } else if (netfirst->ro.ro_rt == NULL) {
3032 /*
3033 * First one has NO route. Place this one ahead of the first
3034 * one.
3035 */
3036 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
3037 } else if (net->ro.ro_rt->rt_ifp != netfirst->ro.ro_rt->rt_ifp) {
3038 /*
3039 * This one has a different interface than the one at the
3040 * top of the list. Place it ahead.
3041 */
3042 TAILQ_INSERT_HEAD(&stcb->asoc.nets, net, sctp_next);
3043 } else {
3044 /*
3045 * Ok we have the same interface as the first one. Move
3046 * forward until we find either a) one with a NULL route...
3047 * insert ahead of that b) one with a different ifp.. insert
3048 * after that. c) end of the list.. insert at the tail.
3049 */
3050 struct sctp_nets *netlook;
3051
3052 do {
3053 netlook = TAILQ_NEXT(netfirst, sctp_next);
3054 if (netlook == NULL) {
3055 /* End of the list */
3056 TAILQ_INSERT_TAIL(&stcb->asoc.nets, net,
3057 sctp_next);
3058 break;
3059 } else if (netlook->ro.ro_rt == NULL) {
3060 /* next one has NO route */
3061 TAILQ_INSERT_BEFORE(netfirst, net, sctp_next);
3062 break;
3063 } else if (netlook->ro.ro_rt->rt_ifp !=
3064 net->ro.ro_rt->rt_ifp) {
3065 TAILQ_INSERT_AFTER(&stcb->asoc.nets, netlook,
3066 net, sctp_next);
3067 break;
3068 }
3069 /* Shift forward */
3070 netfirst = netlook;
3071 } while (netlook != NULL);
3072 }
3073
3074 /* got to have a primary set */
3075 if (stcb->asoc.primary_destination == 0) {
3076 stcb->asoc.primary_destination = net;
3077 } else if ((stcb->asoc.primary_destination->ro.ro_rt == NULL) &&
3078 (net->ro.ro_rt) &&
3079 ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0)) {
3080 /* No route to current primary adopt new primary */
3081 stcb->asoc.primary_destination = net;
3082 }
3083 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb,
3084 net);
3085 /* Validate primary is first */
3086 net = TAILQ_FIRST(&stcb->asoc.nets);
3087 if ((net != stcb->asoc.primary_destination) &&
3088 (stcb->asoc.primary_destination)) {
3089 /*
3090	 * first one on the list is NOT the primary; sctp_cmpaddr()
3091	 * is much more efficient if the primary is the first on the
3092 * list, make it so.
3093 */
3094 TAILQ_REMOVE(&stcb->asoc.nets,
3095 stcb->asoc.primary_destination, sctp_next);
3096 TAILQ_INSERT_HEAD(&stcb->asoc.nets,
3097 stcb->asoc.primary_destination, sctp_next);
3098 }
3099 return (0);
3100}
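
Illustrative aside, not part of the committed source: in the hunks above, sctp_is_address_on_local_host() gains a vrf_id argument, the IPv6 loopback branch now clears local_scope rather than setting it, and the inline initial-cwnd computation (min of 4 MTUs and max(2 MTUs, SCTP_INITIAL_CWND), never below 2 MTUs) is replaced by a call to sctp_set_initial_cc_param(). The standalone helper below only restates that removed inline rule; whether the new call applies exactly the same default is an assumption, and INITIAL_CWND here is a placeholder value, not the kernel's SCTP_INITIAL_CWND.

#include <stdint.h>

#define INITIAL_CWND 3000	/* placeholder; stands in for SCTP_INITIAL_CWND */

/* The initial congestion window rule from the deleted inline code. */
static uint32_t
initial_cwnd_sketch(uint32_t mtu)
{
	uint32_t lo = 2 * mtu;		/* "we always get at LEAST 2 MTU's" */
	uint32_t hi = 4 * mtu;
	uint32_t cwnd = (INITIAL_CWND > lo) ? INITIAL_CWND : lo;	/* max() */

	if (cwnd > hi)			/* min() against 4 MTUs */
		cwnd = hi;
	if (cwnd < lo)			/* redundant here, kept to mirror the original */
		cwnd = lo;
	return (cwnd);
}
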
3101
3102
3103/*
3104 * allocate an association and add it to the endpoint. The caller must be
3105 * careful to add all additional addresses once they are known right away or
3106 * else the assoc may experience a blackout scenario.
3107 */
3108struct sctp_tcb *
3109sctp_aloc_assoc(struct sctp_inpcb *inp, struct sockaddr *firstaddr,
2823 int for_a_init, int *error, uint32_t override_tag)
3110 int for_a_init, int *error, uint32_t override_tag, uint32_t vrf)
2824{
2825 struct sctp_tcb *stcb;
2826 struct sctp_association *asoc;
2827 struct sctpasochead *head;
2828 uint16_t rport;
2829 int err;
2830
2831 /*
2832 * Assumption made here: Caller has done a
2833 * sctp_findassociation_ep_addr(ep, addr's); to make sure the
2834 * address does not exist already.
2835 */
2836 if (sctppcbinfo.ipi_count_asoc >= SCTP_MAX_NUM_OF_ASOC) {
2837 /* Hit max assoc, sorry no more */
2838 *error = ENOBUFS;
2839 return (NULL);
2840 }
2841 SCTP_INP_RLOCK(inp);
2842 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
2843 /*
2844 * If its in the TCP pool, its NOT allowed to create an
2845 * association. The parent listener needs to call
2846 * sctp_aloc_assoc.. or the one-2-many socket. If a peeled
2847 * off, or connected one does this.. its an error.
2848 */
2849 SCTP_INP_RUNLOCK(inp);
2850 *error = EINVAL;
2851 return (NULL);
2852 }
2853#ifdef SCTP_DEBUG
2854 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
2855 printf("Allocate an association for peer:");
2856 if (firstaddr)
2857 sctp_print_address(firstaddr);
2858 else
2859 printf("None\n");
2860 printf("Port:%d\n",
2861 ntohs(((struct sockaddr_in *)firstaddr)->sin_port));
2862 }
2863#endif /* SCTP_DEBUG */
2864 if (firstaddr->sa_family == AF_INET) {
2865 struct sockaddr_in *sin;
2866
2867 sin = (struct sockaddr_in *)firstaddr;
2868 if ((sin->sin_port == 0) || (sin->sin_addr.s_addr == 0)) {
2869 /* Invalid address */
2870 SCTP_INP_RUNLOCK(inp);
2871 *error = EINVAL;
2872 return (NULL);
2873 }
2874 rport = sin->sin_port;
2875 } else if (firstaddr->sa_family == AF_INET6) {
2876 struct sockaddr_in6 *sin6;
2877
2878 sin6 = (struct sockaddr_in6 *)firstaddr;
2879 if ((sin6->sin6_port == 0) ||
2880 (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) {
2881 /* Invalid address */
2882 SCTP_INP_RUNLOCK(inp);
2883 *error = EINVAL;
2884 return (NULL);
2885 }
2886 rport = sin6->sin6_port;
2887 } else {
2888 /* not supported family type */
2889 SCTP_INP_RUNLOCK(inp);
2890 *error = EINVAL;
2891 return (NULL);
2892 }
2893 SCTP_INP_RUNLOCK(inp);
2894 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
2895 /*
2896 * If you have not performed a bind, then we need to do the
2897		 * ephemeral bind for you.
2898 */
2899 if ((err = sctp_inpcb_bind(inp->sctp_socket,
2900 (struct sockaddr *)NULL,
2901 (struct thread *)NULL
2902 ))) {
2903 /* bind error, probably perm */
2904 *error = err;
2905 return (NULL);
2906 }
2907 }
2908 stcb = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_asoc, struct sctp_tcb);
2909 if (stcb == NULL) {
2910 /* out of memory? */
2911 *error = ENOMEM;
2912 return (NULL);
2913 }
2914 SCTP_INCR_ASOC_COUNT();
2915
2916 bzero(stcb, sizeof(*stcb));
2917 asoc = &stcb->asoc;
2918 SCTP_TCB_LOCK_INIT(stcb);
2919 SCTP_TCB_SEND_LOCK_INIT(stcb);
2920 /* setup back pointer's */
2921 stcb->sctp_ep = inp;
2922 stcb->sctp_socket = inp->sctp_socket;
3111{
3112 struct sctp_tcb *stcb;
3113 struct sctp_association *asoc;
3114 struct sctpasochead *head;
3115 uint16_t rport;
3116 int err;
3117
3118 /*
3119 * Assumption made here: Caller has done a
3120 * sctp_findassociation_ep_addr(ep, addr's); to make sure the
3121 * address does not exist already.
3122 */
3123 if (sctppcbinfo.ipi_count_asoc >= SCTP_MAX_NUM_OF_ASOC) {
3124 /* Hit max assoc, sorry no more */
3125 *error = ENOBUFS;
3126 return (NULL);
3127 }
3128 SCTP_INP_RLOCK(inp);
3129 if (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) {
3130 /*
3131 * If its in the TCP pool, its NOT allowed to create an
3132 * association. The parent listener needs to call
3133 * sctp_aloc_assoc.. or the one-2-many socket. If a peeled
3134 * off, or connected one does this.. its an error.
3135 */
3136 SCTP_INP_RUNLOCK(inp);
3137 *error = EINVAL;
3138 return (NULL);
3139 }
3140#ifdef SCTP_DEBUG
3141 if (sctp_debug_on & SCTP_DEBUG_PCB3) {
3142 printf("Allocate an association for peer:");
3143 if (firstaddr)
3144 sctp_print_address(firstaddr);
3145 else
3146 printf("None\n");
3147 printf("Port:%d\n",
3148 ntohs(((struct sockaddr_in *)firstaddr)->sin_port));
3149 }
3150#endif /* SCTP_DEBUG */
3151 if (firstaddr->sa_family == AF_INET) {
3152 struct sockaddr_in *sin;
3153
3154 sin = (struct sockaddr_in *)firstaddr;
3155 if ((sin->sin_port == 0) || (sin->sin_addr.s_addr == 0)) {
3156 /* Invalid address */
3157 SCTP_INP_RUNLOCK(inp);
3158 *error = EINVAL;
3159 return (NULL);
3160 }
3161 rport = sin->sin_port;
3162 } else if (firstaddr->sa_family == AF_INET6) {
3163 struct sockaddr_in6 *sin6;
3164
3165 sin6 = (struct sockaddr_in6 *)firstaddr;
3166 if ((sin6->sin6_port == 0) ||
3167 (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr))) {
3168 /* Invalid address */
3169 SCTP_INP_RUNLOCK(inp);
3170 *error = EINVAL;
3171 return (NULL);
3172 }
3173 rport = sin6->sin6_port;
3174 } else {
3175 /* not supported family type */
3176 SCTP_INP_RUNLOCK(inp);
3177 *error = EINVAL;
3178 return (NULL);
3179 }
3180 SCTP_INP_RUNLOCK(inp);
3181 if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
3182 /*
3183 * If you have not performed a bind, then we need to do the
3184		 * ephemeral bind for you.
3185 */
3186 if ((err = sctp_inpcb_bind(inp->sctp_socket,
3187 (struct sockaddr *)NULL,
3188 (struct thread *)NULL
3189 ))) {
3190 /* bind error, probably perm */
3191 *error = err;
3192 return (NULL);
3193 }
3194 }
3195 stcb = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_asoc, struct sctp_tcb);
3196 if (stcb == NULL) {
3197 /* out of memory? */
3198 *error = ENOMEM;
3199 return (NULL);
3200 }
3201 SCTP_INCR_ASOC_COUNT();
3202
3203 bzero(stcb, sizeof(*stcb));
3204 asoc = &stcb->asoc;
3205 SCTP_TCB_LOCK_INIT(stcb);
3206 SCTP_TCB_SEND_LOCK_INIT(stcb);
3207 /* setup back pointer's */
3208 stcb->sctp_ep = inp;
3209 stcb->sctp_socket = inp->sctp_socket;
2923 if ((err = sctp_init_asoc(inp, asoc, for_a_init, override_tag))) {
3210 if ((err = sctp_init_asoc(inp, asoc, for_a_init, override_tag, vrf))) {
2924 /* failed */
2925 SCTP_TCB_LOCK_DESTROY(stcb);
2926 SCTP_TCB_SEND_LOCK_DESTROY(stcb);
2927 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
2928 SCTP_DECR_ASOC_COUNT();
2929 *error = err;
2930 return (NULL);
2931 }
2932 /* and the port */
2933 stcb->rport = rport;
2934 SCTP_INP_INFO_WLOCK();
2935 SCTP_INP_WLOCK(inp);
2936 if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
2937 /* inpcb freed while alloc going on */
2938 SCTP_TCB_LOCK_DESTROY(stcb);
2939 SCTP_TCB_SEND_LOCK_DESTROY(stcb);
2940 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
2941 SCTP_INP_WUNLOCK(inp);
2942 SCTP_INP_INFO_WUNLOCK();
2943 SCTP_DECR_ASOC_COUNT();
2944 *error = EINVAL;
2945 return (NULL);
2946 }
2947 SCTP_TCB_LOCK(stcb);
2948
2949 /* now that my_vtag is set, add it to the hash */
2950 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
2951 sctppcbinfo.hashasocmark)];
2952 /* put it in the bucket in the vtag hash of assoc's for the system */
2953 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
2954 SCTP_INP_INFO_WUNLOCK();
2955
2956 if ((err = sctp_add_remote_addr(stcb, firstaddr, SCTP_DO_SETSCOPE, SCTP_ALLOC_ASOC))) {
2957 /* failure.. memory error? */
2958 if (asoc->strmout)
2959 SCTP_FREE(asoc->strmout);
2960 if (asoc->mapping_array)
2961 SCTP_FREE(asoc->mapping_array);
2962
2963 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
2964 SCTP_DECR_ASOC_COUNT();
2965 SCTP_TCB_LOCK_DESTROY(stcb);
2966 SCTP_TCB_SEND_LOCK_DESTROY(stcb);
2967 *error = ENOBUFS;
2968 return (NULL);
2969 }
2970 /* Init all the timers */
2971 SCTP_OS_TIMER_INIT(&asoc->hb_timer.timer);
2972 SCTP_OS_TIMER_INIT(&asoc->dack_timer.timer);
2973 SCTP_OS_TIMER_INIT(&asoc->strreset_timer.timer);
2974 SCTP_OS_TIMER_INIT(&asoc->asconf_timer.timer);
2975 SCTP_OS_TIMER_INIT(&asoc->shut_guard_timer.timer);
2976 SCTP_OS_TIMER_INIT(&asoc->autoclose_timer.timer);
2977 SCTP_OS_TIMER_INIT(&asoc->delayed_event_timer.timer);
2978
2979 LIST_INSERT_HEAD(&inp->sctp_asoc_list, stcb, sctp_tcblist);
2980 /* now file the port under the hash as well */
2981 if (inp->sctp_tcbhash != NULL) {
2982 head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(stcb->rport,
2983 inp->sctp_hashmark)];
2984 LIST_INSERT_HEAD(head, stcb, sctp_tcbhash);
2985 }
2986 SCTP_INP_WUNLOCK(inp);
2987#ifdef SCTP_DEBUG
2988 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
2989 printf("Association %p now allocated\n", stcb);
2990 }
2991#endif
2992 return (stcb);
2993}
2994
2995
2996void
2997sctp_remove_net(struct sctp_tcb *stcb, struct sctp_nets *net)
2998{
2999 struct sctp_association *asoc;
3000
3001 asoc = &stcb->asoc;
3002 asoc->numnets--;
3003 TAILQ_REMOVE(&asoc->nets, net, sctp_next);
3211 /* failed */
3212 SCTP_TCB_LOCK_DESTROY(stcb);
3213 SCTP_TCB_SEND_LOCK_DESTROY(stcb);
3214 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3215 SCTP_DECR_ASOC_COUNT();
3216 *error = err;
3217 return (NULL);
3218 }
3219 /* and the port */
3220 stcb->rport = rport;
3221 SCTP_INP_INFO_WLOCK();
3222 SCTP_INP_WLOCK(inp);
3223 if (inp->sctp_flags & (SCTP_PCB_FLAGS_SOCKET_GONE | SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
3224 /* inpcb freed while alloc going on */
3225 SCTP_TCB_LOCK_DESTROY(stcb);
3226 SCTP_TCB_SEND_LOCK_DESTROY(stcb);
3227 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3228 SCTP_INP_WUNLOCK(inp);
3229 SCTP_INP_INFO_WUNLOCK();
3230 SCTP_DECR_ASOC_COUNT();
3231 *error = EINVAL;
3232 return (NULL);
3233 }
3234 SCTP_TCB_LOCK(stcb);
3235
3236 /* now that my_vtag is set, add it to the hash */
3237 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
3238 sctppcbinfo.hashasocmark)];
3239 /* put it in the bucket in the vtag hash of assoc's for the system */
3240 LIST_INSERT_HEAD(head, stcb, sctp_asocs);
3241 SCTP_INP_INFO_WUNLOCK();
3242
3243 if ((err = sctp_add_remote_addr(stcb, firstaddr, SCTP_DO_SETSCOPE, SCTP_ALLOC_ASOC))) {
3244 /* failure.. memory error? */
3245 if (asoc->strmout)
3246 SCTP_FREE(asoc->strmout);
3247 if (asoc->mapping_array)
3248 SCTP_FREE(asoc->mapping_array);
3249
3250 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3251 SCTP_DECR_ASOC_COUNT();
3252 SCTP_TCB_LOCK_DESTROY(stcb);
3253 SCTP_TCB_SEND_LOCK_DESTROY(stcb);
3254 *error = ENOBUFS;
3255 return (NULL);
3256 }
3257 /* Init all the timers */
3258 SCTP_OS_TIMER_INIT(&asoc->hb_timer.timer);
3259 SCTP_OS_TIMER_INIT(&asoc->dack_timer.timer);
3260 SCTP_OS_TIMER_INIT(&asoc->strreset_timer.timer);
3261 SCTP_OS_TIMER_INIT(&asoc->asconf_timer.timer);
3262 SCTP_OS_TIMER_INIT(&asoc->shut_guard_timer.timer);
3263 SCTP_OS_TIMER_INIT(&asoc->autoclose_timer.timer);
3264 SCTP_OS_TIMER_INIT(&asoc->delayed_event_timer.timer);
3265
3266 LIST_INSERT_HEAD(&inp->sctp_asoc_list, stcb, sctp_tcblist);
3267 /* now file the port under the hash as well */
3268 if (inp->sctp_tcbhash != NULL) {
3269 head = &inp->sctp_tcbhash[SCTP_PCBHASH_ALLADDR(stcb->rport,
3270 inp->sctp_hashmark)];
3271 LIST_INSERT_HEAD(head, stcb, sctp_tcbhash);
3272 }
3273 SCTP_INP_WUNLOCK(inp);
3274#ifdef SCTP_DEBUG
3275 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
3276 printf("Association %p now allocated\n", stcb);
3277 }
3278#endif
3279 return (stcb);
3280}
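
Illustrative aside, not part of the committed source: sctp_aloc_assoc() gains a trailing vrf argument in this revision and simply forwards it to sctp_init_asoc(). The fragment below is a hypothetical caller (the name example_open_assoc and the chosen argument values are invented) showing the contract visible in the listing: NULL plus a code in *error on failure, and on success an stcb returned with its TCB lock already held. It assumes the same kernel headers this file already includes.

static struct sctp_tcb *
example_open_assoc(struct sctp_inpcb *inp, struct sockaddr *peer,
    uint32_t vrf, int *errp)
{
	struct sctp_tcb *stcb;

	*errp = 0;
	/*
	 * Arguments, in order: inp, firstaddr, for_a_init, error,
	 * override_tag, vrf; for_a_init = 1 and override_tag = 0 are
	 * illustrative values only.
	 */
	stcb = sctp_aloc_assoc(inp, peer, 1, errp, 0, vrf);
	if (stcb == NULL) {
		/* *errp holds ENOBUFS, EINVAL, ENOMEM or a bind error */
		return (NULL);
	}
	/* success: the new association is returned locked */
	SCTP_TCB_UNLOCK(stcb);
	return (stcb);
}
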
3281
3282
3283void
3284sctp_remove_net(struct sctp_tcb *stcb, struct sctp_nets *net)
3285{
3286 struct sctp_association *asoc;
3287
3288 asoc = &stcb->asoc;
3289 asoc->numnets--;
3290 TAILQ_REMOVE(&asoc->nets, net, sctp_next);
3004 sctp_free_remote_addr(net);
3005 if (net == asoc->primary_destination) {
3006 /* Reset primary */
3007 struct sctp_nets *lnet;
3008
3009 lnet = TAILQ_FIRST(&asoc->nets);
3010 /* Try to find a confirmed primary */
3291 if (net == asoc->primary_destination) {
3292 /* Reset primary */
3293 struct sctp_nets *lnet;
3294
3295 lnet = TAILQ_FIRST(&asoc->nets);
3296 /* Try to find a confirmed primary */
3011 asoc->primary_destination = sctp_find_alternate_net(stcb, lnet,
3012 0);
3297 asoc->primary_destination = sctp_find_alternate_net(stcb, lnet, 0);
3013 }
3014 if (net == asoc->last_data_chunk_from) {
3015 /* Reset primary */
3016 asoc->last_data_chunk_from = TAILQ_FIRST(&asoc->nets);
3017 }
3018 if (net == asoc->last_control_chunk_from) {
3019 /* Clear net */
3020 asoc->last_control_chunk_from = NULL;
3021 }
3298 }
3299 if (net == asoc->last_data_chunk_from) {
3300 /* Reset primary */
3301 asoc->last_data_chunk_from = TAILQ_FIRST(&asoc->nets);
3302 }
3303 if (net == asoc->last_control_chunk_from) {
3304 /* Clear net */
3305 asoc->last_control_chunk_from = NULL;
3306 }
3022/* if (net == asoc->asconf_last_sent_to) {*/
3023 /* Reset primary */
3024/* asoc->asconf_last_sent_to = TAILQ_FIRST(&asoc->nets);*/
3025/* }*/
3307 sctp_free_remote_addr(net);
3026}
3027
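
Illustrative aside, not part of the committed source: the change to sctp_remove_net() above is one of ordering only. The old code dropped the reference on net (sctp_free_remote_addr) right after unlinking it and then compared the stale pointer against primary_destination, last_data_chunk_from and last_control_chunk_from; the new code does those fix-ups first and releases the reference last. A minimal sketch of that pattern, with invented types:

#include <stdlib.h>

struct ref_obj {
	int refcount;
};

static void
ref_release(struct ref_obj *o)
{
	if (--o->refcount == 0)		/* free only on the last reference */
		free(o);
}

static void
unlink_entry(struct ref_obj **primary, struct ref_obj *victim)
{
	/* 1: repoint anything that still names the victim */
	if (*primary == victim)
		*primary = NULL;
	/* 2: only now drop the reference the container held */
	ref_release(victim);
}
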
3028/*
3029 * remove a remote endpoint address from an association, it will fail if the
3030 * address does not exist.
3031 */
3032int
3033sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr)
3034{
3035 /*
3036 * Here we need to remove a remote address. This is quite simple, we
3037 * first find it in the list of address for the association
3038 * (tasoc->asoc.nets) and then if it is there, we do a LIST_REMOVE
3039 * on that item. Note we do not allow it to be removed if there are
3040 * no other addresses.
3041 */
3042 struct sctp_association *asoc;
3043 struct sctp_nets *net, *net_tmp;
3044
3045 asoc = &stcb->asoc;
3046
3047 /* locate the address */
3048 for (net = TAILQ_FIRST(&asoc->nets); net != NULL; net = net_tmp) {
3049 net_tmp = TAILQ_NEXT(net, sctp_next);
3050 if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) {
3051 continue;
3052 }
3053 if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr,
3054 remaddr)) {
3055 /* we found the guy */
3056 if (asoc->numnets < 2) {
3057 /* Must have at LEAST two remote addresses */
3058 return (-1);
3059 } else {
3060 sctp_remove_net(stcb, net);
3061 return (0);
3062 }
3063 }
3064 }
3065 /* not found. */
3066 return (-2);
3067}
3068
3069
3070void
3071sctp_add_vtag_to_timewait(struct sctp_inpcb *inp, uint32_t tag, uint32_t time)
3072{
3073 struct sctpvtaghead *chain;
3074 struct sctp_tagblock *twait_block;
3075 struct timeval now;
3076 int set, i;
3077
3078 SCTP_GETTIME_TIMEVAL(&now);
3079 chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
3080 set = 0;
3081 if (!SCTP_LIST_EMPTY(chain)) {
3082 /* Block(s) present, lets find space, and expire on the fly */
3083 LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
3084 for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
3085 if ((twait_block->vtag_block[i].v_tag == 0) &&
3086 !set) {
3087 twait_block->vtag_block[i].tv_sec_at_expire =
3088 now.tv_sec + time;
3089 twait_block->vtag_block[i].v_tag = tag;
3090 set = 1;
3091 } else if ((twait_block->vtag_block[i].v_tag) &&
3092 ((long)twait_block->vtag_block[i].tv_sec_at_expire >
3093 now.tv_sec)) {
3094 /* Audit expires this guy */
3095 twait_block->vtag_block[i].tv_sec_at_expire = 0;
3096 twait_block->vtag_block[i].v_tag = 0;
3097 if (set == 0) {
3098 /* Reuse it for my new tag */
3099 twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + SCTP_TIME_WAIT;
3100 twait_block->vtag_block[0].v_tag = tag;
3101 set = 1;
3102 }
3103 }
3104 }
3105 if (set) {
3106 /*
3107 * We only do up to the block where we can
3108 * place our tag for audits
3109 */
3110 break;
3111 }
3112 }
3113 }
3114 /* Need to add a new block to chain */
3115 if (!set) {
3116 SCTP_MALLOC(twait_block, struct sctp_tagblock *,
3117 sizeof(struct sctp_tagblock), "TimeWait");
3118 if (twait_block == NULL) {
3119 return;
3120 }
3121 memset(twait_block, 0, sizeof(struct sctp_tagblock));
3122 LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock);
3123 twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec +
3124 SCTP_TIME_WAIT;
3125 twait_block->vtag_block[0].v_tag = tag;
3126 }
3127}
3128
3129
3130static void
3131sctp_iterator_asoc_being_freed(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
3132{
3133 struct sctp_iterator *it;
3134
3135 /*
3136	 * Unlock the tcb lock; we do this so we avoid a deadlock scenario
3137 * where the iterator is waiting on the TCB lock and the TCB lock is
3138 * waiting on the iterator lock.
3139 */
3140 it = stcb->asoc.stcb_starting_point_for_iterator;
3141 if (it == NULL) {
3142 return;
3143 }
3144 if (it->inp != stcb->sctp_ep) {
3145 /* hmm, focused on the wrong one? */
3146 return;
3147 }
3148 if (it->stcb != stcb) {
3149 return;
3150 }
3151 it->stcb = LIST_NEXT(stcb, sctp_tcblist);
3152 if (it->stcb == NULL) {
3153 /* done with all asoc's in this assoc */
3154 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
3155 it->inp = NULL;
3156 } else {
3157 it->inp = LIST_NEXT(inp, sctp_list);
3158 }
3159 }
3160}
3161
3162/*
3163 * Free the association after un-hashing the remote port.
3164 */
3165int
3166sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfree, int from_location)
3167{
3168 int i;
3169 struct sctp_association *asoc;
3170 struct sctp_nets *net, *prev;
3171 struct sctp_laddr *laddr;
3172 struct sctp_tmit_chunk *chk;
3173 struct sctp_asconf_addr *aparam;
3174 struct sctp_stream_reset_list *liste;
3175 struct sctp_queued_to_read *sq;
3176 struct sctp_stream_queue_pending *sp;
3177 sctp_sharedkey_t *shared_key;
3178 struct socket *so;
3179 int ccnt = 0;
3180 int cnt = 0;
3181
3182 /* first, lets purge the entry from the hash table. */
3183
3184#ifdef SCTP_LOG_CLOSING
3185 sctp_log_closing(inp, stcb, 6);
3186#endif
3187 if (stcb->asoc.state == 0) {
3188#ifdef SCTP_LOG_CLOSING
3189 sctp_log_closing(inp, NULL, 7);
3190#endif
3191 /* there is no asoc, really TSNH :-0 */
3192 return (1);
3193 }
3194 /* TEMP CODE */
3195 if (stcb->freed_from_where == 0) {
3196 /* Only record the first place free happened from */
3197 stcb->freed_from_where = from_location;
3198 }
3199 /* TEMP CODE */
3200
3201 asoc = &stcb->asoc;
3202 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3203 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
3204 /* nothing around */
3205 so = NULL;
3206 else
3207 so = inp->sctp_socket;
3208
3209 /*
3210	 * We use timer-based freeing if a reader or writer is in the way.
3211 * So we first check if we are actually being called from a timer,
3212 * if so we abort early if a reader or writer is still in the way.
3213 */
3214 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
3215 (from_inpcbfree == SCTP_NORMAL_PROC)) {
3216 /*
3217 * is it the timer driving us? if so are the reader/writers
3218 * gone?
3219 */
3220 if (stcb->asoc.refcnt) {
3221 /* nope, reader or writer in the way */
3222 sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
3223 /* no asoc destroyed */
3224 SCTP_TCB_UNLOCK(stcb);
3225#ifdef SCTP_LOG_CLOSING
3226 sctp_log_closing(inp, stcb, 8);
3227#endif
3228 return (0);
3229 }
3230 }
3231 /* now clean up any other timers */
3232 SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
3233 SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
3234 SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
3235 SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
3236 SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
3237 SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer);
3238 SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
3239
3240 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3241 SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
3242 SCTP_OS_TIMER_STOP(&net->rxt_timer.timer);
3243 SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
3244 }
3245 /* Now the read queue needs to be cleaned up (only once) */
3246 cnt = 0;
3247 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) {
3248 SCTP_INP_READ_LOCK(inp);
3249 TAILQ_FOREACH(sq, &inp->read_queue, next) {
3250 if (sq->stcb == stcb) {
3251 sq->do_not_ref_stcb = 1;
3252 sq->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
3253 /*
3254 * If there is no end, there never will be
3255 * now.
3256 */
3257 if (sq->end_added == 0) {
3258 /* Held for PD-API clear that. */
3259 sq->pdapi_aborted = 1;
3260 sq->held_length = 0;
3261 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3262 /*
3263 * Need to add a PD-API
3264 * aborted indication.
3265 * Setting the control_pdapi
3266 * assures that it will be
3267 * added right after this
3268 * msg.
3269 */
3270 stcb->asoc.control_pdapi = sq;
3271 sctp_notify_partial_delivery_indication(stcb,
3272 SCTP_PARTIAL_DELIVERY_ABORTED, 1);
3273 stcb->asoc.control_pdapi = NULL;
3274 }
3275 }
3276 /* Add an end to wake them */
3277 sq->end_added = 1;
3278 cnt++;
3279 }
3280 }
3281 SCTP_INP_READ_UNLOCK(inp);
3282 if (stcb->block_entry) {
3283 cnt++;
3284 stcb->block_entry->error = ECONNRESET;
3285 stcb->block_entry = NULL;
3286 }
3287 }
3288 stcb->asoc.state |= SCTP_STATE_ABOUT_TO_BE_FREED;
3289 if ((from_inpcbfree != SCTP_PCBFREE_FORCE) && (stcb->asoc.refcnt)) {
3290 /*
3291 * reader or writer in the way, we have hopefully given him
3292 * something to chew on above.
3293 */
3294 sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
3295 SCTP_TCB_UNLOCK(stcb);
3296 if (so) {
3297 SCTP_INP_RLOCK(inp);
3298 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3299 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
3300 /* nothing around */
3301 so = NULL;
3302 if (so) {
3303 /* Wake any reader/writers */
3304 sctp_sorwakeup(inp, so);
3305 sctp_sowwakeup(inp, so);
3306 }
3307 SCTP_INP_RUNLOCK(inp);
3308
3309 }
3310#ifdef SCTP_LOG_CLOSING
3311 sctp_log_closing(inp, stcb, 9);
3312#endif
3313 /* no asoc destroyed */
3314 return (0);
3315 }
3316#ifdef SCTP_LOG_CLOSING
3317 sctp_log_closing(inp, stcb, 10);
3318#endif
3319 /*
3320 * When I reach here, no others want to kill the assoc yet.. and I
3321	 * own the lock. Now it's possible an abort comes in when I do the
3322	 * lock exchange below to grab all the locks to do the final take
3323	 * out. To prevent this we increment the count, which will start a
3324 * timer and blow out above thus assuring us that we hold exclusive
3325 * killing of the asoc. Note that after getting back the TCB lock we
3326 * will go ahead and increment the counter back up and stop any
3327 * timer a passing stranger may have started :-S
3328 */
3329 if (from_inpcbfree == SCTP_NORMAL_PROC) {
3330 atomic_add_int(&stcb->asoc.refcnt, 1);
3331
3332 SCTP_TCB_UNLOCK(stcb);
3333
3334 SCTP_ITERATOR_LOCK();
3335 SCTP_INP_INFO_WLOCK();
3336 SCTP_INP_WLOCK(inp);
3337 SCTP_TCB_LOCK(stcb);
3338 }
3339 /* Double check the GONE flag */
3340 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3341 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
3342 /* nothing around */
3343 so = NULL;
3344
3345 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3346 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3347 /*
3348 * For TCP type we need special handling when we are
3349	 * connected. We also include the peel'ed off ones too.
3350 */
3351 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3352 inp->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED;
3353 inp->sctp_flags |= SCTP_PCB_FLAGS_WAS_CONNECTED;
3354 if (so) {
3355 SOCK_LOCK(so);
3356 if (so->so_rcv.sb_cc == 0) {
3357 so->so_state &= ~(SS_ISCONNECTING |
3358 SS_ISDISCONNECTING |
3359 SS_ISCONFIRMING |
3360 SS_ISCONNECTED);
3361 }
3362 SOCK_UNLOCK(so);
3363 sctp_sowwakeup(inp, so);
3364 sctp_sorwakeup(inp, so);
3365 wakeup(&so->so_timeo);
3366 }
3367 }
3368 }
3369 /*
3370 * Make it invalid too, that way if its about to run it will abort
3371 * and return.
3372 */
3373 sctp_iterator_asoc_being_freed(inp, stcb);
3374 /* re-increment the lock */
3375 if (from_inpcbfree == SCTP_NORMAL_PROC) {
3376 atomic_add_int(&stcb->asoc.refcnt, -1);
3377 }
3378 asoc->state = 0;
3379 if (inp->sctp_tcbhash) {
3380 LIST_REMOVE(stcb, sctp_tcbhash);
3381 }
3382 if (stcb->asoc.in_restart_hash) {
3383 LIST_REMOVE(stcb, sctp_tcbrestarhash);
3384 }
3385 /* Now lets remove it from the list of ALL associations in the EP */
3386 LIST_REMOVE(stcb, sctp_tcblist);
3387 if (from_inpcbfree == SCTP_NORMAL_PROC) {
3388 SCTP_INP_INCR_REF(inp);
3389 SCTP_INP_WUNLOCK(inp);
3390 SCTP_ITERATOR_UNLOCK();
3391 }
3392 /* pull from vtag hash */
3393 LIST_REMOVE(stcb, sctp_asocs);
3394 sctp_add_vtag_to_timewait(inp, asoc->my_vtag, SCTP_TIME_WAIT);
3395
3396
3397 /*
3398	 * Now restop the timers to be sure - this is paranoia at its finest!
3399 */
3400 SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
3401 SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
3402 SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
3403 SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
3404 SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
3405 SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer);
3406 SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
3407 SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
3408
3409 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3410 SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
3411 SCTP_OS_TIMER_STOP(&net->rxt_timer.timer);
3412 SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
3413 }
3414
3415 asoc->strreset_timer.type = SCTP_TIMER_TYPE_NONE;
3416 prev = NULL;
3417 /*
3418 * The chunk lists and such SHOULD be empty but we check them just
3419 * in case.
3420 */
3421 /* anything on the wheel needs to be removed */
3422 for (i = 0; i < asoc->streamoutcnt; i++) {
3423 struct sctp_stream_out *outs;
3424
3425 outs = &asoc->strmout[i];
3426 /* now clean up any chunks here */
3427 sp = TAILQ_FIRST(&outs->outqueue);
3428 while (sp) {
3429 TAILQ_REMOVE(&outs->outqueue, sp, next);
3430 if (sp->data) {
3431 sctp_m_freem(sp->data);
3432 sp->data = NULL;
3433 sp->tail_mbuf = NULL;
3434 }
3435 sctp_free_remote_addr(sp->net);
3436 sctp_free_spbufspace(stcb, asoc, sp);
3437 /* Free the zone stuff */
3438 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_strmoq, sp);
3439 SCTP_DECR_STRMOQ_COUNT();
3440 sp = TAILQ_FIRST(&outs->outqueue);
3441 }
3442 }
3443
3444 while ((sp = TAILQ_FIRST(&asoc->free_strmoq)) != NULL) {
3445 TAILQ_REMOVE(&asoc->free_strmoq, sp, next);
3446 if (sp->data) {
3447 sctp_m_freem(sp->data);
3448 sp->data = NULL;
3449 sp->tail_mbuf = NULL;
3450 }
3451 /* Free the zone stuff */
3452 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_strmoq, sp);
3453 SCTP_DECR_STRMOQ_COUNT();
3454 atomic_add_int(&sctppcbinfo.ipi_free_strmoq, -1);
3455 }
3456
3457 while ((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) {
3458 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
3459 SCTP_FREE(liste);
3460 }
3461
3462 sq = TAILQ_FIRST(&asoc->pending_reply_queue);
3463 while (sq) {
3464 TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next);
3465 if (sq->data) {
3466 sctp_m_freem(sq->data);
3467 sq->data = NULL;
3468 }
3469 sctp_free_remote_addr(sq->whoFrom);
3470 sq->whoFrom = NULL;
3471 sq->stcb = NULL;
3472 /* Free the ctl entry */
3473 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, sq);
3474 SCTP_DECR_READQ_COUNT();
3475 sq = TAILQ_FIRST(&asoc->pending_reply_queue);
3476 }
3477
3478 chk = TAILQ_FIRST(&asoc->free_chunks);
3479 while (chk) {
3480 TAILQ_REMOVE(&asoc->free_chunks, chk, sctp_next);
3481 if (chk->data) {
3482 sctp_m_freem(chk->data);
3483 chk->data = NULL;
3484 }
3485 ccnt++;
3486 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3487 SCTP_DECR_CHK_COUNT();
3488 atomic_subtract_int(&sctppcbinfo.ipi_free_chunks, 1);
3489 asoc->free_chunk_cnt--;
3490 chk = TAILQ_FIRST(&asoc->free_chunks);
3491 }
3492 /* pending send queue SHOULD be empty */
3493 if (!TAILQ_EMPTY(&asoc->send_queue)) {
3494 chk = TAILQ_FIRST(&asoc->send_queue);
3495 while (chk) {
3496 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3497 if (chk->data) {
3498 sctp_m_freem(chk->data);
3499 chk->data = NULL;
3500 }
3501 ccnt++;
3502 sctp_free_remote_addr(chk->whoTo);
3503 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3504 SCTP_DECR_CHK_COUNT();
3505 chk = TAILQ_FIRST(&asoc->send_queue);
3506 }
3507 }
3508/*
3509 if(ccnt) {
3510 printf("Freed %d from send_queue\n", ccnt);
3511 ccnt = 0;
3512 }
3513*/
3514 /* sent queue SHOULD be empty */
3515 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3516 chk = TAILQ_FIRST(&asoc->sent_queue);
3517 while (chk) {
3518 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3519 if (chk->data) {
3520 sctp_m_freem(chk->data);
3521 chk->data = NULL;
3522 }
3523 ccnt++;
3524 sctp_free_remote_addr(chk->whoTo);
3525 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3526 SCTP_DECR_CHK_COUNT();
3527 chk = TAILQ_FIRST(&asoc->sent_queue);
3528 }
3529 }
3530/*
3531 if(ccnt) {
3532 printf("Freed %d from sent_queue\n", ccnt);
3533 ccnt = 0;
3534 }
3535*/
3536 /* control queue MAY not be empty */
3537 if (!TAILQ_EMPTY(&asoc->control_send_queue)) {
3538 chk = TAILQ_FIRST(&asoc->control_send_queue);
3539 while (chk) {
3540 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
3541 if (chk->data) {
3542 sctp_m_freem(chk->data);
3543 chk->data = NULL;
3544 }
3545 ccnt++;
3546 sctp_free_remote_addr(chk->whoTo);
3547 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3548 SCTP_DECR_CHK_COUNT();
3549 chk = TAILQ_FIRST(&asoc->control_send_queue);
3550 }
3551 }
3552/*
3553 if(ccnt) {
3554 printf("Freed %d from ctrl_queue\n", ccnt);
3555 ccnt = 0;
3556 }
3557*/
3558 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
3559 chk = TAILQ_FIRST(&asoc->reasmqueue);
3560 while (chk) {
3561 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
3562 if (chk->data) {
3563 sctp_m_freem(chk->data);
3564 chk->data = NULL;
3565 }
3566 sctp_free_remote_addr(chk->whoTo);
3567 ccnt++;
3568 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3569 SCTP_DECR_CHK_COUNT();
3570 chk = TAILQ_FIRST(&asoc->reasmqueue);
3571 }
3572 }
3573/*
3574 if(ccnt) {
3575 printf("Freed %d from reasm_queue\n", ccnt);
3576 ccnt = 0;
3577 }
3578*/
3579 if (asoc->mapping_array) {
3580 SCTP_FREE(asoc->mapping_array);
3581 asoc->mapping_array = NULL;
3582 }
3583 /* the stream outs */
3584 if (asoc->strmout) {
3585 SCTP_FREE(asoc->strmout);
3586 asoc->strmout = NULL;
3587 }
3588 asoc->streamoutcnt = 0;
3589 if (asoc->strmin) {
3590 struct sctp_queued_to_read *ctl;
3591 int i;
3592
3593 for (i = 0; i < asoc->streamincnt; i++) {
3594 if (!TAILQ_EMPTY(&asoc->strmin[i].inqueue)) {
3595			/* We have something on the streamin queue */
3596 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
3597 while (ctl) {
3598 TAILQ_REMOVE(&asoc->strmin[i].inqueue,
3599 ctl, next);
3600 sctp_free_remote_addr(ctl->whoFrom);
3601 if (ctl->data) {
3602 sctp_m_freem(ctl->data);
3603 ctl->data = NULL;
3604 }
3605 /*
3606 * We don't free the address here
3607 * since all the net's were freed
3608 * above.
3609 */
3610 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, ctl);
3611 SCTP_DECR_READQ_COUNT();
3612 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
3613 }
3614 }
3615 }
3616 SCTP_FREE(asoc->strmin);
3617 asoc->strmin = NULL;
3618 }
3619 asoc->streamincnt = 0;
3620 while (!TAILQ_EMPTY(&asoc->nets)) {
3621 net = TAILQ_FIRST(&asoc->nets);
3622 /* pull from list */
3623 if ((sctppcbinfo.ipi_count_raddr == 0) || (prev == net)) {
3624#ifdef INVARIANTS
3625 panic("no net's left alloc'ed, or list points to itself");
3626#endif
3627 break;
3628 }
3629 prev = net;
3630 TAILQ_REMOVE(&asoc->nets, net, sctp_next);
3631 sctp_free_remote_addr(net);
3632 }
3633
3308}
3309
3310/*
3311 * remove a remote endpoint address from an association, it will fail if the
3312 * address does not exist.
3313 */
3314int
3315sctp_del_remote_addr(struct sctp_tcb *stcb, struct sockaddr *remaddr)
3316{
3317 /*
3318 * Here we need to remove a remote address. This is quite simple, we
3319 * first find it in the list of address for the association
3320 * (tasoc->asoc.nets) and then if it is there, we do a LIST_REMOVE
3321 * on that item. Note we do not allow it to be removed if there are
3322 * no other addresses.
3323 */
3324 struct sctp_association *asoc;
3325 struct sctp_nets *net, *net_tmp;
3326
3327 asoc = &stcb->asoc;
3328
3329 /* locate the address */
3330 for (net = TAILQ_FIRST(&asoc->nets); net != NULL; net = net_tmp) {
3331 net_tmp = TAILQ_NEXT(net, sctp_next);
3332 if (net->ro._l_addr.sa.sa_family != remaddr->sa_family) {
3333 continue;
3334 }
3335 if (sctp_cmpaddr((struct sockaddr *)&net->ro._l_addr,
3336 remaddr)) {
3337 /* we found the guy */
3338 if (asoc->numnets < 2) {
3339 /* Must have at LEAST two remote addresses */
3340 return (-1);
3341 } else {
3342 sctp_remove_net(stcb, net);
3343 return (0);
3344 }
3345 }
3346 }
3347 /* not found. */
3348 return (-2);
3349}
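
Illustrative aside, not part of the committed source: sctp_del_remote_addr() has three outcomes, all visible above: 0 when the address was found and removed, -1 when removal is refused because the association holds fewer than two remote addresses, and -2 when the address is not in the list. A hypothetical caller (invented name, errno mapping chosen only for illustration, kernel headers assumed) might translate them as:

static int
example_drop_peer_addr(struct sctp_tcb *stcb, struct sockaddr *sa)
{
	switch (sctp_del_remote_addr(stcb, sa)) {
	case 0:
		return (0);		/* removed */
	case -1:
		return (EBUSY);		/* refused: fewer than two addresses */
	default:			/* -2 */
		return (EINVAL);	/* no such remote address */
	}
}
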
3350
3351
3352void
3353sctp_add_vtag_to_timewait(struct sctp_inpcb *inp, uint32_t tag, uint32_t time)
3354{
3355 struct sctpvtaghead *chain;
3356 struct sctp_tagblock *twait_block;
3357 struct timeval now;
3358 int set, i;
3359
3360 SCTP_GETTIME_TIMEVAL(&now);
3361 chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
3362 set = 0;
3363 if (!SCTP_LIST_EMPTY(chain)) {
3364 /* Block(s) present, lets find space, and expire on the fly */
3365 LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
3366 for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
3367 if ((twait_block->vtag_block[i].v_tag == 0) &&
3368 !set) {
3369 twait_block->vtag_block[i].tv_sec_at_expire =
3370 now.tv_sec + time;
3371 twait_block->vtag_block[i].v_tag = tag;
3372 set = 1;
3373 } else if ((twait_block->vtag_block[i].v_tag) &&
3374 ((long)twait_block->vtag_block[i].tv_sec_at_expire >
3375 now.tv_sec)) {
3376 /* Audit expires this guy */
3377 twait_block->vtag_block[i].tv_sec_at_expire = 0;
3378 twait_block->vtag_block[i].v_tag = 0;
3379 if (set == 0) {
3380 /* Reuse it for my new tag */
3381 twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec + SCTP_TIME_WAIT;
3382 twait_block->vtag_block[0].v_tag = tag;
3383 set = 1;
3384 }
3385 }
3386 }
3387 if (set) {
3388 /*
3389 * We only do up to the block where we can
3390 * place our tag for audits
3391 */
3392 break;
3393 }
3394 }
3395 }
3396 /* Need to add a new block to chain */
3397 if (!set) {
3398 SCTP_MALLOC(twait_block, struct sctp_tagblock *,
3399 sizeof(struct sctp_tagblock), "TimeWait");
3400 if (twait_block == NULL) {
3401 return;
3402 }
3403 memset(twait_block, 0, sizeof(struct sctp_tagblock));
3404 LIST_INSERT_HEAD(chain, twait_block, sctp_nxt_tagblock);
3405 twait_block->vtag_block[0].tv_sec_at_expire = now.tv_sec +
3406 SCTP_TIME_WAIT;
3407 twait_block->vtag_block[0].v_tag = tag;
3408 }
3409}
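
Illustrative aside, not part of the committed source: sctp_add_vtag_to_timewait() selects a bucket with tag % SCTP_STACK_VTAG_HASH_SIZE and stamps the slot with an expiry of now.tv_sec + time, recycling any slot whose timer has already run out while it scans. The tiny standalone program below only works through that bucket/expiry arithmetic; the hash size used is a placeholder, not the value from the SCTP headers.

#include <stdio.h>

#define VTAG_HASH_SIZE 32	/* placeholder for SCTP_STACK_VTAG_HASH_SIZE */

int
main(void)
{
	unsigned int tag = 0x5cf2ab01u;	/* an example verification tag */
	long now_sec = 1000000;		/* stand-in for SCTP_GETTIME_TIMEVAL() */
	long wait = 60;			/* the "time" argument, in seconds */

	printf("tag 0x%08x -> bucket %u, expires at %ld\n",
	    tag, tag % VTAG_HASH_SIZE, now_sec + wait);
	return (0);
}
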
3410
3411
3412static void
3413sctp_iterator_asoc_being_freed(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
3414{
3415 struct sctp_iterator *it;
3416
3417 /*
3418	 * Unlock the tcb lock; we do this so we avoid a deadlock scenario
3419 * where the iterator is waiting on the TCB lock and the TCB lock is
3420 * waiting on the iterator lock.
3421 */
3422 it = stcb->asoc.stcb_starting_point_for_iterator;
3423 if (it == NULL) {
3424 return;
3425 }
3426 if (it->inp != stcb->sctp_ep) {
3427 /* hmm, focused on the wrong one? */
3428 return;
3429 }
3430 if (it->stcb != stcb) {
3431 return;
3432 }
3433 it->stcb = LIST_NEXT(stcb, sctp_tcblist);
3434 if (it->stcb == NULL) {
3435 /* done with all asoc's in this assoc */
3436 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
3437 it->inp = NULL;
3438 } else {
3439 it->inp = LIST_NEXT(inp, sctp_list);
3440 }
3441 }
3442}
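
Illustrative aside, not part of the committed source: sctp_iterator_asoc_being_freed() keeps the global association iterator from holding a pointer to a TCB that is being torn down; if the iterator is parked on this stcb it is advanced to the next TCB, or to the next inp when the list is exhausted. The generic pattern, with invented types, is simply:

struct node {
	struct node *next;
};

struct cursor {
	struct node *pos;	/* where an asynchronous walker resumes */
};

static void
cursor_skip_if_dying(struct cursor *c, struct node *dying)
{
	if (c == NULL || c->pos != dying)
		return;			/* the walker is not looking at us */
	c->pos = dying->next;		/* may become NULL: walk finished */
}
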
3443
3444/*
3445 * Free the association after un-hashing the remote port.
3446 */
3447int
3448sctp_free_assoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int from_inpcbfree, int from_location)
3449{
3450 int i;
3451 struct sctp_association *asoc;
3452 struct sctp_nets *net, *prev;
3453 struct sctp_laddr *laddr;
3454 struct sctp_tmit_chunk *chk;
3455 struct sctp_asconf_addr *aparam;
3456 struct sctp_stream_reset_list *liste;
3457 struct sctp_queued_to_read *sq;
3458 struct sctp_stream_queue_pending *sp;
3459 sctp_sharedkey_t *shared_key;
3460 struct socket *so;
3461 int ccnt = 0;
3462 int cnt = 0;
3463
3464 /* first, lets purge the entry from the hash table. */
3465
3466#ifdef SCTP_LOG_CLOSING
3467 sctp_log_closing(inp, stcb, 6);
3468#endif
3469 if (stcb->asoc.state == 0) {
3470#ifdef SCTP_LOG_CLOSING
3471 sctp_log_closing(inp, NULL, 7);
3472#endif
3473 /* there is no asoc, really TSNH :-0 */
3474 return (1);
3475 }
3476 /* TEMP CODE */
3477 if (stcb->freed_from_where == 0) {
3478 /* Only record the first place free happened from */
3479 stcb->freed_from_where = from_location;
3480 }
3481 /* TEMP CODE */
3482
3483 asoc = &stcb->asoc;
3484 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3485 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
3486 /* nothing around */
3487 so = NULL;
3488 else
3489 so = inp->sctp_socket;
3490
3491 /*
3492	 * We use timer-based freeing if a reader or writer is in the way.
3493 * So we first check if we are actually being called from a timer,
3494 * if so we abort early if a reader or writer is still in the way.
3495 */
3496 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) &&
3497 (from_inpcbfree == SCTP_NORMAL_PROC)) {
3498 /*
3499 * is it the timer driving us? if so are the reader/writers
3500 * gone?
3501 */
3502 if (stcb->asoc.refcnt) {
3503 /* nope, reader or writer in the way */
3504 sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
3505 /* no asoc destroyed */
3506 SCTP_TCB_UNLOCK(stcb);
3507#ifdef SCTP_LOG_CLOSING
3508 sctp_log_closing(inp, stcb, 8);
3509#endif
3510 return (0);
3511 }
3512 }
3513 /* now clean up any other timers */
3514 SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
3515 SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
3516 SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
3517 SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
3518 SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
3519 SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer);
3520 SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
3521
3522 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3523 SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
3524 SCTP_OS_TIMER_STOP(&net->rxt_timer.timer);
3525 SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
3526 }
3527 /* Now the read queue needs to be cleaned up (only once) */
3528 cnt = 0;
3529 if ((stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) == 0) {
3530 SCTP_INP_READ_LOCK(inp);
3531 TAILQ_FOREACH(sq, &inp->read_queue, next) {
3532 if (sq->stcb == stcb) {
3533 sq->do_not_ref_stcb = 1;
3534 sq->sinfo_cumtsn = stcb->asoc.cumulative_tsn;
3535 /*
3536 * If there is no end, there never will be
3537 * now.
3538 */
3539 if (sq->end_added == 0) {
3540 /* Held for PD-API clear that. */
3541 sq->pdapi_aborted = 1;
3542 sq->held_length = 0;
3543 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_PDAPIEVNT)) {
3544 /*
3545 * Need to add a PD-API
3546 * aborted indication.
3547 * Setting the control_pdapi
3548 * assures that it will be
3549 * added right after this
3550 * msg.
3551 */
3552 stcb->asoc.control_pdapi = sq;
3553 sctp_notify_partial_delivery_indication(stcb,
3554 SCTP_PARTIAL_DELIVERY_ABORTED, 1);
3555 stcb->asoc.control_pdapi = NULL;
3556 }
3557 }
3558 /* Add an end to wake them */
3559 sq->end_added = 1;
3560 cnt++;
3561 }
3562 }
3563 SCTP_INP_READ_UNLOCK(inp);
3564 if (stcb->block_entry) {
3565 cnt++;
3566 stcb->block_entry->error = ECONNRESET;
3567 stcb->block_entry = NULL;
3568 }
3569 }
3570 stcb->asoc.state |= SCTP_STATE_ABOUT_TO_BE_FREED;
3571 if ((from_inpcbfree != SCTP_PCBFREE_FORCE) && (stcb->asoc.refcnt)) {
3572 /*
3573 * reader or writer in the way, we have hopefully given him
3574 * something to chew on above.
3575 */
3576 sctp_timer_start(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL);
3577 SCTP_TCB_UNLOCK(stcb);
3578 if (so) {
3579 SCTP_INP_RLOCK(inp);
3580 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3581 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
3582 /* nothing around */
3583 so = NULL;
3584 if (so) {
3585 /* Wake any reader/writers */
3586 sctp_sorwakeup(inp, so);
3587 sctp_sowwakeup(inp, so);
3588 }
3589 SCTP_INP_RUNLOCK(inp);
3590
3591 }
3592#ifdef SCTP_LOG_CLOSING
3593 sctp_log_closing(inp, stcb, 9);
3594#endif
3595 /* no asoc destroyed */
3596 return (0);
3597 }
3598#ifdef SCTP_LOG_CLOSING
3599 sctp_log_closing(inp, stcb, 10);
3600#endif
3601 /*
3602 * When I reach here, no others want to kill the assoc yet.. and I
3603	 * own the lock. Now it's possible an abort comes in when I do the
3604	 * lock exchange below to grab all the locks to do the final take
3605	 * out. To prevent this we increment the count, which will start a
3606 * timer and blow out above thus assuring us that we hold exclusive
3607 * killing of the asoc. Note that after getting back the TCB lock we
3608 * will go ahead and increment the counter back up and stop any
3609 * timer a passing stranger may have started :-S
3610 */
3611 if (from_inpcbfree == SCTP_NORMAL_PROC) {
3612 atomic_add_int(&stcb->asoc.refcnt, 1);
3613
3614 SCTP_TCB_UNLOCK(stcb);
3615
3616 SCTP_ITERATOR_LOCK();
3617 SCTP_INP_INFO_WLOCK();
3618 SCTP_INP_WLOCK(inp);
3619 SCTP_TCB_LOCK(stcb);
3620 }
3621 /* Double check the GONE flag */
3622 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
3623 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE))
3624 /* nothing around */
3625 so = NULL;
3626
3627 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
3628 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
3629 /*
3630 * For TCP type we need special handling when we are
3631	 * connected. We also include the peel'ed off ones too.
3632 */
3633 if (inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) {
3634 inp->sctp_flags &= ~SCTP_PCB_FLAGS_CONNECTED;
3635 inp->sctp_flags |= SCTP_PCB_FLAGS_WAS_CONNECTED;
3636 if (so) {
3637 SOCK_LOCK(so);
3638 if (so->so_rcv.sb_cc == 0) {
3639 so->so_state &= ~(SS_ISCONNECTING |
3640 SS_ISDISCONNECTING |
3641 SS_ISCONFIRMING |
3642 SS_ISCONNECTED);
3643 }
3644 SOCK_UNLOCK(so);
3645 sctp_sowwakeup(inp, so);
3646 sctp_sorwakeup(inp, so);
3647 wakeup(&so->so_timeo);
3648 }
3649 }
3650 }
3651 /*
3652 * Make it invalid too, that way if its about to run it will abort
3653 * and return.
3654 */
3655 sctp_iterator_asoc_being_freed(inp, stcb);
3656 /* re-increment the lock */
3657 if (from_inpcbfree == SCTP_NORMAL_PROC) {
3658 atomic_add_int(&stcb->asoc.refcnt, -1);
3659 }
3660 asoc->state = 0;
3661 if (inp->sctp_tcbhash) {
3662 LIST_REMOVE(stcb, sctp_tcbhash);
3663 }
3664 if (stcb->asoc.in_restart_hash) {
3665 LIST_REMOVE(stcb, sctp_tcbrestarhash);
3666 }
3667 /* Now lets remove it from the list of ALL associations in the EP */
3668 LIST_REMOVE(stcb, sctp_tcblist);
3669 if (from_inpcbfree == SCTP_NORMAL_PROC) {
3670 SCTP_INP_INCR_REF(inp);
3671 SCTP_INP_WUNLOCK(inp);
3672 SCTP_ITERATOR_UNLOCK();
3673 }
3674 /* pull from vtag hash */
3675 LIST_REMOVE(stcb, sctp_asocs);
3676 sctp_add_vtag_to_timewait(inp, asoc->my_vtag, SCTP_TIME_WAIT);
3677
3678
3679 /*
3680	 * Now restop the timers to be sure - this is paranoia at its finest!
3681 */
3682 SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
3683 SCTP_OS_TIMER_STOP(&asoc->hb_timer.timer);
3684 SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
3685 SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
3686 SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
3687 SCTP_OS_TIMER_STOP(&asoc->shut_guard_timer.timer);
3688 SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
3689 SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
3690
3691 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3692 SCTP_OS_TIMER_STOP(&net->fr_timer.timer);
3693 SCTP_OS_TIMER_STOP(&net->rxt_timer.timer);
3694 SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
3695 }
3696
3697 asoc->strreset_timer.type = SCTP_TIMER_TYPE_NONE;
3698 prev = NULL;
3699 /*
3700 * The chunk lists and such SHOULD be empty but we check them just
3701 * in case.
3702 */
3703 /* anything on the wheel needs to be removed */
3704 for (i = 0; i < asoc->streamoutcnt; i++) {
3705 struct sctp_stream_out *outs;
3706
3707 outs = &asoc->strmout[i];
3708 /* now clean up any chunks here */
3709 sp = TAILQ_FIRST(&outs->outqueue);
3710 while (sp) {
3711 TAILQ_REMOVE(&outs->outqueue, sp, next);
3712 if (sp->data) {
3713 sctp_m_freem(sp->data);
3714 sp->data = NULL;
3715 sp->tail_mbuf = NULL;
3716 }
3717 sctp_free_remote_addr(sp->net);
3718 sctp_free_spbufspace(stcb, asoc, sp);
3719 /* Free the zone stuff */
3720 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_strmoq, sp);
3721 SCTP_DECR_STRMOQ_COUNT();
3722 sp = TAILQ_FIRST(&outs->outqueue);
3723 }
3724 }
3725
3726 while ((sp = TAILQ_FIRST(&asoc->free_strmoq)) != NULL) {
3727 TAILQ_REMOVE(&asoc->free_strmoq, sp, next);
3728 if (sp->data) {
3729 sctp_m_freem(sp->data);
3730 sp->data = NULL;
3731 sp->tail_mbuf = NULL;
3732 }
3733 /* Free the zone stuff */
3734 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_strmoq, sp);
3735 SCTP_DECR_STRMOQ_COUNT();
3736 atomic_add_int(&sctppcbinfo.ipi_free_strmoq, -1);
3737 }
3738
3739 while ((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) {
3740 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
3741 SCTP_FREE(liste);
3742 }
3743
3744 sq = TAILQ_FIRST(&asoc->pending_reply_queue);
3745 while (sq) {
3746 TAILQ_REMOVE(&asoc->pending_reply_queue, sq, next);
3747 if (sq->data) {
3748 sctp_m_freem(sq->data);
3749 sq->data = NULL;
3750 }
3751 sctp_free_remote_addr(sq->whoFrom);
3752 sq->whoFrom = NULL;
3753 sq->stcb = NULL;
3754 /* Free the ctl entry */
3755 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, sq);
3756 SCTP_DECR_READQ_COUNT();
3757 sq = TAILQ_FIRST(&asoc->pending_reply_queue);
3758 }
3759
3760 chk = TAILQ_FIRST(&asoc->free_chunks);
3761 while (chk) {
3762 TAILQ_REMOVE(&asoc->free_chunks, chk, sctp_next);
3763 if (chk->data) {
3764 sctp_m_freem(chk->data);
3765 chk->data = NULL;
3766 }
3767 ccnt++;
3768 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3769 SCTP_DECR_CHK_COUNT();
3770 atomic_subtract_int(&sctppcbinfo.ipi_free_chunks, 1);
3771 asoc->free_chunk_cnt--;
3772 chk = TAILQ_FIRST(&asoc->free_chunks);
3773 }
3774 /* pending send queue SHOULD be empty */
3775 if (!TAILQ_EMPTY(&asoc->send_queue)) {
3776 chk = TAILQ_FIRST(&asoc->send_queue);
3777 while (chk) {
3778 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
3779 if (chk->data) {
3780 sctp_m_freem(chk->data);
3781 chk->data = NULL;
3782 }
3783 ccnt++;
3784 sctp_free_remote_addr(chk->whoTo);
3785 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3786 SCTP_DECR_CHK_COUNT();
3787 chk = TAILQ_FIRST(&asoc->send_queue);
3788 }
3789 }
3790/*
3791 if(ccnt) {
3792 printf("Freed %d from send_queue\n", ccnt);
3793 ccnt = 0;
3794 }
3795*/
3796 /* sent queue SHOULD be empty */
3797 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3798 chk = TAILQ_FIRST(&asoc->sent_queue);
3799 while (chk) {
3800 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
3801 if (chk->data) {
3802 sctp_m_freem(chk->data);
3803 chk->data = NULL;
3804 }
3805 ccnt++;
3806 sctp_free_remote_addr(chk->whoTo);
3807 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3808 SCTP_DECR_CHK_COUNT();
3809 chk = TAILQ_FIRST(&asoc->sent_queue);
3810 }
3811 }
3812/*
3813 if(ccnt) {
3814 printf("Freed %d from sent_queue\n", ccnt);
3815 ccnt = 0;
3816 }
3817*/
3818 /* control queue MAY not be empty */
3819 if (!TAILQ_EMPTY(&asoc->control_send_queue)) {
3820 chk = TAILQ_FIRST(&asoc->control_send_queue);
3821 while (chk) {
3822 TAILQ_REMOVE(&asoc->control_send_queue, chk, sctp_next);
3823 if (chk->data) {
3824 sctp_m_freem(chk->data);
3825 chk->data = NULL;
3826 }
3827 ccnt++;
3828 sctp_free_remote_addr(chk->whoTo);
3829 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3830 SCTP_DECR_CHK_COUNT();
3831 chk = TAILQ_FIRST(&asoc->control_send_queue);
3832 }
3833 }
3834/*
3835 if(ccnt) {
3836 printf("Freed %d from ctrl_queue\n", ccnt);
3837 ccnt = 0;
3838 }
3839*/
3840 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
3841 chk = TAILQ_FIRST(&asoc->reasmqueue);
3842 while (chk) {
3843 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
3844 if (chk->data) {
3845 sctp_m_freem(chk->data);
3846 chk->data = NULL;
3847 }
3848 sctp_free_remote_addr(chk->whoTo);
3849 ccnt++;
3850 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_chunk, chk);
3851 SCTP_DECR_CHK_COUNT();
3852 chk = TAILQ_FIRST(&asoc->reasmqueue);
3853 }
3854 }
3855/*
3856 if(ccnt) {
3857 printf("Freed %d from reasm_queue\n", ccnt);
3858 ccnt = 0;
3859 }
3860*/
3861 if (asoc->mapping_array) {
3862 SCTP_FREE(asoc->mapping_array);
3863 asoc->mapping_array = NULL;
3864 }
3865 /* the stream outs */
3866 if (asoc->strmout) {
3867 SCTP_FREE(asoc->strmout);
3868 asoc->strmout = NULL;
3869 }
3870 asoc->streamoutcnt = 0;
3871 if (asoc->strmin) {
3872 struct sctp_queued_to_read *ctl;
3873 int i;
3874
3875 for (i = 0; i < asoc->streamincnt; i++) {
3876 if (!TAILQ_EMPTY(&asoc->strmin[i].inqueue)) {
3877	 /* We have something on the stream-in queue */
3878 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
3879 while (ctl) {
3880 TAILQ_REMOVE(&asoc->strmin[i].inqueue,
3881 ctl, next);
3882 sctp_free_remote_addr(ctl->whoFrom);
3883 if (ctl->data) {
3884 sctp_m_freem(ctl->data);
3885 ctl->data = NULL;
3886 }
3887 /*
3888 * We don't free the address here
3889	 * since all the nets were freed
3890 * above.
3891 */
3892 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, ctl);
3893 SCTP_DECR_READQ_COUNT();
3894 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
3895 }
3896 }
3897 }
3898 SCTP_FREE(asoc->strmin);
3899 asoc->strmin = NULL;
3900 }
3901 asoc->streamincnt = 0;
3902 while (!TAILQ_EMPTY(&asoc->nets)) {
3903 net = TAILQ_FIRST(&asoc->nets);
3904 /* pull from list */
3905 if ((sctppcbinfo.ipi_count_raddr == 0) || (prev == net)) {
3906#ifdef INVARIANTS
3907 panic("no net's left alloc'ed, or list points to itself");
3908#endif
3909 break;
3910 }
3911 prev = net;
3912 TAILQ_REMOVE(&asoc->nets, net, sctp_next);
3913 sctp_free_remote_addr(net);
3914 }
3915
3634 /* local addresses, if any */
3635 while (!SCTP_LIST_EMPTY(&asoc->sctp_local_addr_list)) {
3636 laddr = LIST_FIRST(&asoc->sctp_local_addr_list);
3637 LIST_REMOVE(laddr, sctp_nxt_addr);
3638 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
3639 SCTP_DECR_LADDR_COUNT();
3916 while (!SCTP_LIST_EMPTY(&asoc->sctp_restricted_addrs)) {
3917 laddr = LIST_FIRST(&asoc->sctp_restricted_addrs);
3918 sctp_remove_laddr(laddr);
3640 }
3919 }
3920
3641 /* pending asconf (address) parameters */
3642 while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
3643 aparam = TAILQ_FIRST(&asoc->asconf_queue);
3644 TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
3645 SCTP_FREE(aparam);
3646 }
3647 if (asoc->last_asconf_ack_sent != NULL) {
3648 sctp_m_freem(asoc->last_asconf_ack_sent);
3649 asoc->last_asconf_ack_sent = NULL;
3650 }
3651 /* clean up auth stuff */
3652 if (asoc->local_hmacs)
3653 sctp_free_hmaclist(asoc->local_hmacs);
3654 if (asoc->peer_hmacs)
3655 sctp_free_hmaclist(asoc->peer_hmacs);
3656
3657 if (asoc->local_auth_chunks)
3658 sctp_free_chunklist(asoc->local_auth_chunks);
3659 if (asoc->peer_auth_chunks)
3660 sctp_free_chunklist(asoc->peer_auth_chunks);
3661
3662 sctp_free_authinfo(&asoc->authinfo);
3663
3664 shared_key = LIST_FIRST(&asoc->shared_keys);
3665 while (shared_key) {
3666 LIST_REMOVE(shared_key, next);
3667 sctp_free_sharedkey(shared_key);
3668 shared_key = LIST_FIRST(&asoc->shared_keys);
3669 }
3670
3671 /* Insert new items here :> */
3672
3673 /* Get rid of LOCK */
3674 SCTP_TCB_LOCK_DESTROY(stcb);
3675 SCTP_TCB_SEND_LOCK_DESTROY(stcb);
3676 if (from_inpcbfree == SCTP_NORMAL_PROC) {
3677 SCTP_INP_INFO_WUNLOCK();
3678 SCTP_INP_RLOCK(inp);
3679 }
3680#ifdef SCTP_TRACK_FREED_ASOCS
3681 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3682 /* now clean up the tasoc itself */
3683 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3684 SCTP_DECR_ASOC_COUNT();
3685 } else {
3686 LIST_INSERT_HEAD(&inp->sctp_asoc_free_list, stcb, sctp_tcblist);
3687 }
3688#else
3689 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3690 SCTP_DECR_ASOC_COUNT();
3691#endif
3692 if (from_inpcbfree == SCTP_NORMAL_PROC) {
3693 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3694 /*
3695	 * If it's NOT the inp_free calling us AND sctp_close
3696	 * has been called, we call back...
3697 */
3698 SCTP_INP_RUNLOCK(inp);
3699 /*
3700 * This will start the kill timer (if we are the
3701	 * last one) since we still hold an increment. But this
3702 * is the only safe way to do this since otherwise
3703 * if the socket closes at the same time we are here
3704 * we might collide in the cleanup.
3705 */
3706 sctp_inpcb_free(inp, 0, 0);
3707 SCTP_INP_DECR_REF(inp);
3708 goto out_of;
3709 } else {
3710 /* The socket is still open. */
3711 SCTP_INP_DECR_REF(inp);
3712 }
3713 }
3714 if (from_inpcbfree == SCTP_NORMAL_PROC) {
3715 SCTP_INP_RUNLOCK(inp);
3716 }
3717out_of:
3718 /* destroyed the asoc */
3719#ifdef SCTP_LOG_CLOSING
3720 sctp_log_closing(inp, NULL, 11);
3721#endif
3722 return (1);
3723}
3724
3725
3726
3727/*
3728 * determine if a destination is "reachable" based upon the addresses bound
3729 * to the current endpoint (e.g. only v4 or v6 currently bound)
3730 */
3731/*
3732 * FIX: if we allow assoc-level bindx(), then this needs to be fixed to use
3733 * assoc level v4/v6 flags, as the assoc *may* not have the same address
3734 * types bound as its endpoint
3735 */
3736int
3737sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr)
3738{
3739 struct sctp_inpcb *inp;
3740 int answer;
3741
3742 /*
3743	 * No locks are taken here; in all cases the TCB is already locked and an
3744	 * assoc is up. There is either an INP lock applied by the caller (in
3745 * asconf case when deleting an address) or NOT in the HB case,
3746 * however if HB then the INP increment is up and the INP will not
3747 * be removed (on top of the fact that we have a TCB lock). So we
3748 * only want to read the sctp_flags, which is either bound-all or
3749 * not.. no protection needed since once an assoc is up you can't be
3750 * changing your binding.
3751 */
3752 inp = stcb->sctp_ep;
3753 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3754 /* if bound all, destination is not restricted */
3755 /*
3756 * RRS: Question during lock work: Is this correct? If you
3757 * are bound-all you still might need to obey the V4--V6
3758 * flags??? IMO this bound-all stuff needs to be removed!
3759 */
3760 return (1);
3761 }
3762 /* NOTE: all "scope" checks are done when local addresses are added */
3763 if (destaddr->sa_family == AF_INET6) {
3764 answer = inp->ip_inp.inp.inp_vflag & INP_IPV6;
3765 } else if (destaddr->sa_family == AF_INET) {
3766 answer = inp->ip_inp.inp.inp_vflag & INP_IPV4;
3767 } else {
3768 /* invalid family, so it's unreachable */
3769 answer = 0;
3770 }
3771 return (answer);
3772}
3773
3774/*
3775 * update the inp_vflags on an endpoint
3776 */
3777static void
3778sctp_update_ep_vflag(struct sctp_inpcb *inp)
3779{
3780 struct sctp_laddr *laddr;
3781
3782 /* first clear the flag */
3783 inp->ip_inp.inp.inp_vflag = 0;
3784 /* set the flag based on addresses on the ep list */
3785 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3786 if (laddr->ifa == NULL) {
3787#ifdef SCTP_DEBUG
3788 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
3789 printf("An ounce of prevention is worth a pound of cure\n");
3790 }
3791#endif /* SCTP_DEBUG */
3792 continue;
3793 }
3921 /* pending asconf (address) parameters */
3922 while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
3923 aparam = TAILQ_FIRST(&asoc->asconf_queue);
3924 TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
3925 SCTP_FREE(aparam);
3926 }
3927 if (asoc->last_asconf_ack_sent != NULL) {
3928 sctp_m_freem(asoc->last_asconf_ack_sent);
3929 asoc->last_asconf_ack_sent = NULL;
3930 }
3931 /* clean up auth stuff */
3932 if (asoc->local_hmacs)
3933 sctp_free_hmaclist(asoc->local_hmacs);
3934 if (asoc->peer_hmacs)
3935 sctp_free_hmaclist(asoc->peer_hmacs);
3936
3937 if (asoc->local_auth_chunks)
3938 sctp_free_chunklist(asoc->local_auth_chunks);
3939 if (asoc->peer_auth_chunks)
3940 sctp_free_chunklist(asoc->peer_auth_chunks);
3941
3942 sctp_free_authinfo(&asoc->authinfo);
3943
3944 shared_key = LIST_FIRST(&asoc->shared_keys);
3945 while (shared_key) {
3946 LIST_REMOVE(shared_key, next);
3947 sctp_free_sharedkey(shared_key);
3948 shared_key = LIST_FIRST(&asoc->shared_keys);
3949 }
3950
3951 /* Insert new items here :> */
3952
3953 /* Get rid of LOCK */
3954 SCTP_TCB_LOCK_DESTROY(stcb);
3955 SCTP_TCB_SEND_LOCK_DESTROY(stcb);
3956 if (from_inpcbfree == SCTP_NORMAL_PROC) {
3957 SCTP_INP_INFO_WUNLOCK();
3958 SCTP_INP_RLOCK(inp);
3959 }
3960#ifdef SCTP_TRACK_FREED_ASOCS
3961 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3962 /* now clean up the tasoc itself */
3963 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3964 SCTP_DECR_ASOC_COUNT();
3965 } else {
3966 LIST_INSERT_HEAD(&inp->sctp_asoc_free_list, stcb, sctp_tcblist);
3967 }
3968#else
3969 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_asoc, stcb);
3970 SCTP_DECR_ASOC_COUNT();
3971#endif
3972 if (from_inpcbfree == SCTP_NORMAL_PROC) {
3973 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
3974 /*
3975	 * If it's NOT the inp_free calling us AND sctp_close
3976	 * has been called, we call back...
3977 */
3978 SCTP_INP_RUNLOCK(inp);
3979 /*
3980 * This will start the kill timer (if we are the
3981	 * last one) since we still hold an increment. But this
3982 * is the only safe way to do this since otherwise
3983 * if the socket closes at the same time we are here
3984 * we might collide in the cleanup.
3985 */
3986 sctp_inpcb_free(inp, 0, 0);
3987 SCTP_INP_DECR_REF(inp);
3988 goto out_of;
3989 } else {
3990 /* The socket is still open. */
3991 SCTP_INP_DECR_REF(inp);
3992 }
3993 }
3994 if (from_inpcbfree == SCTP_NORMAL_PROC) {
3995 SCTP_INP_RUNLOCK(inp);
3996 }
3997out_of:
3998 /* destroyed the asoc */
3999#ifdef SCTP_LOG_CLOSING
4000 sctp_log_closing(inp, NULL, 11);
4001#endif
4002 return (1);
4003}
4004
4005
4006
4007/*
4008 * determine if a destination is "reachable" based upon the addresses bound
4009 * to the current endpoint (e.g. only v4 or v6 currently bound)
4010 */
4011/*
4012 * FIX: if we allow assoc-level bindx(), then this needs to be fixed to use
4013 * assoc level v4/v6 flags, as the assoc *may* not have the same address
4014 * types bound as its endpoint
4015 */
4016int
4017sctp_destination_is_reachable(struct sctp_tcb *stcb, struct sockaddr *destaddr)
4018{
4019 struct sctp_inpcb *inp;
4020 int answer;
4021
4022 /*
4023	 * No locks are taken here; in all cases the TCB is already locked and an
4024	 * assoc is up. There is either an INP lock applied by the caller (in
4025 * asconf case when deleting an address) or NOT in the HB case,
4026 * however if HB then the INP increment is up and the INP will not
4027 * be removed (on top of the fact that we have a TCB lock). So we
4028 * only want to read the sctp_flags, which is either bound-all or
4029 * not.. no protection needed since once an assoc is up you can't be
4030 * changing your binding.
4031 */
4032 inp = stcb->sctp_ep;
4033 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4034 /* if bound all, destination is not restricted */
4035 /*
4036 * RRS: Question during lock work: Is this correct? If you
4037 * are bound-all you still might need to obey the V4--V6
4038 * flags??? IMO this bound-all stuff needs to be removed!
4039 */
4040 return (1);
4041 }
4042 /* NOTE: all "scope" checks are done when local addresses are added */
4043 if (destaddr->sa_family == AF_INET6) {
4044 answer = inp->ip_inp.inp.inp_vflag & INP_IPV6;
4045 } else if (destaddr->sa_family == AF_INET) {
4046 answer = inp->ip_inp.inp.inp_vflag & INP_IPV4;
4047 } else {
4048 /* invalid family, so it's unreachable */
4049 answer = 0;
4050 }
4051 return (answer);
4052}
4053
4054/*
4055 * update the inp_vflags on an endpoint
4056 */
4057static void
4058sctp_update_ep_vflag(struct sctp_inpcb *inp)
4059{
4060 struct sctp_laddr *laddr;
4061
4062 /* first clear the flag */
4063 inp->ip_inp.inp.inp_vflag = 0;
4064 /* set the flag based on addresses on the ep list */
4065 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4066 if (laddr->ifa == NULL) {
4067#ifdef SCTP_DEBUG
4068 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
4069 printf("An ounce of prevention is worth a pound of cure\n");
4070 }
4071#endif /* SCTP_DEBUG */
4072 continue;
4073 }
3794 if (laddr->ifa->ifa_addr) {
4074 if (laddr->ifa->localifa_flags & SCTP_BEING_DELETED) {
3795 continue;
3796 }
4075 continue;
4076 }
3797 if (laddr->ifa->ifa_addr->sa_family == AF_INET6) {
4077 if (laddr->ifa->address.sa.sa_family == AF_INET6) {
3798 inp->ip_inp.inp.inp_vflag |= INP_IPV6;
4078 inp->ip_inp.inp.inp_vflag |= INP_IPV6;
3799 } else if (laddr->ifa->ifa_addr->sa_family == AF_INET) {
4079 } else if (laddr->ifa->address.sa.sa_family == AF_INET) {
3800 inp->ip_inp.inp.inp_vflag |= INP_IPV4;
3801 }
3802 }
3803}
3804
3805/*
3806 * Add the address to the endpoint local address list There is nothing to be
3807 * done if we are bound to all addresses
3808 */
3809int
4080 inp->ip_inp.inp.inp_vflag |= INP_IPV4;
4081 }
4082 }
4083}
4084
4085/*
4086 * Add the address to the endpoint local address list There is nothing to be
4087 * done if we are bound to all addresses
4088 */
4089int
3810sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
4090sctp_add_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa, uint32_t action)
3811{
3812 struct sctp_laddr *laddr;
3813 int fnd, error;
3814
3815 fnd = 0;
3816
3817 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3818 /* You are already bound to all. You have it already */
3819 return (0);
3820 }
4091{
4092 struct sctp_laddr *laddr;
4093 int fnd, error;
4094
4095 fnd = 0;
4096
4097 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4098 /* You are already bound to all. You have it already */
4099 return (0);
4100 }
3821 if (ifa->ifa_addr->sa_family == AF_INET6) {
3822 struct in6_ifaddr *ifa6;
3823
3824 ifa6 = (struct in6_ifaddr *)ifa;
3825 if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
3826 IN6_IFF_DEPRECATED | IN6_IFF_ANYCAST | IN6_IFF_NOTREADY))
3827 /* Can't bind a non-existent addr. */
4101 if (ifa->address.sa.sa_family == AF_INET6) {
4102 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
4103 /* Can't bind a non-useable addr. */
3828 return (-1);
4104 return (-1);
4105 }
3829 }
3830 /* first, is it already present? */
3831 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3832 if (laddr->ifa == ifa) {
3833 fnd = 1;
3834 break;
3835 }
3836 }
3837
4106 }
4107 /* first, is it already present? */
4108 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4109 if (laddr->ifa == ifa) {
4110 fnd = 1;
4111 break;
4112 }
4113 }
4114
3838 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd == 0)) {
3839 /* Not bound to all */
3840 error = sctp_insert_laddr(&inp->sctp_addr_list, ifa);
4115 if (fnd == 0) {
4116 /* Not in the ep list */
4117 error = sctp_insert_laddr(&inp->sctp_addr_list, ifa, action);
3841 if (error != 0)
3842 return (error);
3843 inp->laddr_count++;
3844 /* update inp_vflag flags */
4118 if (error != 0)
4119 return (error);
4120 inp->laddr_count++;
4121 /* update inp_vflag flags */
3845 if (ifa->ifa_addr->sa_family == AF_INET6) {
4122 if (ifa->address.sa.sa_family == AF_INET6) {
3846 inp->ip_inp.inp.inp_vflag |= INP_IPV6;
4123 inp->ip_inp.inp.inp_vflag |= INP_IPV6;
3847 } else if (ifa->ifa_addr->sa_family == AF_INET) {
4124 } else if (ifa->address.sa.sa_family == AF_INET) {
3848 inp->ip_inp.inp.inp_vflag |= INP_IPV4;
3849 }
3850 }
3851 return (0);
3852}
3853
3854
3855/*
3856 * select a new (hopefully reachable) destination net (should only be used
3857 * when we deleted an ep addr that is the only usable source address to reach
3858 * the destination net)
3859 */
3860static void
3861sctp_select_primary_destination(struct sctp_tcb *stcb)
3862{
3863 struct sctp_nets *net;
3864
3865 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
3866 /* for now, we'll just pick the first reachable one we find */
3867 if (net->dest_state & SCTP_ADDR_UNCONFIRMED)
3868 continue;
3869 if (sctp_destination_is_reachable(stcb,
3870 (struct sockaddr *)&net->ro._l_addr)) {
3871 /* found a reachable destination */
3872 stcb->asoc.primary_destination = net;
3873 }
3874 }
3875	 /* I can't get there from here! ...we're gonna die shortly... */
3876}
3877
3878
3879/*
3880 * Delete the address from the endpoint local address list There is nothing
3881 * to be done if we are bound to all addresses
3882 */
3883int
4125 inp->ip_inp.inp.inp_vflag |= INP_IPV4;
4126 }
4127 }
4128 return (0);
4129}
4130
4131
4132/*
4133 * select a new (hopefully reachable) destination net (should only be used
4134 * when we deleted an ep addr that is the only usable source address to reach
4135 * the destination net)
4136 */
4137static void
4138sctp_select_primary_destination(struct sctp_tcb *stcb)
4139{
4140 struct sctp_nets *net;
4141
4142 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4143 /* for now, we'll just pick the first reachable one we find */
4144 if (net->dest_state & SCTP_ADDR_UNCONFIRMED)
4145 continue;
4146 if (sctp_destination_is_reachable(stcb,
4147 (struct sockaddr *)&net->ro._l_addr)) {
4148 /* found a reachable destination */
4149 stcb->asoc.primary_destination = net;
4150 }
4151 }
4152	 /* I can't get there from here! ...we're gonna die shortly... */
4153}
4154
4155
4156/*
4157 * Delete the address from the endpoint local address list There is nothing
4158 * to be done if we are bound to all addresses
4159 */
4160int
3884sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct ifaddr *ifa)
4161sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa)
3885{
3886 struct sctp_laddr *laddr;
3887 int fnd;
3888
3889 fnd = 0;
3890 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
3891 /* You are already bound to all. You have it already */
3892 return (EINVAL);
3893 }
3894 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
3895 if (laddr->ifa == ifa) {
3896 fnd = 1;
3897 break;
3898 }
3899 }
3900 if (fnd && (inp->laddr_count < 2)) {
3901 /* can't delete unless there are at LEAST 2 addresses */
3902 return (-1);
3903 }
4162{
4163 struct sctp_laddr *laddr;
4164 int fnd;
4165
4166 fnd = 0;
4167 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4168 /* You are already bound to all. You have it already */
4169 return (EINVAL);
4170 }
4171 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4172 if (laddr->ifa == ifa) {
4173 fnd = 1;
4174 break;
4175 }
4176 }
4177 if (fnd && (inp->laddr_count < 2)) {
4178 /* can't delete unless there are at LEAST 2 addresses */
4179 return (-1);
4180 }
3904 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) && (fnd)) {
4181 if (fnd) {
3905 /*
3906 * clean up any use of this address go through our
3907 * associations and clear any last_used_address that match
3908 * this one for each assoc, see if a new primary_destination
3909 * is needed
3910 */
3911 struct sctp_tcb *stcb;
3912
3913 /* clean up "next_addr_touse" */
3914 if (inp->next_addr_touse == laddr)
3915 /* delete this address */
3916 inp->next_addr_touse = NULL;
3917
3918 /* clean up "last_used_address" */
3919 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
4182 /*
4183 * clean up any use of this address go through our
4184 * associations and clear any last_used_address that match
4185 * this one for each assoc, see if a new primary_destination
4186 * is needed
4187 */
4188 struct sctp_tcb *stcb;
4189
4190 /* clean up "next_addr_touse" */
4191 if (inp->next_addr_touse == laddr)
4192 /* delete this address */
4193 inp->next_addr_touse = NULL;
4194
4195 /* clean up "last_used_address" */
4196 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
4197 struct sctp_nets *net;
4198
4199 SCTP_TCB_LOCK(stcb);
3920 if (stcb->asoc.last_used_address == laddr)
3921 /* delete this address */
3922 stcb->asoc.last_used_address = NULL;
4200 if (stcb->asoc.last_used_address == laddr)
4201 /* delete this address */
4202 stcb->asoc.last_used_address = NULL;
3923 } /* for each tcb */
4203 /*
4204 * Now spin through all the nets and purge any ref
4205 * to laddr
4206 */
4207 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4208 if (net->ro._s_addr &&
4209 (net->ro._s_addr->ifa == laddr->ifa)) {
4210 /* Yep, purge src address selected */
4211 struct rtentry *rt;
3924
4212
4213 /* delete this address if cached */
4214 rt = net->ro.ro_rt;
4215 if (rt != NULL) {
4216 RTFREE(rt);
4217 net->ro.ro_rt = NULL;
4218 }
4219 sctp_free_ifa(net->ro._s_addr);
4220 net->ro._s_addr = NULL;
4221 net->src_addr_selected = 0;
4222 }
4223 }
4224 SCTP_TCB_UNLOCK(stcb);
4225 } /* for each tcb */
3925 /* remove it from the ep list */
3926 sctp_remove_laddr(laddr);
3927 inp->laddr_count--;
3928 /* update inp_vflag flags */
3929 sctp_update_ep_vflag(inp);
4226 /* remove it from the ep list */
4227 sctp_remove_laddr(laddr);
4228 inp->laddr_count--;
4229 /* update inp_vflag flags */
4230 sctp_update_ep_vflag(inp);
3930 /* select a new primary destination if needed */
3931 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
3932 /*
3933 * presume caller (sctp_asconf.c) already owns INP
3934 * lock
3935 */
3936 SCTP_TCB_LOCK(stcb);
3937 if (sctp_destination_is_reachable(stcb,
3938 (struct sockaddr *)&stcb->asoc.primary_destination->ro._l_addr) == 0) {
3939 sctp_select_primary_destination(stcb);
3940 }
3941 SCTP_TCB_UNLOCK(stcb);
3942 } /* for each tcb */
3943 }
3944 return (0);
3945}
3946
3947/*
3948 * Add the addr to the TCB local address list For the BOUNDALL or dynamic
3949 * case, this is a "pending" address list (eg. addresses waiting for an
3950 * ASCONF-ACK response) For the subset binding, static case, this is a
3951 * "valid" address list
3952 */
3953int
4231 }
4232 return (0);
4233}
4234
4235/*
4236 * Add the addr to the TCB local address list For the BOUNDALL or dynamic
4237 * case, this is a "pending" address list (eg. addresses waiting for an
4238 * ASCONF-ACK response) For the subset binding, static case, this is a
4239 * "valid" address list
4240 */
4241int
3954sctp_add_local_addr_assoc(struct sctp_tcb *stcb, struct ifaddr *ifa)
4242sctp_add_local_addr_assoc(struct sctp_tcb *stcb, struct sctp_ifa *ifa, int restricted_list)
3955{
3956 struct sctp_inpcb *inp;
3957 struct sctp_laddr *laddr;
4243{
4244 struct sctp_inpcb *inp;
4245 struct sctp_laddr *laddr;
4246 struct sctpladdr *list;
3958 int error;
3959
3960 /*
4247 int error;
4248
4249 /*
3961 * Assumes TCP is locked.. and possiblye the INP. May need to
4250 * Assumes TCB is locked.. and possibly the INP. May need to
3962 * confirm/fix that if we need it and is not the case.
3963 */
4251 * confirm/fix that if we need it and is not the case.
4252 */
3964 inp = stcb->sctp_ep;
3965 if (ifa->ifa_addr->sa_family == AF_INET6) {
3966 struct in6_ifaddr *ifa6;
4253 list = &stcb->asoc.sctp_restricted_addrs;
3967
4254
3968 ifa6 = (struct in6_ifaddr *)ifa;
3969 if (ifa6->ia6_flags & (IN6_IFF_DETACHED |
3970 /* IN6_IFF_DEPRECATED | */
3971 IN6_IFF_ANYCAST |
3972 IN6_IFF_NOTREADY))
4255 inp = stcb->sctp_ep;
4256 if (ifa->address.sa.sa_family == AF_INET6) {
4257 if (ifa->localifa_flags & SCTP_ADDR_IFA_UNUSEABLE) {
3973 /* Can't bind a non-existent addr. */
3974 return (-1);
4258 /* Can't bind a non-existent addr. */
4259 return (-1);
4260 }
3975 }
3976 /* does the address already exist? */
4261 }
4262 /* does the address already exist? */
3977 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
4263 LIST_FOREACH(laddr, list, sctp_nxt_addr) {
3978 if (laddr->ifa == ifa) {
3979 return (-1);
3980 }
3981 }
3982
3983 /* add to the list */
4264 if (laddr->ifa == ifa) {
4265 return (-1);
4266 }
4267 }
4268
4269 /* add to the list */
3984 error = sctp_insert_laddr(&stcb->asoc.sctp_local_addr_list, ifa);
4270 error = sctp_insert_laddr(list, ifa, 0);
3985 if (error != 0)
3986 return (error);
3987 return (0);
3988}
3989
3990/*
3991 * insert an laddr entry with the given ifa for the desired list
3992 */
3993int
4271 if (error != 0)
4272 return (error);
4273 return (0);
4274}
4275
4276/*
4277 * insert an laddr entry with the given ifa for the desired list
4278 */
4279int
3994sctp_insert_laddr(struct sctpladdr *list, struct ifaddr *ifa)
4280sctp_insert_laddr(struct sctpladdr *list, struct sctp_ifa *ifa, uint32_t act)
3995{
3996 struct sctp_laddr *laddr;
3997
3998 laddr = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
3999 if (laddr == NULL) {
4000 /* out of memory? */
4001 return (EINVAL);
4002 }
4003 SCTP_INCR_LADDR_COUNT();
4004 bzero(laddr, sizeof(*laddr));
4005 laddr->ifa = ifa;
4281{
4282 struct sctp_laddr *laddr;
4283
4284 laddr = SCTP_ZONE_GET(sctppcbinfo.ipi_zone_laddr, struct sctp_laddr);
4285 if (laddr == NULL) {
4286 /* out of memory? */
4287 return (EINVAL);
4288 }
4289 SCTP_INCR_LADDR_COUNT();
4290 bzero(laddr, sizeof(*laddr));
4291 laddr->ifa = ifa;
4292 laddr->action = act;
4293 atomic_add_int(&ifa->refcount, 1);
4006 /* insert it */
4007 LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr);
4008
4009 return (0);
4010}
4011
4012/*
4013 * Remove an laddr entry from the local address list (on an assoc)
4014 */
4015void
4016sctp_remove_laddr(struct sctp_laddr *laddr)
4017{
4018
4019 /* remove from the list */
4020 LIST_REMOVE(laddr, sctp_nxt_addr);
4294 /* insert it */
4295 LIST_INSERT_HEAD(list, laddr, sctp_nxt_addr);
4296
4297 return (0);
4298}
4299
4300/*
4301 * Remove an laddr entry from the local address list (on an assoc)
4302 */
4303void
4304sctp_remove_laddr(struct sctp_laddr *laddr)
4305{
4306
4307 /* remove from the list */
4308 LIST_REMOVE(laddr, sctp_nxt_addr);
4309 sctp_free_ifa(laddr->ifa);
4021 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
4022 SCTP_DECR_LADDR_COUNT();
4023}
4024
4025/*
4026 * Remove an address from the TCB local address list
4027 */
4028int
4310 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_laddr, laddr);
4311 SCTP_DECR_LADDR_COUNT();
4312}
4313
4314/*
4315 * Remove an address from the TCB local address list
4316 */
4317int
4029sctp_del_local_addr_assoc(struct sctp_tcb *stcb, struct ifaddr *ifa)
4318sctp_del_local_addr_assoc(struct sctp_tcb *stcb, struct sctp_ifa *ifa)
4030{
4031 struct sctp_inpcb *inp;
4032 struct sctp_laddr *laddr;
4033
4034 /*
4035 * This is called by asconf work. It is assumed that a) The TCB is
4036 * locked and b) The INP is locked. This is true in as much as I can
4037 * trace through the entry asconf code where I did these locks.
4038 * Again, the ASCONF code is a bit different in that it does lock
4039 * the INP during its work often times. This must be since we don't
4040 * want other proc's looking up things while what they are looking
4041 * up is changing :-D
4042 */
4043
4044 inp = stcb->sctp_ep;
4045 /* if subset bound and don't allow ASCONF's, can't delete last */
4046 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
4047 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF) == 0)) {
4048 if (stcb->asoc.numnets < 2) {
4049 /* can't delete last address */
4050 return (-1);
4051 }
4052 }
4319{
4320 struct sctp_inpcb *inp;
4321 struct sctp_laddr *laddr;
4322
4323 /*
4324 * This is called by asconf work. It is assumed that a) The TCB is
4325 * locked and b) The INP is locked. This is true in as much as I can
4326 * trace through the entry asconf code where I did these locks.
4327 * Again, the ASCONF code is a bit different in that it does lock
4328 * the INP during its work often times. This must be since we don't
4329 * want other proc's looking up things while what they are looking
4330 * up is changing :-D
4331 */
4332
4333 inp = stcb->sctp_ep;
4334 /* if subset bound and don't allow ASCONF's, can't delete last */
4335 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
4336 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF) == 0)) {
4337 if (stcb->asoc.numnets < 2) {
4338 /* can't delete last address */
4339 return (-1);
4340 }
4341 }
4053 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
4342 LIST_FOREACH(laddr, &stcb->asoc.sctp_restricted_addrs, sctp_nxt_addr) {
4054 /* remove the address if it exists */
4055 if (laddr->ifa == NULL)
4056 continue;
4057 if (laddr->ifa == ifa) {
4058 sctp_remove_laddr(laddr);
4059 return (0);
4060 }
4061 }
4062
4063 /* address not found! */
4064 return (-1);
4065}
4066
4343 /* remove the address if it exists */
4344 if (laddr->ifa == NULL)
4345 continue;
4346 if (laddr->ifa == ifa) {
4347 sctp_remove_laddr(laddr);
4348 return (0);
4349 }
4350 }
4351
4352 /* address not found! */
4353 return (-1);
4354}
4355
4067/*
4068 * Remove an address from the TCB local address list lookup using a sockaddr
4069 * addr
4070 */
4071int
4072sctp_del_local_addr_assoc_sa(struct sctp_tcb *stcb, struct sockaddr *sa)
4073{
4074 struct sctp_inpcb *inp;
4075 struct sctp_laddr *laddr;
4076 struct sockaddr *l_sa;
4077
4078 /*
4079 * This function I find does not seem to have a caller. As such we
4080 * NEED TO DELETE this code. If we do find a caller, the caller MUST
4081 * have locked the TCB at the least and probably the INP as well.
4082 */
4083 inp = stcb->sctp_ep;
4084 /* if subset bound and don't allow ASCONF's, can't delete last */
4085 if (((inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) &&
4086 (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_DO_ASCONF) == 0)) {
4087 if (stcb->asoc.numnets < 2) {
4088 /* can't delete last address */
4089 return (-1);
4090 }
4091 }
4092 LIST_FOREACH(laddr, &stcb->asoc.sctp_local_addr_list, sctp_nxt_addr) {
4093 /* make sure the address exists */
4094 if (laddr->ifa == NULL)
4095 continue;
4096 if (laddr->ifa->ifa_addr == NULL)
4097 continue;
4098
4099 l_sa = laddr->ifa->ifa_addr;
4100 if (l_sa->sa_family == AF_INET6) {
4101 /* IPv6 address */
4102 struct sockaddr_in6 *sin1, *sin2;
4103
4104 sin1 = (struct sockaddr_in6 *)l_sa;
4105 sin2 = (struct sockaddr_in6 *)sa;
4106 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
4107 sizeof(struct in6_addr)) == 0) {
4108 /* matched */
4109 sctp_remove_laddr(laddr);
4110 return (0);
4111 }
4112 } else if (l_sa->sa_family == AF_INET) {
4113 /* IPv4 address */
4114 struct sockaddr_in *sin1, *sin2;
4115
4116 sin1 = (struct sockaddr_in *)l_sa;
4117 sin2 = (struct sockaddr_in *)sa;
4118 if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) {
4119 /* matched */
4120 sctp_remove_laddr(laddr);
4121 return (0);
4122 }
4123 } else {
4124 /* invalid family */
4125 return (-1);
4126 }
4127 } /* end foreach */
4128 /* address not found! */
4129 return (-1);
4130}
4131
4132static char sctp_pcb_initialized = 0;
4133
4134/*
4135 * Temporarily remove for __APPLE__ until we use the Tiger equivalents
4136 */
4137/* sysctl */
4138static int sctp_max_number_of_assoc = SCTP_MAX_NUM_OF_ASOC;
4139static int sctp_scale_up_for_address = SCTP_SCALE_FOR_ADDR;
4140
4141void
4142sctp_pcb_init()
4143{
4144 /*
4145 * SCTP initialization for the PCB structures should be called by
4146	 * the sctp_init() function.
4147 */
4148 int i;
4149
4150 if (sctp_pcb_initialized != 0) {
4151 /* error I was called twice */
4152 return;
4153 }
4154 sctp_pcb_initialized = 1;
4155
4156 bzero(&sctpstat, sizeof(struct sctpstat));
4157 SCTP_GETTIME_TIMEVAL(&sctpstat.sctps_discontinuitytime);
4158 /* init the empty list of (All) Endpoints */
4159 LIST_INIT(&sctppcbinfo.listhead);
4160
4161 /* init the iterator head */
4356static char sctp_pcb_initialized = 0;
4357
4358/*
4359 * Temporarily remove for __APPLE__ until we use the Tiger equivalents
4360 */
4361/* sysctl */
4362static int sctp_max_number_of_assoc = SCTP_MAX_NUM_OF_ASOC;
4363static int sctp_scale_up_for_address = SCTP_SCALE_FOR_ADDR;
4364
4365void
4366sctp_pcb_init()
4367{
4368 /*
4369 * SCTP initialization for the PCB structures should be called by
4370	 * the sctp_init() function.
4371 */
4372 int i;
4373
4374 if (sctp_pcb_initialized != 0) {
4375 /* error I was called twice */
4376 return;
4377 }
4378 sctp_pcb_initialized = 1;
4379
4380 bzero(&sctpstat, sizeof(struct sctpstat));
4381 SCTP_GETTIME_TIMEVAL(&sctpstat.sctps_discontinuitytime);
4382 /* init the empty list of (All) Endpoints */
4383 LIST_INIT(&sctppcbinfo.listhead);
4384
4385 /* init the iterator head */
4162 LIST_INIT(&sctppcbinfo.iteratorhead);
4386 TAILQ_INIT(&sctppcbinfo.iteratorhead);
4163
4164 /* init the hash table of endpoints */
4165 TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &sctp_hashtblsize);
4166 TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &sctp_pcbtblsize);
4167 TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &sctp_chunkscale);
4168 sctppcbinfo.sctp_asochash = SCTP_HASH_INIT((sctp_hashtblsize * 31),
4169 &sctppcbinfo.hashasocmark);
4170 sctppcbinfo.sctp_ephash = SCTP_HASH_INIT(sctp_hashtblsize,
4171 &sctppcbinfo.hashmark);
4172 sctppcbinfo.sctp_tcpephash = SCTP_HASH_INIT(sctp_hashtblsize,
4173 &sctppcbinfo.hashtcpmark);
4174 sctppcbinfo.hashtblsize = sctp_hashtblsize;
4175
4176 /* init the small hash table we use to track restarted asoc's */
4177 sctppcbinfo.sctp_restarthash = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE,
4178 &sctppcbinfo.hashrestartmark);
4179
4387
4388 /* init the hash table of endpoints */
4389 TUNABLE_INT_FETCH("net.inet.sctp.tcbhashsize", &sctp_hashtblsize);
4390 TUNABLE_INT_FETCH("net.inet.sctp.pcbhashsize", &sctp_pcbtblsize);
4391 TUNABLE_INT_FETCH("net.inet.sctp.chunkscale", &sctp_chunkscale);
4392 sctppcbinfo.sctp_asochash = SCTP_HASH_INIT((sctp_hashtblsize * 31),
4393 &sctppcbinfo.hashasocmark);
4394 sctppcbinfo.sctp_ephash = SCTP_HASH_INIT(sctp_hashtblsize,
4395 &sctppcbinfo.hashmark);
4396 sctppcbinfo.sctp_tcpephash = SCTP_HASH_INIT(sctp_hashtblsize,
4397 &sctppcbinfo.hashtcpmark);
4398 sctppcbinfo.hashtblsize = sctp_hashtblsize;
4399
4400 /* init the small hash table we use to track restarted asoc's */
4401 sctppcbinfo.sctp_restarthash = SCTP_HASH_INIT(SCTP_STACK_VTAG_HASH_SIZE,
4402 &sctppcbinfo.hashrestartmark);
4403
4404
4405 sctppcbinfo.sctp_vrfhash = SCTP_HASH_INIT(SCTP_SIZE_OF_VRF_HASH,
4406 &sctppcbinfo.hashvrfmark);
4407
4180 /* init the zones */
4181 /*
4182 * FIX ME: Should check for NULL returns, but if it does fail we are
4183 * doomed to panic anyways... add later maybe.
4184 */
4185 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_ep, "sctp_ep",
4186 sizeof(struct sctp_inpcb), maxsockets);
4187
4188 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_asoc, "sctp_asoc",
4189 sizeof(struct sctp_tcb), sctp_max_number_of_assoc);
4190
4191 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_laddr, "sctp_laddr",
4192 sizeof(struct sctp_laddr),
4193 (sctp_max_number_of_assoc * sctp_scale_up_for_address));
4194
4195 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_net, "sctp_raddr",
4196 sizeof(struct sctp_nets),
4197 (sctp_max_number_of_assoc * sctp_scale_up_for_address));
4198
4199 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_chunk, "sctp_chunk",
4200 sizeof(struct sctp_tmit_chunk),
4201 (sctp_max_number_of_assoc * sctp_chunkscale));
4202
4203 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_readq, "sctp_readq",
4204 sizeof(struct sctp_queued_to_read),
4205 (sctp_max_number_of_assoc * sctp_chunkscale));
4206
4207 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_strmoq, "sctp_stream_msg_out",
4208 sizeof(struct sctp_stream_queue_pending),
4209 (sctp_max_number_of_assoc * sctp_chunkscale));
4210
4211 /* Master Lock INIT for info structure */
4212 SCTP_INP_INFO_LOCK_INIT();
4213 SCTP_STATLOG_INIT_LOCK();
4214 SCTP_ITERATOR_LOCK_INIT();
4215
4216 SCTP_IPI_COUNT_INIT();
4217 SCTP_IPI_ADDR_INIT();
4408 /* init the zones */
4409 /*
4410 * FIX ME: Should check for NULL returns, but if it does fail we are
4411 * doomed to panic anyways... add later maybe.
4412 */
4413 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_ep, "sctp_ep",
4414 sizeof(struct sctp_inpcb), maxsockets);
4415
4416 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_asoc, "sctp_asoc",
4417 sizeof(struct sctp_tcb), sctp_max_number_of_assoc);
4418
4419 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_laddr, "sctp_laddr",
4420 sizeof(struct sctp_laddr),
4421 (sctp_max_number_of_assoc * sctp_scale_up_for_address));
4422
4423 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_net, "sctp_raddr",
4424 sizeof(struct sctp_nets),
4425 (sctp_max_number_of_assoc * sctp_scale_up_for_address));
4426
4427 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_chunk, "sctp_chunk",
4428 sizeof(struct sctp_tmit_chunk),
4429 (sctp_max_number_of_assoc * sctp_chunkscale));
4430
4431 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_readq, "sctp_readq",
4432 sizeof(struct sctp_queued_to_read),
4433 (sctp_max_number_of_assoc * sctp_chunkscale));
4434
4435 SCTP_ZONE_INIT(sctppcbinfo.ipi_zone_strmoq, "sctp_stream_msg_out",
4436 sizeof(struct sctp_stream_queue_pending),
4437 (sctp_max_number_of_assoc * sctp_chunkscale));
4438
4439 /* Master Lock INIT for info structure */
4440 SCTP_INP_INFO_LOCK_INIT();
4441 SCTP_STATLOG_INIT_LOCK();
4442 SCTP_ITERATOR_LOCK_INIT();
4443
4444 SCTP_IPI_COUNT_INIT();
4445 SCTP_IPI_ADDR_INIT();
4446 SCTP_IPI_ITERATOR_WQ_INIT();
4447
4218 LIST_INIT(&sctppcbinfo.addr_wq);
4219
4220 /* not sure if we need all the counts */
4221 sctppcbinfo.ipi_count_ep = 0;
4222 /* assoc/tcb zone info */
4223 sctppcbinfo.ipi_count_asoc = 0;
4224 /* local addrlist zone info */
4225 sctppcbinfo.ipi_count_laddr = 0;
4226 /* remote addrlist zone info */
4227 sctppcbinfo.ipi_count_raddr = 0;
4228 /* chunk info */
4229 sctppcbinfo.ipi_count_chunk = 0;
4230
4231 /* socket queue zone info */
4232 sctppcbinfo.ipi_count_readq = 0;
4233
4234	 /* stream out queue count */
4235 sctppcbinfo.ipi_count_strmoq = 0;
4236
4237 sctppcbinfo.ipi_free_strmoq = 0;
4238 sctppcbinfo.ipi_free_chunks = 0;
4239
4240 SCTP_OS_TIMER_INIT(&sctppcbinfo.addr_wq_timer.timer);
4241
4242 /* Init the TIMEWAIT list */
4243 for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
4244 LIST_INIT(&sctppcbinfo.vtag_timewait[i]);
4245 }
4246
4448 LIST_INIT(&sctppcbinfo.addr_wq);
4449
4450 /* not sure if we need all the counts */
4451 sctppcbinfo.ipi_count_ep = 0;
4452 /* assoc/tcb zone info */
4453 sctppcbinfo.ipi_count_asoc = 0;
4454 /* local addrlist zone info */
4455 sctppcbinfo.ipi_count_laddr = 0;
4456 /* remote addrlist zone info */
4457 sctppcbinfo.ipi_count_raddr = 0;
4458 /* chunk info */
4459 sctppcbinfo.ipi_count_chunk = 0;
4460
4461 /* socket queue zone info */
4462 sctppcbinfo.ipi_count_readq = 0;
4463
4464 /* stream out queue cont */
4465 sctppcbinfo.ipi_count_strmoq = 0;
4466
4467 sctppcbinfo.ipi_free_strmoq = 0;
4468 sctppcbinfo.ipi_free_chunks = 0;
4469
4470 SCTP_OS_TIMER_INIT(&sctppcbinfo.addr_wq_timer.timer);
4471
4472 /* Init the TIMEWAIT list */
4473 for (i = 0; i < SCTP_STACK_VTAG_HASH_SIZE; i++) {
4474 LIST_INIT(&sctppcbinfo.vtag_timewait[i]);
4475 }
4476
4477#if defined(SCTP_USE_THREAD_BASED_ITERATOR)
4478 sctppcbinfo.iterator_running = 0;
4479 sctp_startup_iterator();
4480#endif
4481
4482 /*
4483 * INIT the default VRF which for BSD is the only one, other O/S's
4484 * may have more. But initially they must start with one and then
4485 * add the VRF's as addresses are added.
4486 */
4487 sctp_init_vrf_list(SCTP_DEFAULT_VRF);
4488
4247}
4248
4249
4250int
4251sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
4252 int iphlen, int offset, int limit, struct sctphdr *sh,
4253 struct sockaddr *altsa)
4254{
4255 /*
4256 * grub through the INIT pulling addresses and loading them to the
4257 * nets structure in the asoc. The from address in the mbuf should
4258 * also be loaded (if it is not already). This routine can be called
4259 * with either INIT or INIT-ACK's as long as the m points to the IP
4260 * packet and the offset points to the beginning of the parameters.
4261 */
4262 struct sctp_inpcb *inp, *l_inp;
4263 struct sctp_nets *net, *net_tmp;
4264 struct ip *iph;
4265 struct sctp_paramhdr *phdr, parm_buf;
4266 struct sctp_tcb *stcb_tmp;
4267 uint16_t ptype, plen;
4268 struct sockaddr *sa;
4269 struct sockaddr_storage dest_store;
4270 struct sockaddr *local_sa = (struct sockaddr *)&dest_store;
4271 struct sockaddr_in sin;
4272 struct sockaddr_in6 sin6;
4273 uint8_t random_store[SCTP_PARAM_BUFFER_SIZE];
4274 struct sctp_auth_random *random = NULL;
4275 uint16_t random_len = 0;
4276 uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE];
4277 struct sctp_auth_hmac_algo *hmacs = NULL;
4278 uint16_t hmacs_len = 0;
4279 uint8_t chunks_store[SCTP_PARAM_BUFFER_SIZE];
4280 struct sctp_auth_chunk_list *chunks = NULL;
4281 uint16_t num_chunks = 0;
4282 sctp_key_t *new_key;
4283 uint32_t keylen;
4284 int got_random = 0, got_hmacs = 0, got_chklist = 0;
4285
4286 /* First get the destination address setup too. */
4287 memset(&sin, 0, sizeof(sin));
4288 memset(&sin6, 0, sizeof(sin6));
4289
4290 sin.sin_family = AF_INET;
4291 sin.sin_len = sizeof(sin);
4292 sin.sin_port = stcb->rport;
4293
4294 sin6.sin6_family = AF_INET6;
4295 sin6.sin6_len = sizeof(struct sockaddr_in6);
4296 sin6.sin6_port = stcb->rport;
4297 if (altsa == NULL) {
4298 iph = mtod(m, struct ip *);
4299 if (iph->ip_v == IPVERSION) {
4300 /* its IPv4 */
4301 struct sockaddr_in *sin_2;
4302
4303 sin_2 = (struct sockaddr_in *)(local_sa);
4304 memset(sin_2, 0, sizeof(sin));
4305 sin_2->sin_family = AF_INET;
4306 sin_2->sin_len = sizeof(sin);
4307 sin_2->sin_port = sh->dest_port;
4308 sin_2->sin_addr.s_addr = iph->ip_dst.s_addr;
4309 sin.sin_addr = iph->ip_src;
4310 sa = (struct sockaddr *)&sin;
4311 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
4312 /* its IPv6 */
4313 struct ip6_hdr *ip6;
4314 struct sockaddr_in6 *sin6_2;
4315
4316 ip6 = mtod(m, struct ip6_hdr *);
4317 sin6_2 = (struct sockaddr_in6 *)(local_sa);
4318 memset(sin6_2, 0, sizeof(sin6));
4319 sin6_2->sin6_family = AF_INET6;
4320 sin6_2->sin6_len = sizeof(struct sockaddr_in6);
4321 sin6_2->sin6_port = sh->dest_port;
4322 sin6.sin6_addr = ip6->ip6_src;
4323 sa = (struct sockaddr *)&sin6;
4324 } else {
4325 sa = NULL;
4326 }
4327 } else {
4328 /*
4329 * For cookies we use the src address NOT from the packet
4330 * but from the original INIT
4331 */
4332 sa = altsa;
4333 }
4334 /* Turn off ECN until we get through all params */
4335 stcb->asoc.ecn_allowed = 0;
4336 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4337 /* mark all addresses that we have currently on the list */
4338 net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC;
4339 }
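	/*
	 * Any net still flagged SCTP_ADDR_NOT_IN_ASSOC once the parameter
	 * walk below finishes is purged at the end of this routine.
	 */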
4340 /* does the source address already exist? if so skip it */
4341 l_inp = inp = stcb->sctp_ep;
4342
4343 atomic_add_int(&stcb->asoc.refcnt, 1);
4344 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, local_sa, stcb);
4345 atomic_add_int(&stcb->asoc.refcnt, -1);
4346
4347 if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) {
4348 /* we must add the source address */
4349 /* no scope set here since we have a tcb already. */
4350 if ((sa->sa_family == AF_INET) &&
4351 (stcb->asoc.ipv4_addr_legal)) {
4352 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_2)) {
4353 return (-1);
4354 }
4355 } else if ((sa->sa_family == AF_INET6) &&
4356 (stcb->asoc.ipv6_addr_legal)) {
4357 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_3)) {
4358 return (-2);
4359 }
4360 }
4361 } else {
4362 if (net_tmp != NULL && stcb_tmp == stcb) {
4363 net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC;
4364 } else if (stcb_tmp != stcb) {
4365 /* It belongs to another association? */
4366 SCTP_TCB_UNLOCK(stcb_tmp);
4367 return (-3);
4368 }
4369 }
4370 if (stcb->asoc.state == 0) {
4371 /* the assoc was freed? */
4372 return (-4);
4373 }
4374 /* now we must go through each of the params. */
4375 phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
4376 while (phdr) {
4377 ptype = ntohs(phdr->param_type);
4378 plen = ntohs(phdr->param_length);
4379 /*
4380 * printf("ptype => %0x, plen => %d\n", (uint32_t)ptype,
4381 * (int)plen);
4382 */
4383 if (offset + plen > limit) {
4384 break;
4385 }
4386 if (plen == 0) {
4387 break;
4388 }
4389 if (ptype == SCTP_IPV4_ADDRESS) {
4390 if (stcb->asoc.ipv4_addr_legal) {
4391 struct sctp_ipv4addr_param *p4, p4_buf;
4392
4393 /* ok get the v4 address and check/add */
4394 phdr = sctp_get_next_param(m, offset,
4395 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
4396 if (plen != sizeof(struct sctp_ipv4addr_param) ||
4397 phdr == NULL) {
4398 return (-5);
4399 }
4400 p4 = (struct sctp_ipv4addr_param *)phdr;
4401 sin.sin_addr.s_addr = p4->addr;
4402 sa = (struct sockaddr *)&sin;
4403 inp = stcb->sctp_ep;
4404 atomic_add_int(&stcb->asoc.refcnt, 1);
4405 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
4406 local_sa, stcb);
4407 atomic_add_int(&stcb->asoc.refcnt, -1);
4408
4409 if ((stcb_tmp == NULL && inp == stcb->sctp_ep) ||
4410 inp == NULL) {
4411 /* we must add the source address */
4412 /*
4413 * no scope set since we have a tcb
4414 * already
4415 */
4416
4417 /*
4418 * we must validate the state again
4419 * here
4420 */
4421 if (stcb->asoc.state == 0) {
4422 /* the assoc was freed? */
4423 return (-7);
4424 }
4425 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_4)) {
4426 return (-8);
4427 }
4428 } else if (stcb_tmp == stcb) {
4429 if (stcb->asoc.state == 0) {
4430 /* the assoc was freed? */
4431 return (-10);
4432 }
4433 if (net != NULL) {
4434 /* clear flag */
4435 net->dest_state &=
4436 ~SCTP_ADDR_NOT_IN_ASSOC;
4437 }
4438 } else {
4439 /*
4440 * strange, address is in another
4441 * assoc? straighten out locks.
4442 */
4443 if (stcb->asoc.state == 0) {
4444 /* the assoc was freed? */
4445 return (-12);
4446 }
4447 return (-13);
4448 }
4449 }
4450 } else if (ptype == SCTP_IPV6_ADDRESS) {
4451 if (stcb->asoc.ipv6_addr_legal) {
4452 /* ok get the v6 address and check/add */
4453 struct sctp_ipv6addr_param *p6, p6_buf;
4454
4455 phdr = sctp_get_next_param(m, offset,
4456 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
4457 if (plen != sizeof(struct sctp_ipv6addr_param) ||
4458 phdr == NULL) {
4459 return (-14);
4460 }
4461 p6 = (struct sctp_ipv6addr_param *)phdr;
4462 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
4463 sizeof(p6->addr));
4464 sa = (struct sockaddr *)&sin6;
4465 inp = stcb->sctp_ep;
4466 atomic_add_int(&stcb->asoc.refcnt, 1);
4467 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
4468 local_sa, stcb);
4469 atomic_add_int(&stcb->asoc.refcnt, -1);
4470 if (stcb_tmp == NULL && (inp == stcb->sctp_ep ||
4471 inp == NULL)) {
4472 /*
4473 * we must validate the state again
4474 * here
4475 */
4476 if (stcb->asoc.state == 0) {
4477 /* the assoc was freed? */
4478 return (-16);
4479 }
4480 /*
4481 * we must add the address, no scope
4482 * set
4483 */
4484 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_5)) {
4485 return (-17);
4486 }
4487 } else if (stcb_tmp == stcb) {
4488 /*
4489 * we must validate the state again
4490 * here
4491 */
4492 if (stcb->asoc.state == 0) {
4493 /* the assoc was freed? */
4494 return (-19);
4495 }
4496 if (net != NULL) {
4497 /* clear flag */
4498 net->dest_state &=
4499 ~SCTP_ADDR_NOT_IN_ASSOC;
4500 }
4501 } else {
4502 /*
4503 * strange, address is in another
4504 * assoc? straighten out locks.
4505 */
4506 if (stcb->asoc.state == 0) {
4507 /* the assoc was freed? */
4508 return (-21);
4509 }
4510 return (-22);
4511 }
4512 }
4513 } else if (ptype == SCTP_ECN_CAPABLE) {
4514 stcb->asoc.ecn_allowed = 1;
4515 } else if (ptype == SCTP_ULP_ADAPTATION) {
4516 if (stcb->asoc.state != SCTP_STATE_OPEN) {
4517 struct sctp_adaptation_layer_indication ai,
4518 *aip;
4519
4520 phdr = sctp_get_next_param(m, offset,
4521 (struct sctp_paramhdr *)&ai, sizeof(ai));
4522 aip = (struct sctp_adaptation_layer_indication *)phdr;
4523 sctp_ulp_notify(SCTP_NOTIFY_ADAPTATION_INDICATION,
4524 stcb, ntohl(aip->indication), NULL);
4525 }
4526 } else if (ptype == SCTP_SET_PRIM_ADDR) {
4527 struct sctp_asconf_addr_param lstore, *fee;
4528 struct sctp_asconf_addrv4_param *fii;
4529 int lptype;
4530 struct sockaddr *lsa = NULL;
4531
4532 stcb->asoc.peer_supports_asconf = 1;
4533 if (plen > sizeof(lstore)) {
4534 return (-23);
4535 }
4536 phdr = sctp_get_next_param(m, offset,
4537 (struct sctp_paramhdr *)&lstore, plen);
4538 if (phdr == NULL) {
4539 return (-24);
4540 }
4541 fee = (struct sctp_asconf_addr_param *)phdr;
4542 lptype = ntohs(fee->addrp.ph.param_type);
4543 if (lptype == SCTP_IPV4_ADDRESS) {
4544 if (plen !=
4545 sizeof(struct sctp_asconf_addrv4_param)) {
4546 printf("Sizeof setprim in init/init ack not %d but %d - ignored\n",
4547 (int)sizeof(struct sctp_asconf_addrv4_param),
4548 plen);
4549 } else {
4550 fii = (struct sctp_asconf_addrv4_param *)fee;
4551 sin.sin_addr.s_addr = fii->addrp.addr;
4552 lsa = (struct sockaddr *)&sin;
4553 }
4554 } else if (lptype == SCTP_IPV6_ADDRESS) {
4555 if (plen !=
4556 sizeof(struct sctp_asconf_addr_param)) {
4557 printf("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n",
4558 (int)sizeof(struct sctp_asconf_addr_param),
4559 plen);
4560 } else {
4561 memcpy(sin6.sin6_addr.s6_addr,
4562 fee->addrp.addr,
4563 sizeof(fee->addrp.addr));
4564 lsa = (struct sockaddr *)&sin6;
4565 }
4566 }
4567 if (lsa) {
4568 sctp_set_primary_addr(stcb, sa, NULL);
4569 }
4570 } else if (ptype == SCTP_PRSCTP_SUPPORTED) {
4571 /* Peer supports pr-sctp */
4572 stcb->asoc.peer_supports_prsctp = 1;
4573 } else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
4574 /* A supported extension chunk */
4575 struct sctp_supported_chunk_types_param *pr_supported;
4576 uint8_t local_store[SCTP_PARAM_BUFFER_SIZE];
4577 int num_ent, i;
4578
4579 phdr = sctp_get_next_param(m, offset,
4580 (struct sctp_paramhdr *)&local_store, plen);
4581 if (phdr == NULL) {
4582 return (-25);
4583 }
4584 stcb->asoc.peer_supports_asconf = 0;
4585 stcb->asoc.peer_supports_prsctp = 0;
4586 stcb->asoc.peer_supports_pktdrop = 0;
4587 stcb->asoc.peer_supports_strreset = 0;
4588 stcb->asoc.peer_supports_auth = 0;
4589 pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
4590 num_ent = plen - sizeof(struct sctp_paramhdr);
4591 for (i = 0; i < num_ent; i++) {
4592 switch (pr_supported->chunk_types[i]) {
4593 case SCTP_ASCONF:
4594 case SCTP_ASCONF_ACK:
4595 stcb->asoc.peer_supports_asconf = 1;
4596 break;
4597 case SCTP_FORWARD_CUM_TSN:
4598 stcb->asoc.peer_supports_prsctp = 1;
4599 break;
4600 case SCTP_PACKET_DROPPED:
4601 stcb->asoc.peer_supports_pktdrop = 1;
4602 break;
4603 case SCTP_STREAM_RESET:
4604 stcb->asoc.peer_supports_strreset = 1;
4605 break;
4606 case SCTP_AUTHENTICATION:
4607 stcb->asoc.peer_supports_auth = 1;
4608 break;
4609 default:
4610 /* one I have not learned yet */
4611 break;
4612
4613 }
4614 }
4615 } else if (ptype == SCTP_ECN_NONCE_SUPPORTED) {
4616 /* Peer supports ECN-nonce */
4617 stcb->asoc.peer_supports_ecn_nonce = 1;
4618 stcb->asoc.ecn_nonce_allowed = 1;
4619 } else if (ptype == SCTP_RANDOM) {
4620 if (plen > sizeof(random_store))
4621 break;
4622 if (got_random) {
4623 /* already processed a RANDOM */
4624 goto next_param;
4625 }
4626 phdr = sctp_get_next_param(m, offset,
4627 (struct sctp_paramhdr *)random_store,
4628 plen);
4629 if (phdr == NULL)
4630 return (-26);
4631 random = (struct sctp_auth_random *)phdr;
4632 random_len = plen - sizeof(*random);
4633 /* enforce the random length */
4634 if (random_len != SCTP_AUTH_RANDOM_SIZE_REQUIRED) {
4635#ifdef SCTP_DEBUG
4636 if (sctp_debug_on & SCTP_DEBUG_AUTH1)
4637 printf("SCTP: invalid RANDOM len\n");
4638#endif
4639 return (-27);
4640 }
4641 got_random = 1;
4642 } else if (ptype == SCTP_HMAC_LIST) {
4643 int num_hmacs;
4644 int i;
4645
4646 if (plen > sizeof(hmacs_store))
4647 break;
4648 if (got_hmacs) {
4649 /* already processed a HMAC list */
4650 goto next_param;
4651 }
4652 phdr = sctp_get_next_param(m, offset,
4653 (struct sctp_paramhdr *)hmacs_store,
4654 plen);
4655 if (phdr == NULL)
4656 return (-28);
4657 hmacs = (struct sctp_auth_hmac_algo *)phdr;
4658 hmacs_len = plen - sizeof(*hmacs);
4659 num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]);
4660 /* validate the hmac list */
4661 if (sctp_verify_hmac_param(hmacs, num_hmacs)) {
4662 return (-29);
4663 }
4664 if (stcb->asoc.peer_hmacs != NULL)
4665 sctp_free_hmaclist(stcb->asoc.peer_hmacs);
4666 stcb->asoc.peer_hmacs = sctp_alloc_hmaclist(num_hmacs);
4667 if (stcb->asoc.peer_hmacs != NULL) {
4668 for (i = 0; i < num_hmacs; i++) {
4669 sctp_auth_add_hmacid(stcb->asoc.peer_hmacs,
4670 ntohs(hmacs->hmac_ids[i]));
4671 }
4672 }
4673 got_hmacs = 1;
4674 } else if (ptype == SCTP_CHUNK_LIST) {
4675 int i;
4676
4677 if (plen > sizeof(chunks_store))
4678 break;
4679 if (got_chklist) {
4680 /* already processed a Chunks list */
4681 goto next_param;
4682 }
4683 phdr = sctp_get_next_param(m, offset,
4684 (struct sctp_paramhdr *)chunks_store,
4685 plen);
4686 if (phdr == NULL)
4687 return (-30);
4688 chunks = (struct sctp_auth_chunk_list *)phdr;
4689 num_chunks = plen - sizeof(*chunks);
4690 if (stcb->asoc.peer_auth_chunks != NULL)
4691 sctp_clear_chunklist(stcb->asoc.peer_auth_chunks);
4692 else
4693 stcb->asoc.peer_auth_chunks = sctp_alloc_chunklist();
4694 for (i = 0; i < num_chunks; i++) {
4695 sctp_auth_add_chunk(chunks->chunk_types[i],
4696 stcb->asoc.peer_auth_chunks);
4697 }
4698 got_chklist = 1;
4699 } else if ((ptype == SCTP_HEARTBEAT_INFO) ||
4700 (ptype == SCTP_STATE_COOKIE) ||
4701 (ptype == SCTP_UNRECOG_PARAM) ||
4702 (ptype == SCTP_COOKIE_PRESERVE) ||
4703 (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
4704 (ptype == SCTP_ADD_IP_ADDRESS) ||
4705 (ptype == SCTP_DEL_IP_ADDRESS) ||
4706 (ptype == SCTP_ERROR_CAUSE_IND) ||
4707 (ptype == SCTP_SUCCESS_REPORT)) {
4708 /* don't care */ ;
4709 } else {
4710 if ((ptype & 0x8000) == 0x0000) {
4711 /*
4712 * must stop processing the rest of the
4713 * param's. Any report bits were handled
4714 * with the call to
4715 * sctp_arethere_unrecognized_parameters()
4716 * when the INIT or INIT-ACK was first seen.
4717 */
4718 break;
4719 }
4720 }
4721next_param:
4722 offset += SCTP_SIZE32(plen);
4723 if (offset >= limit) {
4724 break;
4725 }
4726 phdr = sctp_get_next_param(m, offset, &parm_buf,
4727 sizeof(parm_buf));
4728 }
4729 /* Now check to see if we need to purge any addresses */
4730 for (net = TAILQ_FIRST(&stcb->asoc.nets); net != NULL; net = net_tmp) {
4731 net_tmp = TAILQ_NEXT(net, sctp_next);
4732 if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) ==
4733 SCTP_ADDR_NOT_IN_ASSOC) {
4734 /* This address has been removed from the asoc */
4735 /* remove and free it */
4736 stcb->asoc.numnets--;
4737 TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next);
4738 sctp_free_remote_addr(net);
4739 if (net == stcb->asoc.primary_destination) {
4740 stcb->asoc.primary_destination = NULL;
4741 sctp_select_primary_destination(stcb);
4742 }
4743 }
4744 }
4745 /* validate authentication required parameters */
4746 if (got_random && got_hmacs) {
4747 stcb->asoc.peer_supports_auth = 1;
4748 } else {
4749 stcb->asoc.peer_supports_auth = 0;
4750 }
4751 if (!sctp_asconf_auth_nochk && stcb->asoc.peer_supports_asconf &&
4752 !stcb->asoc.peer_supports_auth) {
4753 return (-31);
4754 }
4755 /* concatenate the full random key */
4756#ifdef SCTP_AUTH_DRAFT_04
4757 keylen = random_len;
4758 new_key = sctp_alloc_key(keylen);
4759 if (new_key != NULL) {
4760 /* copy in the RANDOM */
4761 if (random != NULL)
4762 bcopy(random->random_data, new_key->key, random_len);
4763 }
4764#else
4765 keylen = sizeof(*random) + random_len + sizeof(*chunks) + num_chunks +
4766 sizeof(*hmacs) + hmacs_len;
4767 new_key = sctp_alloc_key(keylen);
4768 if (new_key != NULL) {
4769 /* copy in the RANDOM */
4770 if (random != NULL) {
4771 keylen = sizeof(*random) + random_len;
4772 bcopy(random, new_key->key, keylen);
4773 }
4774 /* append in the AUTH chunks */
4775 if (chunks != NULL) {
4776 bcopy(chunks, new_key->key + keylen,
4777 sizeof(*chunks) + num_chunks);
4778 keylen += sizeof(*chunks) + num_chunks;
4779 }
4780 /* append in the HMACs */
4781 if (hmacs != NULL) {
4782 bcopy(hmacs, new_key->key + keylen,
4783 sizeof(*hmacs) + hmacs_len);
4784 }
4785 }
4786#endif
4787 else {
4788 return (-32);
4789 }
4790 if (stcb->asoc.authinfo.peer_random != NULL)
4791 sctp_free_key(stcb->asoc.authinfo.peer_random);
4792 stcb->asoc.authinfo.peer_random = new_key;
4793#ifdef SCTP_AUTH_DRAFT_04
4794 /* don't include the chunks and hmacs for draft -04 */
4795 stcb->asoc.authinfo.peer_random->keylen = random_len;
4796#endif
4797 sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid);
4798 sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid);
4799
4800 return (0);
4801}
4802
4803int
4804sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
4805 struct sctp_nets *net)
4806{
4807 /* make sure the requested primary address exists in the assoc */
4808 if (net == NULL && sa)
4809 net = sctp_findnet(stcb, sa);
4810
4811 if (net == NULL) {
4812 /* didn't find the requested primary address! */
4813 return (-1);
4814 } else {
4815 /* set the primary address */
4816 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
4489}
4490
4491
4492int
4493sctp_load_addresses_from_init(struct sctp_tcb *stcb, struct mbuf *m,
4494 int iphlen, int offset, int limit, struct sctphdr *sh,
4495 struct sockaddr *altsa)
4496{
4497 /*
4498 * grub through the INIT pulling addresses and loading them to the
4499 * nets structure in the asoc. The from address in the mbuf should
4500 * also be loaded (if it is not already). This routine can be called
4501	 * with either an INIT or an INIT-ACK as long as m points to the IP
4502 * packet and the offset points to the beginning of the parameters.
4503 */
4504 struct sctp_inpcb *inp, *l_inp;
4505 struct sctp_nets *net, *net_tmp;
4506 struct ip *iph;
4507 struct sctp_paramhdr *phdr, parm_buf;
4508 struct sctp_tcb *stcb_tmp;
4509 uint16_t ptype, plen;
4510 struct sockaddr *sa;
4511 struct sockaddr_storage dest_store;
4512 struct sockaddr *local_sa = (struct sockaddr *)&dest_store;
4513 struct sockaddr_in sin;
4514 struct sockaddr_in6 sin6;
4515 uint8_t random_store[SCTP_PARAM_BUFFER_SIZE];
4516 struct sctp_auth_random *random = NULL;
4517 uint16_t random_len = 0;
4518 uint8_t hmacs_store[SCTP_PARAM_BUFFER_SIZE];
4519 struct sctp_auth_hmac_algo *hmacs = NULL;
4520 uint16_t hmacs_len = 0;
4521 uint8_t chunks_store[SCTP_PARAM_BUFFER_SIZE];
4522 struct sctp_auth_chunk_list *chunks = NULL;
4523 uint16_t num_chunks = 0;
4524 sctp_key_t *new_key;
4525 uint32_t keylen;
4526 int got_random = 0, got_hmacs = 0, got_chklist = 0;
4527
4528 /* First get the destination address setup too. */
4529 memset(&sin, 0, sizeof(sin));
4530 memset(&sin6, 0, sizeof(sin6));
4531
4532 sin.sin_family = AF_INET;
4533 sin.sin_len = sizeof(sin);
4534 sin.sin_port = stcb->rport;
4535
4536 sin6.sin6_family = AF_INET6;
4537 sin6.sin6_len = sizeof(struct sockaddr_in6);
4538 sin6.sin6_port = stcb->rport;
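	/*
	 * The peer's source address comes either from the IP header of the
	 * packet we are holding (the usual case) or from an override passed
	 * in via altsa, e.g. when a cookie carries the original INIT source.
	 */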
4539 if (altsa == NULL) {
4540 iph = mtod(m, struct ip *);
4541 if (iph->ip_v == IPVERSION) {
4542			/* it's IPv4 */
4543 struct sockaddr_in *sin_2;
4544
4545 sin_2 = (struct sockaddr_in *)(local_sa);
4546 memset(sin_2, 0, sizeof(sin));
4547 sin_2->sin_family = AF_INET;
4548 sin_2->sin_len = sizeof(sin);
4549 sin_2->sin_port = sh->dest_port;
4550 sin_2->sin_addr.s_addr = iph->ip_dst.s_addr;
4551 sin.sin_addr = iph->ip_src;
4552 sa = (struct sockaddr *)&sin;
4553 } else if (iph->ip_v == (IPV6_VERSION >> 4)) {
4554			/* it's IPv6 */
4555 struct ip6_hdr *ip6;
4556 struct sockaddr_in6 *sin6_2;
4557
4558 ip6 = mtod(m, struct ip6_hdr *);
4559 sin6_2 = (struct sockaddr_in6 *)(local_sa);
4560 memset(sin6_2, 0, sizeof(sin6));
4561 sin6_2->sin6_family = AF_INET6;
4562 sin6_2->sin6_len = sizeof(struct sockaddr_in6);
4563 sin6_2->sin6_port = sh->dest_port;
4564 sin6.sin6_addr = ip6->ip6_src;
4565 sa = (struct sockaddr *)&sin6;
4566 } else {
4567 sa = NULL;
4568 }
4569 } else {
4570 /*
4571 * For cookies we use the src address NOT from the packet
4572 * but from the original INIT
4573 */
4574 sa = altsa;
4575 }
4576 /* Turn off ECN until we get through all params */
4577 stcb->asoc.ecn_allowed = 0;
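	/*
	 * Mark-and-sweep: every net currently on the list is tagged
	 * SCTP_ADDR_NOT_IN_ASSOC here; the tag is cleared as each address is
	 * found in the parameters below, and any net still tagged when the
	 * parsing finishes is purged from the association.
	 */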
4578 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
4579 /* mark all addresses that we have currently on the list */
4580 net->dest_state |= SCTP_ADDR_NOT_IN_ASSOC;
4581 }
4582 /* does the source address already exist? if so skip it */
4583 l_inp = inp = stcb->sctp_ep;
4584
4585 atomic_add_int(&stcb->asoc.refcnt, 1);
4586 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net_tmp, local_sa, stcb);
4587 atomic_add_int(&stcb->asoc.refcnt, -1);
4588
4589 if ((stcb_tmp == NULL && inp == stcb->sctp_ep) || inp == NULL) {
4590 /* we must add the source address */
4591 /* no scope set here since we have a tcb already. */
4592 if ((sa->sa_family == AF_INET) &&
4593 (stcb->asoc.ipv4_addr_legal)) {
4594 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_2)) {
4595 return (-1);
4596 }
4597 } else if ((sa->sa_family == AF_INET6) &&
4598 (stcb->asoc.ipv6_addr_legal)) {
4599 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_3)) {
4600 return (-2);
4601 }
4602 }
4603 } else {
4604 if (net_tmp != NULL && stcb_tmp == stcb) {
4605 net_tmp->dest_state &= ~SCTP_ADDR_NOT_IN_ASSOC;
4606 } else if (stcb_tmp != stcb) {
4607 /* It belongs to another association? */
4608 SCTP_TCB_UNLOCK(stcb_tmp);
4609 return (-3);
4610 }
4611 }
4612 if (stcb->asoc.state == 0) {
4613 /* the assoc was freed? */
4614 return (-4);
4615 }
4616 /* now we must go through each of the params. */
4617 phdr = sctp_get_next_param(m, offset, &parm_buf, sizeof(parm_buf));
4618 while (phdr) {
4619 ptype = ntohs(phdr->param_type);
4620 plen = ntohs(phdr->param_length);
4621 /*
4622 * printf("ptype => %0x, plen => %d\n", (uint32_t)ptype,
4623 * (int)plen);
4624 */
4625 if (offset + plen > limit) {
4626 break;
4627 }
4628 if (plen == 0) {
4629 break;
4630 }
4631 if (ptype == SCTP_IPV4_ADDRESS) {
4632 if (stcb->asoc.ipv4_addr_legal) {
4633 struct sctp_ipv4addr_param *p4, p4_buf;
4634
4635 /* ok get the v4 address and check/add */
4636 phdr = sctp_get_next_param(m, offset,
4637 (struct sctp_paramhdr *)&p4_buf, sizeof(p4_buf));
4638 if (plen != sizeof(struct sctp_ipv4addr_param) ||
4639 phdr == NULL) {
4640 return (-5);
4641 }
4642 p4 = (struct sctp_ipv4addr_param *)phdr;
4643 sin.sin_addr.s_addr = p4->addr;
4644 sa = (struct sockaddr *)&sin;
4645 inp = stcb->sctp_ep;
4646 atomic_add_int(&stcb->asoc.refcnt, 1);
4647 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
4648 local_sa, stcb);
4649 atomic_add_int(&stcb->asoc.refcnt, -1);
4650
4651 if ((stcb_tmp == NULL && inp == stcb->sctp_ep) ||
4652 inp == NULL) {
4653 /* we must add the source address */
4654 /*
4655 * no scope set since we have a tcb
4656 * already
4657 */
4658
4659 /*
4660 * we must validate the state again
4661 * here
4662 */
4663 if (stcb->asoc.state == 0) {
4664 /* the assoc was freed? */
4665 return (-7);
4666 }
4667 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_4)) {
4668 return (-8);
4669 }
4670 } else if (stcb_tmp == stcb) {
4671 if (stcb->asoc.state == 0) {
4672 /* the assoc was freed? */
4673 return (-10);
4674 }
4675 if (net != NULL) {
4676 /* clear flag */
4677 net->dest_state &=
4678 ~SCTP_ADDR_NOT_IN_ASSOC;
4679 }
4680 } else {
4681 /*
4682 * strange, address is in another
4683 * assoc? straighten out locks.
4684 */
4685 if (stcb->asoc.state == 0) {
4686 /* the assoc was freed? */
4687 return (-12);
4688 }
4689 return (-13);
4690 }
4691 }
4692 } else if (ptype == SCTP_IPV6_ADDRESS) {
4693 if (stcb->asoc.ipv6_addr_legal) {
4694 /* ok get the v6 address and check/add */
4695 struct sctp_ipv6addr_param *p6, p6_buf;
4696
4697 phdr = sctp_get_next_param(m, offset,
4698 (struct sctp_paramhdr *)&p6_buf, sizeof(p6_buf));
4699 if (plen != sizeof(struct sctp_ipv6addr_param) ||
4700 phdr == NULL) {
4701 return (-14);
4702 }
4703 p6 = (struct sctp_ipv6addr_param *)phdr;
4704 memcpy((caddr_t)&sin6.sin6_addr, p6->addr,
4705 sizeof(p6->addr));
4706 sa = (struct sockaddr *)&sin6;
4707 inp = stcb->sctp_ep;
4708 atomic_add_int(&stcb->asoc.refcnt, 1);
4709 stcb_tmp = sctp_findassociation_ep_addr(&inp, sa, &net,
4710 local_sa, stcb);
4711 atomic_add_int(&stcb->asoc.refcnt, -1);
4712 if (stcb_tmp == NULL && (inp == stcb->sctp_ep ||
4713 inp == NULL)) {
4714 /*
4715 * we must validate the state again
4716 * here
4717 */
4718 if (stcb->asoc.state == 0) {
4719 /* the assoc was freed? */
4720 return (-16);
4721 }
4722 /*
4723 * we must add the address, no scope
4724 * set
4725 */
4726 if (sctp_add_remote_addr(stcb, sa, SCTP_DONOT_SETSCOPE, SCTP_LOAD_ADDR_5)) {
4727 return (-17);
4728 }
4729 } else if (stcb_tmp == stcb) {
4730 /*
4731 * we must validate the state again
4732 * here
4733 */
4734 if (stcb->asoc.state == 0) {
4735 /* the assoc was freed? */
4736 return (-19);
4737 }
4738 if (net != NULL) {
4739 /* clear flag */
4740 net->dest_state &=
4741 ~SCTP_ADDR_NOT_IN_ASSOC;
4742 }
4743 } else {
4744 /*
4745 * strange, address is in another
4746 * assoc? straighten out locks.
4747 */
4748 if (stcb->asoc.state == 0) {
4749 /* the assoc was freed? */
4750 return (-21);
4751 }
4752 return (-22);
4753 }
4754 }
4755 } else if (ptype == SCTP_ECN_CAPABLE) {
4756 stcb->asoc.ecn_allowed = 1;
4757 } else if (ptype == SCTP_ULP_ADAPTATION) {
4758 if (stcb->asoc.state != SCTP_STATE_OPEN) {
4759 struct sctp_adaptation_layer_indication ai,
4760 *aip;
4761
4762 phdr = sctp_get_next_param(m, offset,
4763 (struct sctp_paramhdr *)&ai, sizeof(ai));
4764 aip = (struct sctp_adaptation_layer_indication *)phdr;
4765 sctp_ulp_notify(SCTP_NOTIFY_ADAPTATION_INDICATION,
4766 stcb, ntohl(aip->indication), NULL);
4767 }
4768 } else if (ptype == SCTP_SET_PRIM_ADDR) {
4769 struct sctp_asconf_addr_param lstore, *fee;
4770 struct sctp_asconf_addrv4_param *fii;
4771 int lptype;
4772 struct sockaddr *lsa = NULL;
4773
4774 stcb->asoc.peer_supports_asconf = 1;
4775 if (plen > sizeof(lstore)) {
4776 return (-23);
4777 }
4778 phdr = sctp_get_next_param(m, offset,
4779 (struct sctp_paramhdr *)&lstore, plen);
4780 if (phdr == NULL) {
4781 return (-24);
4782 }
4783 fee = (struct sctp_asconf_addr_param *)phdr;
4784 lptype = ntohs(fee->addrp.ph.param_type);
4785 if (lptype == SCTP_IPV4_ADDRESS) {
4786 if (plen !=
4787 sizeof(struct sctp_asconf_addrv4_param)) {
4788 printf("Sizeof setprim in init/init ack not %d but %d - ignored\n",
4789 (int)sizeof(struct sctp_asconf_addrv4_param),
4790 plen);
4791 } else {
4792 fii = (struct sctp_asconf_addrv4_param *)fee;
4793 sin.sin_addr.s_addr = fii->addrp.addr;
4794 lsa = (struct sockaddr *)&sin;
4795 }
4796 } else if (lptype == SCTP_IPV6_ADDRESS) {
4797 if (plen !=
4798 sizeof(struct sctp_asconf_addr_param)) {
4799 printf("Sizeof setprim (v6) in init/init ack not %d but %d - ignored\n",
4800 (int)sizeof(struct sctp_asconf_addr_param),
4801 plen);
4802 } else {
4803 memcpy(sin6.sin6_addr.s6_addr,
4804 fee->addrp.addr,
4805 sizeof(fee->addrp.addr));
4806 lsa = (struct sockaddr *)&sin6;
4807 }
4808 }
4809 if (lsa) {
4810 sctp_set_primary_addr(stcb, sa, NULL);
4811 }
4812 } else if (ptype == SCTP_PRSCTP_SUPPORTED) {
4813 /* Peer supports pr-sctp */
4814 stcb->asoc.peer_supports_prsctp = 1;
4815 } else if (ptype == SCTP_SUPPORTED_CHUNK_EXT) {
4816 /* A supported extension chunk */
4817 struct sctp_supported_chunk_types_param *pr_supported;
4818 uint8_t local_store[SCTP_PARAM_BUFFER_SIZE];
4819 int num_ent, i;
4820
4821 phdr = sctp_get_next_param(m, offset,
4822 (struct sctp_paramhdr *)&local_store, plen);
4823 if (phdr == NULL) {
4824 return (-25);
4825 }
4826 stcb->asoc.peer_supports_asconf = 0;
4827 stcb->asoc.peer_supports_prsctp = 0;
4828 stcb->asoc.peer_supports_pktdrop = 0;
4829 stcb->asoc.peer_supports_strreset = 0;
4830 stcb->asoc.peer_supports_auth = 0;
4831 pr_supported = (struct sctp_supported_chunk_types_param *)phdr;
4832 num_ent = plen - sizeof(struct sctp_paramhdr);
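			/* each entry is a single chunk-type byte */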
4833 for (i = 0; i < num_ent; i++) {
4834 switch (pr_supported->chunk_types[i]) {
4835 case SCTP_ASCONF:
4836 case SCTP_ASCONF_ACK:
4837 stcb->asoc.peer_supports_asconf = 1;
4838 break;
4839 case SCTP_FORWARD_CUM_TSN:
4840 stcb->asoc.peer_supports_prsctp = 1;
4841 break;
4842 case SCTP_PACKET_DROPPED:
4843 stcb->asoc.peer_supports_pktdrop = 1;
4844 break;
4845 case SCTP_STREAM_RESET:
4846 stcb->asoc.peer_supports_strreset = 1;
4847 break;
4848 case SCTP_AUTHENTICATION:
4849 stcb->asoc.peer_supports_auth = 1;
4850 break;
4851 default:
4852 /* one I have not learned yet */
4853 break;
4854
4855 }
4856 }
4857 } else if (ptype == SCTP_ECN_NONCE_SUPPORTED) {
4858 /* Peer supports ECN-nonce */
4859 stcb->asoc.peer_supports_ecn_nonce = 1;
4860 stcb->asoc.ecn_nonce_allowed = 1;
4861 } else if (ptype == SCTP_RANDOM) {
4862 if (plen > sizeof(random_store))
4863 break;
4864 if (got_random) {
4865 /* already processed a RANDOM */
4866 goto next_param;
4867 }
4868 phdr = sctp_get_next_param(m, offset,
4869 (struct sctp_paramhdr *)random_store,
4870 plen);
4871 if (phdr == NULL)
4872 return (-26);
4873 random = (struct sctp_auth_random *)phdr;
4874 random_len = plen - sizeof(*random);
4875 /* enforce the random length */
4876 if (random_len != SCTP_AUTH_RANDOM_SIZE_REQUIRED) {
4877#ifdef SCTP_DEBUG
4878 if (sctp_debug_on & SCTP_DEBUG_AUTH1)
4879 printf("SCTP: invalid RANDOM len\n");
4880#endif
4881 return (-27);
4882 }
4883 got_random = 1;
4884 } else if (ptype == SCTP_HMAC_LIST) {
4885 int num_hmacs;
4886 int i;
4887
4888 if (plen > sizeof(hmacs_store))
4889 break;
4890 if (got_hmacs) {
4891 /* already processed a HMAC list */
4892 goto next_param;
4893 }
4894 phdr = sctp_get_next_param(m, offset,
4895 (struct sctp_paramhdr *)hmacs_store,
4896 plen);
4897 if (phdr == NULL)
4898 return (-28);
4899 hmacs = (struct sctp_auth_hmac_algo *)phdr;
4900 hmacs_len = plen - sizeof(*hmacs);
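			/* the parameter body is an array of 16-bit HMAC identifiers */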
4901 num_hmacs = hmacs_len / sizeof(hmacs->hmac_ids[0]);
4902 /* validate the hmac list */
4903 if (sctp_verify_hmac_param(hmacs, num_hmacs)) {
4904 return (-29);
4905 }
4906 if (stcb->asoc.peer_hmacs != NULL)
4907 sctp_free_hmaclist(stcb->asoc.peer_hmacs);
4908 stcb->asoc.peer_hmacs = sctp_alloc_hmaclist(num_hmacs);
4909 if (stcb->asoc.peer_hmacs != NULL) {
4910 for (i = 0; i < num_hmacs; i++) {
4911 sctp_auth_add_hmacid(stcb->asoc.peer_hmacs,
4912 ntohs(hmacs->hmac_ids[i]));
4913 }
4914 }
4915 got_hmacs = 1;
4916 } else if (ptype == SCTP_CHUNK_LIST) {
4917 int i;
4918
4919 if (plen > sizeof(chunks_store))
4920 break;
4921 if (got_chklist) {
4922 /* already processed a Chunks list */
4923 goto next_param;
4924 }
4925 phdr = sctp_get_next_param(m, offset,
4926 (struct sctp_paramhdr *)chunks_store,
4927 plen);
4928 if (phdr == NULL)
4929 return (-30);
4930 chunks = (struct sctp_auth_chunk_list *)phdr;
4931 num_chunks = plen - sizeof(*chunks);
4932 if (stcb->asoc.peer_auth_chunks != NULL)
4933 sctp_clear_chunklist(stcb->asoc.peer_auth_chunks);
4934 else
4935 stcb->asoc.peer_auth_chunks = sctp_alloc_chunklist();
4936 for (i = 0; i < num_chunks; i++) {
4937 sctp_auth_add_chunk(chunks->chunk_types[i],
4938 stcb->asoc.peer_auth_chunks);
4939 }
4940 got_chklist = 1;
4941 } else if ((ptype == SCTP_HEARTBEAT_INFO) ||
4942 (ptype == SCTP_STATE_COOKIE) ||
4943 (ptype == SCTP_UNRECOG_PARAM) ||
4944 (ptype == SCTP_COOKIE_PRESERVE) ||
4945 (ptype == SCTP_SUPPORTED_ADDRTYPE) ||
4946 (ptype == SCTP_ADD_IP_ADDRESS) ||
4947 (ptype == SCTP_DEL_IP_ADDRESS) ||
4948 (ptype == SCTP_ERROR_CAUSE_IND) ||
4949 (ptype == SCTP_SUCCESS_REPORT)) {
4950 /* don't care */ ;
4951 } else {
4952 if ((ptype & 0x8000) == 0x0000) {
4953 /*
4954 * must stop processing the rest of the
4955				 * params. Any report bits were handled
4956 * with the call to
4957 * sctp_arethere_unrecognized_parameters()
4958 * when the INIT or INIT-ACK was first seen.
4959 */
4960 break;
4961 }
4962 }
4963next_param:
4964 offset += SCTP_SIZE32(plen);
4965 if (offset >= limit) {
4966 break;
4967 }
4968 phdr = sctp_get_next_param(m, offset, &parm_buf,
4969 sizeof(parm_buf));
4970 }
4971 /* Now check to see if we need to purge any addresses */
4972 for (net = TAILQ_FIRST(&stcb->asoc.nets); net != NULL; net = net_tmp) {
4973 net_tmp = TAILQ_NEXT(net, sctp_next);
4974 if ((net->dest_state & SCTP_ADDR_NOT_IN_ASSOC) ==
4975 SCTP_ADDR_NOT_IN_ASSOC) {
4976 /* This address has been removed from the asoc */
4977 /* remove and free it */
4978 stcb->asoc.numnets--;
4979 TAILQ_REMOVE(&stcb->asoc.nets, net, sctp_next);
4980 sctp_free_remote_addr(net);
4981 if (net == stcb->asoc.primary_destination) {
4982 stcb->asoc.primary_destination = NULL;
4983 sctp_select_primary_destination(stcb);
4984 }
4985 }
4986 }
4987 /* validate authentication required parameters */
4988 if (got_random && got_hmacs) {
4989 stcb->asoc.peer_supports_auth = 1;
4990 } else {
4991 stcb->asoc.peer_supports_auth = 0;
4992 }
4993 if (!sctp_asconf_auth_nochk && stcb->asoc.peer_supports_asconf &&
4994 !stcb->asoc.peer_supports_auth) {
4995 return (-31);
4996 }
4997 /* concatenate the full random key */
4998#ifdef SCTP_AUTH_DRAFT_04
4999 keylen = random_len;
5000 new_key = sctp_alloc_key(keylen);
5001 if (new_key != NULL) {
5002 /* copy in the RANDOM */
5003 if (random != NULL)
5004 bcopy(random->random_data, new_key->key, random_len);
5005 }
5006#else
5007 keylen = sizeof(*random) + random_len + sizeof(*chunks) + num_chunks +
5008 sizeof(*hmacs) + hmacs_len;
5009 new_key = sctp_alloc_key(keylen);
5010 if (new_key != NULL) {
5011 /* copy in the RANDOM */
5012 if (random != NULL) {
5013 keylen = sizeof(*random) + random_len;
5014 bcopy(random, new_key->key, keylen);
5015 }
5016 /* append in the AUTH chunks */
5017 if (chunks != NULL) {
5018 bcopy(chunks, new_key->key + keylen,
5019 sizeof(*chunks) + num_chunks);
5020 keylen += sizeof(*chunks) + num_chunks;
5021 }
5022 /* append in the HMACs */
5023 if (hmacs != NULL) {
5024 bcopy(hmacs, new_key->key + keylen,
5025 sizeof(*hmacs) + hmacs_len);
5026 }
5027 }
5028#endif
5029 else {
5030 return (-32);
5031 }
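	/*
	 * new_key now holds the peer's RANDOM parameter (and, unless built
	 * for draft -04, the peer's CHUNKS and HMAC-ALGO parameters appended
	 * after it); keep it as peer_random and drop any cached association
	 * keys derived from the previous value.
	 */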
5032 if (stcb->asoc.authinfo.peer_random != NULL)
5033 sctp_free_key(stcb->asoc.authinfo.peer_random);
5034 stcb->asoc.authinfo.peer_random = new_key;
5035#ifdef SCTP_AUTH_DRAFT_04
5036 /* don't include the chunks and hmacs for draft -04 */
5037 stcb->asoc.authinfo.peer_random->keylen = random_len;
5038#endif
5039 sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.assoc_keyid);
5040 sctp_clear_cachedkeys(stcb, stcb->asoc.authinfo.recv_keyid);
5041
5042 return (0);
5043}
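
/*
 * Illustrative sketch of a call site: a caller processing an INIT or
 * INIT-ACK chunk would point "offset" just past the fixed portion of the
 * chunk and bound the scan by the chunk length, e.g. (the variable names
 * are hypothetical; struct sctp_init_chunk is assumed from sctp_header.h):
 *
 *	param_offset = chunk_offset + sizeof(struct sctp_init_chunk);
 *	limit = chunk_offset + ntohs(init_cp->ch.chunk_length);
 *	if (sctp_load_addresses_from_init(stcb, m, iphlen, param_offset,
 *	    limit, sh, NULL) < 0) {
 *		(handle the malformed or unusable address list)
 *	}
 *
 * Only the requirement that offset points at the first parameter and limit
 * bounds the parameter block comes from the comment at the top of the
 * function; the rest is an assumption.
 */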
5044
5045int
5046sctp_set_primary_addr(struct sctp_tcb *stcb, struct sockaddr *sa,
5047 struct sctp_nets *net)
5048{
5049 /* make sure the requested primary address exists in the assoc */
5050 if (net == NULL && sa)
5051 net = sctp_findnet(stcb, sa);
5052
5053 if (net == NULL) {
5054 /* didn't find the requested primary address! */
5055 return (-1);
5056 } else {
5057 /* set the primary address */
5058 if (net->dest_state & SCTP_ADDR_UNCONFIRMED) {
4817 /* Must be confirmed */
4818 return (-1);
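			/*
			 * An unconfirmed address cannot be made primary right
			 * away; remember the request on the net so it can be
			 * honored once the address is confirmed.
			 */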
5059 /* Must be confirmed, so queue to set */
5060 net->dest_state |= SCTP_ADDR_REQ_PRIMARY;
5061 return (0);
4819 }
4820 stcb->asoc.primary_destination = net;
4821 net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
4822 net = TAILQ_FIRST(&stcb->asoc.nets);
4823 if (net != stcb->asoc.primary_destination) {
4824 /*
4825		 * first one on the list is NOT the primary.
4826		 * sctp_cmpaddr() is much more efficient if the
4827		 * primary is the first on the list, so move it to the head.
4828 */
4829 TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
4830 TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
4831 }
4832 return (0);
4833 }
4834}
4835
4836
4837int
4838sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, struct timeval *now)
4839{
4840 /*
4841 * This function serves two purposes. It will see if a TAG can be
4842	 * re-used, returning 1 if it is ok and 0 if that tag should not
4843	 * be used. As a secondary function, it purges out old tags that
4844 * can be removed.
4845 */
4846 struct sctpasochead *head;
4847 struct sctpvtaghead *chain;
4848 struct sctp_tagblock *twait_block;
4849 struct sctp_tcb *stcb;
4850 int i;
4851
4852 SCTP_INP_INFO_WLOCK();
4853 chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
4854 /* First is the vtag in use ? */
4855
4856 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(tag,
4857 sctppcbinfo.hashasocmark)];
4858 if (head == NULL) {
4859 goto check_restart;
4860 }
4861 LIST_FOREACH(stcb, head, sctp_asocs) {
4862
4863 if (stcb->asoc.my_vtag == tag) {
4864 /*
4865 * We should remove this if and return 0 always if
4866 * we want vtags unique across all endpoints. For
4867			 * now, within an endpoint is ok.
4868 */
4869 if (inp == stcb->sctp_ep) {
4870 /* bad tag, in use */
4871 SCTP_INP_INFO_WUNLOCK();
4872 return (0);
4873 }
4874 }
4875 }
4876check_restart:
4877	/* Now let's check the restart hash */
4878 head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(tag,
4879 sctppcbinfo.hashrestartmark)];
4880 if (head == NULL) {
4881 goto check_time_wait;
4882 }
4883 LIST_FOREACH(stcb, head, sctp_tcbrestarhash) {
4884 if (stcb->asoc.assoc_id == tag) {
4885 /* candidate */
4886 if (inp == stcb->sctp_ep) {
4887 /* bad tag, in use */
4888 SCTP_INP_INFO_WUNLOCK();
4889 return (0);
4890 }
4891 }
4892 }
4893check_time_wait:
4894 /* Now what about timed wait ? */
4895 if (!SCTP_LIST_EMPTY(chain)) {
4896 /*
4897		 * Block(s) are present, let's see if we have this tag in the
4898 * list
4899 */
4900 LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
4901 for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
4902 if (twait_block->vtag_block[i].v_tag == 0) {
4903 /* not used */
4904 continue;
4905 } else if ((long)twait_block->vtag_block[i].tv_sec_at_expire >
4906 now->tv_sec) {
4907 /* Audit expires this guy */
4908 twait_block->vtag_block[i].tv_sec_at_expire = 0;
4909 twait_block->vtag_block[i].v_tag = 0;
4910 } else if (twait_block->vtag_block[i].v_tag ==
4911 tag) {
4912 /* Bad tag, sorry :< */
4913 SCTP_INP_INFO_WUNLOCK();
4914 return (0);
4915 }
4916 }
4917 }
4918 }
4919 /* Not found, ok to use the tag */
4920 SCTP_INP_INFO_WUNLOCK();
4921 return (1);
4922}
4923
4924
5062 }
5063 stcb->asoc.primary_destination = net;
5064 net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
5065 net = TAILQ_FIRST(&stcb->asoc.nets);
5066 if (net != stcb->asoc.primary_destination) {
5067 /*
5068		 * first one on the list is NOT the primary.
5069		 * sctp_cmpaddr() is much more efficient if the
5070		 * primary is the first on the list, so move it to the head.
5071 */
5072 TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
5073 TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
5074 }
5075 return (0);
5076 }
5077}
5078
5079
5080int
5081sctp_is_vtag_good(struct sctp_inpcb *inp, uint32_t tag, struct timeval *now)
5082{
5083 /*
5084 * This function serves two purposes. It will see if a TAG can be
5085	 * re-used, returning 1 if it is ok and 0 if that tag should not
5086	 * be used. As a secondary function, it purges out old tags that
5087 * can be removed.
5088 */
5089 struct sctpasochead *head;
5090 struct sctpvtaghead *chain;
5091 struct sctp_tagblock *twait_block;
5092 struct sctp_tcb *stcb;
5093 int i;
5094
5095 SCTP_INP_INFO_WLOCK();
5096 chain = &sctppcbinfo.vtag_timewait[(tag % SCTP_STACK_VTAG_HASH_SIZE)];
5097 /* First is the vtag in use ? */
5098
5099 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(tag,
5100 sctppcbinfo.hashasocmark)];
5101 if (head == NULL) {
5102 goto check_restart;
5103 }
5104 LIST_FOREACH(stcb, head, sctp_asocs) {
5105
5106 if (stcb->asoc.my_vtag == tag) {
5107 /*
5108 * We should remove this if and return 0 always if
5109 * we want vtags unique across all endpoints. For
5110			 * now, within an endpoint is ok.
5111 */
5112 if (inp == stcb->sctp_ep) {
5113 /* bad tag, in use */
5114 SCTP_INP_INFO_WUNLOCK();
5115 return (0);
5116 }
5117 }
5118 }
5119check_restart:
5120	/* Now let's check the restart hash */
5121 head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(tag,
5122 sctppcbinfo.hashrestartmark)];
5123 if (head == NULL) {
5124 goto check_time_wait;
5125 }
5126 LIST_FOREACH(stcb, head, sctp_tcbrestarhash) {
5127 if (stcb->asoc.assoc_id == tag) {
5128 /* candidate */
5129 if (inp == stcb->sctp_ep) {
5130 /* bad tag, in use */
5131 SCTP_INP_INFO_WUNLOCK();
5132 return (0);
5133 }
5134 }
5135 }
5136check_time_wait:
5137 /* Now what about timed wait ? */
5138 if (!SCTP_LIST_EMPTY(chain)) {
5139 /*
5140		 * Block(s) are present, let's see if we have this tag in the
5141 * list
5142 */
5143 LIST_FOREACH(twait_block, chain, sctp_nxt_tagblock) {
5144 for (i = 0; i < SCTP_NUMBER_IN_VTAG_BLOCK; i++) {
5145 if (twait_block->vtag_block[i].v_tag == 0) {
5146 /* not used */
5147 continue;
5148 } else if ((long)twait_block->vtag_block[i].tv_sec_at_expire >
5149 now->tv_sec) {
5150 /* Audit expires this guy */
5151 twait_block->vtag_block[i].tv_sec_at_expire = 0;
5152 twait_block->vtag_block[i].v_tag = 0;
5153 } else if (twait_block->vtag_block[i].v_tag ==
5154 tag) {
5155 /* Bad tag, sorry :< */
5156 SCTP_INP_INFO_WUNLOCK();
5157 return (0);
5158 }
5159 }
5160 }
5161 }
5162 /* Not found, ok to use the tag */
5163 SCTP_INP_INFO_WUNLOCK();
5164 return (1);
5165}
5166
5167
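/*
 * Illustrative sketch of how a tag chooser might use sctp_is_vtag_good():
 * keep drawing candidate tags until one is neither zero nor known to the
 * hashes above. The source of randomness, the SCTP_GETTIME_TIMEVAL() shim
 * and the surrounding locking context are assumptions, not taken from this
 * function:
 *
 *	struct timeval now;
 *	uint32_t vtag;
 *
 *	(void)SCTP_GETTIME_TIMEVAL(&now);
 *	do {
 *		vtag = pick_a_random_32bit_value();
 *	} while ((vtag == 0) || (sctp_is_vtag_good(inp, vtag, &now) == 0));
 */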
4925/*
4926 * Delete the address from the endpoint local address list Lookup using a
4927 * sockaddr address (ie. not an ifaddr)
4928 */
4929int
4930sctp_del_local_addr_ep_sa(struct sctp_inpcb *inp, struct sockaddr *sa)
4931{
4932 struct sctp_laddr *laddr;
4933 struct sockaddr *l_sa;
4934 int found = 0;
4935
4936 /*
4937 * Here is another function I cannot find a caller for. As such we
4938 * SHOULD delete it if we have no users. If we find a user, that user
4939 * MUST have the INP locked.
4940 *
4941 */
4942
4943 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
4944 /* You are already bound to all. You have it already */
4945 return (EINVAL);
4946 }
4947 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) {
4948 /* make sure the address exists */
4949 if (laddr->ifa == NULL)
4950 continue;
4951 if (laddr->ifa->ifa_addr == NULL)
4952 continue;
4953
4954 l_sa = laddr->ifa->ifa_addr;
4955 if (l_sa->sa_family == AF_INET6) {
4956 /* IPv6 address */
4957 struct sockaddr_in6 *sin1, *sin2;
4958
4959 sin1 = (struct sockaddr_in6 *)l_sa;
4960 sin2 = (struct sockaddr_in6 *)sa;
4961 if (memcmp(&sin1->sin6_addr, &sin2->sin6_addr,
4962 sizeof(struct in6_addr)) == 0) {
4963 /* matched */
4964 found = 1;
4965 break;
4966 }
4967 } else if (l_sa->sa_family == AF_INET) {
4968 /* IPv4 address */
4969 struct sockaddr_in *sin1, *sin2;
4970
4971 sin1 = (struct sockaddr_in *)l_sa;
4972 sin2 = (struct sockaddr_in *)sa;
4973 if (sin1->sin_addr.s_addr == sin2->sin_addr.s_addr) {
4974 /* matched */
4975 found = 1;
4976 break;
4977 }
4978 } else {
4979 /* invalid family */
4980 return (-1);
4981 }
4982 }
4983
4984 if (found && inp->laddr_count < 2) {
4985 /* can't delete unless there are at LEAST 2 addresses */
4986 return (-1);
4987 }
4988 if (found && (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) == 0) {
4989 /*
4990 * remove it from the ep list, this should NOT be done until
4991		 * it's really gone from the interface list and we won't be
4992		 * receiving more of these. Probably right away. If we do
4993		 * allow a removal of an address from an association
4994		 * (sub-set bind) then this should NOT be called until
4995		 * all the ASCONFs come back from this association.
4996 */
4997 sctp_remove_laddr(laddr);
4998 return (0);
4999 } else {
5000 return (-1);
5001 }
5002}
5003
5004static sctp_assoc_t reneged_asoc_ids[256];
5005static uint8_t reneged_at = 0;
5006
5168static sctp_assoc_t reneged_asoc_ids[256];
5169static uint8_t reneged_at = 0;
5170
5007extern int sctp_do_drain;
5008
5009static void
5010sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
5011{
5012 /*
5013 * We must hunt this association for MBUF's past the cumack (i.e.
5014 * out of order data that we can renege on).
5015 */
5016 struct sctp_association *asoc;
5017 struct sctp_tmit_chunk *chk, *nchk;
5018 uint32_t cumulative_tsn_p1, tsn;
5019 struct sctp_queued_to_read *ctl, *nctl;
5020 int cnt, strmat, gap;
5021
5022 /* We look for anything larger than the cum-ack + 1 */
5023
5024 SCTP_STAT_INCR(sctps_protocol_drain_calls);
5025 if (sctp_do_drain == 0) {
5026 return;
5027 }
5028 asoc = &stcb->asoc;
5029 if (asoc->cumulative_tsn == asoc->highest_tsn_inside_map) {
5030 /* none we can reneg on. */
5031 return;
5032 }
5033 SCTP_STAT_INCR(sctps_protocol_drains_done);
5034 cumulative_tsn_p1 = asoc->cumulative_tsn + 1;
5035 cnt = 0;
5036 /* First look in the re-assembly queue */
5037 chk = TAILQ_FIRST(&asoc->reasmqueue);
5038 while (chk) {
5039 /* Get the next one */
5040 nchk = TAILQ_NEXT(chk, sctp_next);
5041 if (compare_with_wrap(chk->rec.data.TSN_seq,
5042 cumulative_tsn_p1, MAX_TSN)) {
5043 /* Yep it is above cum-ack */
5044 cnt++;
5045 tsn = chk->rec.data.TSN_seq;
5046 if (tsn >= asoc->mapping_array_base_tsn) {
5047 gap = tsn - asoc->mapping_array_base_tsn;
5048 } else {
5049 gap = (MAX_TSN - asoc->mapping_array_base_tsn) +
5050 tsn + 1;
5051 }
5052 asoc->size_on_reasm_queue = sctp_sbspace_sub(asoc->size_on_reasm_queue, chk->send_size);
5053 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5054 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
5055 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5056 if (chk->data) {
5057 sctp_m_freem(chk->data);
5058 chk->data = NULL;
5059 }
5060 sctp_free_remote_addr(chk->whoTo);
5061 sctp_free_a_chunk(stcb, chk);
5062 }
5063 chk = nchk;
5064 }
5065 /* Ok that was fun, now we will drain all the inbound streams? */
5066 for (strmat = 0; strmat < asoc->streamincnt; strmat++) {
5067 ctl = TAILQ_FIRST(&asoc->strmin[strmat].inqueue);
5068 while (ctl) {
5069 nctl = TAILQ_NEXT(ctl, next);
5070 if (compare_with_wrap(ctl->sinfo_tsn,
5071 cumulative_tsn_p1, MAX_TSN)) {
5072 /* Yep it is above cum-ack */
5073 cnt++;
5074 tsn = ctl->sinfo_tsn;
5075 if (tsn >= asoc->mapping_array_base_tsn) {
5076 gap = tsn -
5077 asoc->mapping_array_base_tsn;
5078 } else {
5079 gap = (MAX_TSN -
5080 asoc->mapping_array_base_tsn) +
5081 tsn + 1;
5082 }
5083 asoc->size_on_all_streams = sctp_sbspace_sub(asoc->size_on_all_streams, ctl->length);
5084 sctp_ucount_decr(asoc->cnt_on_all_streams);
5085
5086 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array,
5087 gap);
5088 TAILQ_REMOVE(&asoc->strmin[strmat].inqueue,
5089 ctl, next);
5090 if (ctl->data) {
5091 sctp_m_freem(ctl->data);
5092 ctl->data = NULL;
5093 }
5094 sctp_free_remote_addr(ctl->whoFrom);
5095 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, ctl);
5096 SCTP_DECR_READQ_COUNT();
5097 }
5098 ctl = nctl;
5099 }
5100 }
5101 /*
5102 * Question, should we go through the delivery queue? The only
5103 * reason things are on here is the app not reading OR a p-d-api up.
5104 * An attacker COULD send enough in to initiate the PD-API and then
5105 * send a bunch of stuff to other streams... these would wind up on
5106 * the delivery queue.. and then we would not get to them. But in
5107 * order to do this I then have to back-track and un-deliver
5108 * sequence numbers in streams.. el-yucko. I think for now we will
5109 * NOT look at the delivery queue and leave it to be something to
5110 * consider later. An alternative would be to abort the P-D-API with
5111 * a notification and then deliver the data.... Or another method
5112 * might be to keep track of how many times the situation occurs and
5113 * if we see a possible attack underway just abort the association.
5114 */
5115#ifdef SCTP_DEBUG
5116 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
5117 if (cnt) {
5118 printf("Freed %d chunks from reneg harvest\n", cnt);
5119 }
5120 }
5121#endif /* SCTP_DEBUG */
5122 if (cnt) {
5123 /*
5124 * Now do we need to find a new
5125 * asoc->highest_tsn_inside_map?
5126 */
5127 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
5128 gap = asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn;
5129 } else {
5130 gap = (MAX_TSN - asoc->mapping_array_base_tsn) +
5131 asoc->highest_tsn_inside_map + 1;
5132 }
5133 if (gap >= (asoc->mapping_array_size << 3)) {
5134 /*
5135 * Something bad happened or cum-ack and high were
5136 * behind the base, but if so earlier checks should
5137			 * have found NO data... weird... we will start at
5138 * end of mapping array.
5139 */
5140 printf("Gap was larger than array?? %d set to max:%d maparraymax:%x\n",
5141 (int)gap,
5142 (int)(asoc->mapping_array_size << 3),
5143 (int)asoc->highest_tsn_inside_map);
5144 gap = asoc->mapping_array_size << 3;
5145 }
5146 while (gap > 0) {
5147 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
5148 /* found the new highest */
5149 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn + gap;
5150 break;
5151 }
5152 gap--;
5153 }
5154 if (gap == 0) {
5155 /* Nothing left in map */
5156 memset(asoc->mapping_array, 0, asoc->mapping_array_size);
5157 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
5158 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
5159 }
5160 asoc->last_revoke_count = cnt;
5161 SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
5162 sctp_send_sack(stcb);
5171
5172static void
5173sctp_drain_mbufs(struct sctp_inpcb *inp, struct sctp_tcb *stcb)
5174{
5175 /*
5176 * We must hunt this association for MBUF's past the cumack (i.e.
5177 * out of order data that we can renege on).
5178 */
5179 struct sctp_association *asoc;
5180 struct sctp_tmit_chunk *chk, *nchk;
5181 uint32_t cumulative_tsn_p1, tsn;
5182 struct sctp_queued_to_read *ctl, *nctl;
5183 int cnt, strmat, gap;
5184
5185 /* We look for anything larger than the cum-ack + 1 */
5186
5187 SCTP_STAT_INCR(sctps_protocol_drain_calls);
5188 if (sctp_do_drain == 0) {
5189 return;
5190 }
5191 asoc = &stcb->asoc;
5192 if (asoc->cumulative_tsn == asoc->highest_tsn_inside_map) {
5193 /* none we can reneg on. */
5194 return;
5195 }
5196 SCTP_STAT_INCR(sctps_protocol_drains_done);
5197 cumulative_tsn_p1 = asoc->cumulative_tsn + 1;
5198 cnt = 0;
5199 /* First look in the re-assembly queue */
5200 chk = TAILQ_FIRST(&asoc->reasmqueue);
5201 while (chk) {
5202 /* Get the next one */
5203 nchk = TAILQ_NEXT(chk, sctp_next);
5204 if (compare_with_wrap(chk->rec.data.TSN_seq,
5205 cumulative_tsn_p1, MAX_TSN)) {
5206 /* Yep it is above cum-ack */
5207 cnt++;
5208 tsn = chk->rec.data.TSN_seq;
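			/*
			 * "gap" is this TSN's bit offset into the mapping
			 * array relative to mapping_array_base_tsn, computed
			 * so that 32-bit TSN wrap-around is handled.
			 */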
5209 if (tsn >= asoc->mapping_array_base_tsn) {
5210 gap = tsn - asoc->mapping_array_base_tsn;
5211 } else {
5212 gap = (MAX_TSN - asoc->mapping_array_base_tsn) +
5213 tsn + 1;
5214 }
5215 asoc->size_on_reasm_queue = sctp_sbspace_sub(asoc->size_on_reasm_queue, chk->send_size);
5216 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5217 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
5218 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5219 if (chk->data) {
5220 sctp_m_freem(chk->data);
5221 chk->data = NULL;
5222 }
5223 sctp_free_remote_addr(chk->whoTo);
5224 sctp_free_a_chunk(stcb, chk);
5225 }
5226 chk = nchk;
5227 }
5228 /* Ok that was fun, now we will drain all the inbound streams? */
5229 for (strmat = 0; strmat < asoc->streamincnt; strmat++) {
5230 ctl = TAILQ_FIRST(&asoc->strmin[strmat].inqueue);
5231 while (ctl) {
5232 nctl = TAILQ_NEXT(ctl, next);
5233 if (compare_with_wrap(ctl->sinfo_tsn,
5234 cumulative_tsn_p1, MAX_TSN)) {
5235 /* Yep it is above cum-ack */
5236 cnt++;
5237 tsn = ctl->sinfo_tsn;
5238 if (tsn >= asoc->mapping_array_base_tsn) {
5239 gap = tsn -
5240 asoc->mapping_array_base_tsn;
5241 } else {
5242 gap = (MAX_TSN -
5243 asoc->mapping_array_base_tsn) +
5244 tsn + 1;
5245 }
5246 asoc->size_on_all_streams = sctp_sbspace_sub(asoc->size_on_all_streams, ctl->length);
5247 sctp_ucount_decr(asoc->cnt_on_all_streams);
5248
5249 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array,
5250 gap);
5251 TAILQ_REMOVE(&asoc->strmin[strmat].inqueue,
5252 ctl, next);
5253 if (ctl->data) {
5254 sctp_m_freem(ctl->data);
5255 ctl->data = NULL;
5256 }
5257 sctp_free_remote_addr(ctl->whoFrom);
5258 SCTP_ZONE_FREE(sctppcbinfo.ipi_zone_readq, ctl);
5259 SCTP_DECR_READQ_COUNT();
5260 }
5261 ctl = nctl;
5262 }
5263 }
5264 /*
5265 * Question, should we go through the delivery queue? The only
5266 * reason things are on here is the app not reading OR a p-d-api up.
5267 * An attacker COULD send enough in to initiate the PD-API and then
5268 * send a bunch of stuff to other streams... these would wind up on
5269 * the delivery queue.. and then we would not get to them. But in
5270 * order to do this I then have to back-track and un-deliver
5271 * sequence numbers in streams.. el-yucko. I think for now we will
5272 * NOT look at the delivery queue and leave it to be something to
5273 * consider later. An alternative would be to abort the P-D-API with
5274 * a notification and then deliver the data.... Or another method
5275 * might be to keep track of how many times the situation occurs and
5276 * if we see a possible attack underway just abort the association.
5277 */
5278#ifdef SCTP_DEBUG
5279 if (sctp_debug_on & SCTP_DEBUG_PCB1) {
5280 if (cnt) {
5281 printf("Freed %d chunks from reneg harvest\n", cnt);
5282 }
5283 }
5284#endif /* SCTP_DEBUG */
5285 if (cnt) {
5286 /*
5287 * Now do we need to find a new
5288 * asoc->highest_tsn_inside_map?
5289 */
5290 if (asoc->highest_tsn_inside_map >= asoc->mapping_array_base_tsn) {
5291 gap = asoc->highest_tsn_inside_map - asoc->mapping_array_base_tsn;
5292 } else {
5293 gap = (MAX_TSN - asoc->mapping_array_base_tsn) +
5294 asoc->highest_tsn_inside_map + 1;
5295 }
5296 if (gap >= (asoc->mapping_array_size << 3)) {
5297 /*
5298 * Something bad happened or cum-ack and high were
5299 * behind the base, but if so earlier checks should
5300			 * have found NO data... weird... we will start at
5301 * end of mapping array.
5302 */
5303 printf("Gap was larger than array?? %d set to max:%d maparraymax:%x\n",
5304 (int)gap,
5305 (int)(asoc->mapping_array_size << 3),
5306 (int)asoc->highest_tsn_inside_map);
5307 gap = asoc->mapping_array_size << 3;
5308 }
5309 while (gap > 0) {
5310 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
5311 /* found the new highest */
5312 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn + gap;
5313 break;
5314 }
5315 gap--;
5316 }
5317 if (gap == 0) {
5318 /* Nothing left in map */
5319 memset(asoc->mapping_array, 0, asoc->mapping_array_size);
5320 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
5321 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
5322 }
5323 asoc->last_revoke_count = cnt;
5324 SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
5325 sctp_send_sack(stcb);
5326 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_DRAIN);
5163 reneged_asoc_ids[reneged_at] = sctp_get_associd(stcb);
5164 reneged_at++;
5165 }
5166 /*
5167 * Another issue, in un-setting the TSN's in the mapping array we
5168	 * DID NOT adjust the highest_tsn marker. This will cause one of two
5169 * things to occur. It may cause us to do extra work in checking for
5170 * our mapping array movement. More importantly it may cause us to
5171 * SACK every datagram. This may not be a bad thing though since we
5172 * will recover once we get our cum-ack above and all this stuff we
5173 * dumped recovered.
5174 */
5175}
5176
5177void
5178sctp_drain()
5179{
5180 /*
5181 * We must walk the PCB lists for ALL associations here. The system
5182 * is LOW on MBUF's and needs help. This is where reneging will
5183 * occur. We really hope this does NOT happen!
5184 */
5185 struct sctp_inpcb *inp;
5186 struct sctp_tcb *stcb;
5187
5188 SCTP_INP_INFO_RLOCK();
5189 LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
5190 /* For each endpoint */
5191 SCTP_INP_RLOCK(inp);
5192 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
5193 /* For each association */
5194 SCTP_TCB_LOCK(stcb);
5195 sctp_drain_mbufs(inp, stcb);
5196 SCTP_TCB_UNLOCK(stcb);
5197 }
5198 SCTP_INP_RUNLOCK(inp);
5199 }
5200 SCTP_INP_INFO_RUNLOCK();
5201}
5202
5203/*
5204 * start a new iterator
5205 * iterates through all endpoints and associations based on the pcb_state
5206 * flags and asoc_state. "af" (mandatory) is executed for all matching
5207 * assocs and "ef" (optional) is executed when the iterator completes.
5208 * "inpf" (optional) is executed for each new endpoint as it is being
5209 * iterated through.
5210 */
5211int
5327 reneged_asoc_ids[reneged_at] = sctp_get_associd(stcb);
5328 reneged_at++;
5329 }
5330 /*
5331 * Another issue, in un-setting the TSN's in the mapping array we
5332	 * DID NOT adjust the highest_tsn marker. This will cause one of two
5333 * things to occur. It may cause us to do extra work in checking for
5334 * our mapping array movement. More importantly it may cause us to
5335 * SACK every datagram. This may not be a bad thing though since we
5336 * will recover once we get our cum-ack above and all this stuff we
5337 * dumped recovered.
5338 */
5339}
5340
5341void
5342sctp_drain()
5343{
5344 /*
5345 * We must walk the PCB lists for ALL associations here. The system
5346 * is LOW on MBUF's and needs help. This is where reneging will
5347 * occur. We really hope this does NOT happen!
5348 */
5349 struct sctp_inpcb *inp;
5350 struct sctp_tcb *stcb;
5351
5352 SCTP_INP_INFO_RLOCK();
5353 LIST_FOREACH(inp, &sctppcbinfo.listhead, sctp_list) {
5354 /* For each endpoint */
5355 SCTP_INP_RLOCK(inp);
5356 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
5357 /* For each association */
5358 SCTP_TCB_LOCK(stcb);
5359 sctp_drain_mbufs(inp, stcb);
5360 SCTP_TCB_UNLOCK(stcb);
5361 }
5362 SCTP_INP_RUNLOCK(inp);
5363 }
5364 SCTP_INP_INFO_RUNLOCK();
5365}
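
/*
 * sctp_drain() takes no arguments and walks every endpoint and association;
 * it is the sort of routine a protocol normally registers as its pr_drain
 * hook so the kernel can invoke it when mbufs run short (see the comment
 * inside), rather than something callers invoke directly.
 */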
5366
5367/*
5368 * start a new iterator
5369 * iterates through all endpoints and associations based on the pcb_state
5370 * flags and asoc_state. "af" (mandatory) is executed for all matching
5371 * assocs and "ef" (optional) is executed when the iterator completes.
5372 * "inpf" (optional) is executed for each new endpoint as it is being
5373 * iterated through.
5374 */
5375int
5212sctp_initiate_iterator(inp_func inpf, asoc_func af, uint32_t pcb_state,
5213 uint32_t pcb_features, uint32_t asoc_state, void *argp, uint32_t argi,
5214 end_func ef, struct sctp_inpcb *s_inp, uint8_t chunk_output_off)
5376sctp_initiate_iterator(inp_func inpf,
5377 asoc_func af,
5378 inp_func inpe,
5379 uint32_t pcb_state,
5380 uint32_t pcb_features,
5381 uint32_t asoc_state,
5382 void *argp,
5383 uint32_t argi,
5384 end_func ef,
5385 struct sctp_inpcb *s_inp,
5386 uint8_t chunk_output_off)
5215{
5216 struct sctp_iterator *it = NULL;
5217
5218 if (af == NULL) {
5219 return (-1);
5220 }
5221 SCTP_MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator),
5222 "Iterator");
5223 if (it == NULL) {
5224 return (ENOMEM);
5225 }
5226 memset(it, 0, sizeof(*it));
5227 it->function_assoc = af;
5228 it->function_inp = inpf;
5387{
5388 struct sctp_iterator *it = NULL;
5389
5390 if (af == NULL) {
5391 return (-1);
5392 }
5393 SCTP_MALLOC(it, struct sctp_iterator *, sizeof(struct sctp_iterator),
5394 "Iterator");
5395 if (it == NULL) {
5396 return (ENOMEM);
5397 }
5398 memset(it, 0, sizeof(*it));
5399 it->function_assoc = af;
5400 it->function_inp = inpf;
5401 if (inpf)
5402 it->done_current_ep = 0;
5403 else
5404 it->done_current_ep = 1;
5229 it->function_atend = ef;
5230 it->pointer = argp;
5231 it->val = argi;
5232 it->pcb_flags = pcb_state;
5233 it->pcb_features = pcb_features;
5234 it->asoc_state = asoc_state;
5405 it->function_atend = ef;
5406 it->pointer = argp;
5407 it->val = argi;
5408 it->pcb_flags = pcb_state;
5409 it->pcb_features = pcb_features;
5410 it->asoc_state = asoc_state;
5411 it->function_inp_end = inpe;
5235 it->no_chunk_output = chunk_output_off;
5236 if (s_inp) {
5237 it->inp = s_inp;
5238 it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP;
5239 } else {
5240 SCTP_INP_INFO_RLOCK();
5241 it->inp = LIST_FIRST(&sctppcbinfo.listhead);
5412 it->no_chunk_output = chunk_output_off;
5413 if (s_inp) {
5414 it->inp = s_inp;
5415 it->iterator_flags = SCTP_ITERATOR_DO_SINGLE_INP;
5416 } else {
5417 SCTP_INP_INFO_RLOCK();
5418 it->inp = LIST_FIRST(&sctppcbinfo.listhead);
5419
5242 SCTP_INP_INFO_RUNLOCK();
5243 it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP;
5244
5245 }
5420 SCTP_INP_INFO_RUNLOCK();
5421 it->iterator_flags = SCTP_ITERATOR_DO_ALL_INP;
5422
5423 }
5424 SCTP_IPI_ITERATOR_WQ_LOCK();
5425 if (it->inp)
5426 SCTP_INP_INCR_REF(it->inp);
5427 TAILQ_INSERT_TAIL(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
5428#if defined(SCTP_USE_THREAD_BASED_ITERATOR)
5429 if (sctppcbinfo.iterator_running == 0) {
5430 sctp_wakeup_iterator();
5431 }
5432 SCTP_IPI_ITERATOR_WQ_UNLOCK();
5433#else
5434 if (it->inp)
5435 SCTP_INP_DECR_REF(it->inp);
5436 SCTP_IPI_ITERATOR_WQ_UNLOCK();
5246 /* Init the timer */
5247 SCTP_OS_TIMER_INIT(&it->tmr.timer);
5248 /* add to the list of all iterators */
5437 /* Init the timer */
5438 SCTP_OS_TIMER_INIT(&it->tmr.timer);
5439 /* add to the list of all iterators */
5249 SCTP_INP_INFO_WLOCK();
5250 LIST_INSERT_HEAD(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
5251 SCTP_INP_INFO_WUNLOCK();
5252 sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR, (struct sctp_inpcb *)it,
5253 NULL, NULL);
5440 sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR, (struct sctp_inpcb *)it,
5441 NULL, NULL);
5442#endif
5254 return (0);
5255}
5443 return (0);
5444}
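
/*
 * Illustrative sketch of starting an iterator with the signature above: a
 * per-association callback plus an optional completion callback, run across
 * every endpoint. The callback bodies are hypothetical and their signatures
 * are assumed to match the asoc_func/end_func typedefs in sctp_pcb.h:
 *
 *	static uint32_t count;
 *
 *	static void
 *	count_one_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
 *	    void *ptr, uint32_t val)
 *	{
 *		(*(uint32_t *)ptr)++;
 *	}
 *
 *	static void
 *	count_done(void *ptr, uint32_t val)
 *	{
 *		printf("saw %u associations\n", *(uint32_t *)ptr);
 *	}
 *
 *	...
 *	(void)sctp_initiate_iterator(NULL, count_one_asoc, NULL,
 *	    0, 0, 0, &count, 0, count_done, NULL, 0);
 *
 * Passing NULL for s_inp selects SCTP_ITERATOR_DO_ALL_INP (every endpoint);
 * passing a specific inp restricts the walk to that single endpoint.
 */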