1/*- 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * a) Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * b) Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the distribution. 15 * 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#ifdef INET6
#include <netinet6/sctp6_var.h>
#endif
#include <netinet/sctp_header.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_indata.h>	/* for sctp_deliver_data() */
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <sys/proc.h>


#ifndef KTR_SCTP
#define KTR_SCTP KTR_SUBSYS
#endif

/* Per-module function vector tables, defined elsewhere in the SCTP stack. */
extern struct sctp_cc_functions sctp_cc_functions[];
extern struct sctp_ss_functions sctp_ss_functions[];

/*
 * The sctp_log_*()/sctp_*log() family below all follow the same pattern:
 * fill one union arm of a struct sctp_cwnd_log on the stack, then emit its
 * four 32-bit words (viewed through the x.misc arm) as a KTR trace record
 * via SCTP_CTR6.
 */

/*
 * Log socket-buffer accounting for a sockbuf (and the association's own
 * sb counter, if an stcb is supplied).
 *
 * NOTE(review): sctp_clog is not memset here (unlike rto_logging et al.),
 * so any union bytes not written are read uninitialized via x.misc —
 * confirm this is intentional (trace-only data).
 */
void
sctp_sblog(struct sockbuf *sb, struct sctp_tcb *stcb, int from, int incr)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sb.stcb = stcb;
	sctp_clog.x.sb.so_sbcc = sb->sb_cc;
	if (stcb)
		sctp_clog.x.sb.stcb_sbcc = stcb->asoc.sb_cc;
	else
		sctp_clog.x.sb.stcb_sbcc = 0;
	sctp_clog.x.sb.incr = incr;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SB,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace progress through PCB/association close paths; "loc" identifies
 * the call site within the teardown code.
 */
void
sctp_log_closing(struct sctp_inpcb *inp, struct sctp_tcb *stcb, int16_t loc)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.close.inp = (void *)inp;
	sctp_clog.x.close.sctp_flags = inp->sctp_flags;
	if (stcb) {
		sctp_clog.x.close.stcb = (void *)stcb;
		sctp_clog.x.close.state = (uint16_t) stcb->asoc.state;
	} else {
		sctp_clog.x.close.stcb = 0;
		sctp_clog.x.close.state = 0;
	}
	sctp_clog.x.close.loc = loc;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CLOSE,
	    0,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/* Trace an RTO/RTT observation for a destination; rtt logged in ms. */
void
rto_logging(struct sctp_nets *net, int from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.rto.net = (void *)net;
	sctp_clog.x.rto.rtt = net->rtt / 1000;	/* net->rtt presumably in us — TODO confirm */
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RTT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace a stream-delivery event from explicit TSN/SSN/stream values
 * (variant of sctp_log_strm_del() that takes no queued-read structures;
 * the e_tsn/e_sseq "expected" fields are zeroed).
 */
void
sctp_log_strm_del_alt(struct sctp_tcb *stcb, uint32_t tsn, uint16_t sseq, uint16_t stream, int from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.strlog.stcb = stcb;
	sctp_clog.x.strlog.n_tsn = tsn;
	sctp_clog.x.strlog.n_sseq = sseq;
	sctp_clog.x.strlog.e_tsn = 0;
	sctp_clog.x.strlog.e_sseq = 0;
	sctp_clog.x.strlog.strm = stream;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/* Trace a Nagle decision point together with current queue/flight totals. */
void
sctp_log_nagle_event(struct sctp_tcb *stcb, int action)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.nagle.stcb = (void *)stcb;
	sctp_clog.x.nagle.total_flight = stcb->asoc.total_flight;
	sctp_clog.x.nagle.total_in_queue = stcb->asoc.total_output_queue_size;
	sctp_clog.x.nagle.count_in_queue = stcb->asoc.chunks_on_out_queue;
	sctp_clog.x.nagle.count_in_flight = stcb->asoc.total_flight_count;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_NAGLE,
	    action,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/* Trace SACK processing: old/new cumulative ack, TSN, gap and dup counts. */
void
sctp_log_sack(uint32_t old_cumack, uint32_t cumack, uint32_t tsn, uint16_t gaps, uint16_t dups, int from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.sack.cumack = cumack;
	sctp_clog.x.sack.oldcumack = old_cumack;
	sctp_clog.x.sack.tsn = tsn;
	sctp_clog.x.sack.numGaps = gaps;
	sctp_clog.x.sack.numDups = dups;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_SACK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/* Trace mapping-array state (base, cumulative and highest TSN words). */
void
sctp_log_map(uint32_t map, uint32_t cum, uint32_t high, int from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.map.base = map;
	sctp_clog.x.map.cum = cum;
	sctp_clog.x.map.high = high;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAP,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/* Trace fast-retransmit decisions (largest seen / largest newly acked / TSN). */
void
sctp_log_fr(uint32_t biggest_tsn, uint32_t biggest_new_tsn, uint32_t tsn, int from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.fr.largest_tsn = biggest_tsn;
	sctp_clog.x.fr.largest_new_tsn = biggest_new_tsn;
	sctp_clog.x.fr.tsn = tsn;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_FR,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace an mbuf: flags, length, data pointer, and — for clusters/external
 * storage — the external base and refcount.
 */
void
sctp_log_mb(struct mbuf *m, int from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mb.mp = m;
	sctp_clog.x.mb.mbuf_flags = (uint8_t) (SCTP_BUF_GET_FLAGS(m));
	sctp_clog.x.mb.size = (uint16_t) (SCTP_BUF_LEN(m));
	sctp_clog.x.mb.data = SCTP_BUF_AT(m, 0);
	if (SCTP_BUF_IS_EXTENDED(m)) {
		sctp_clog.x.mb.ext = SCTP_BUF_EXTEND_BASE(m);
		sctp_clog.x.mb.refcnt = (uint8_t) (SCTP_BUF_EXTEND_REFCNT(m));
	} else {
		sctp_clog.x.mb.ext = 0;
		sctp_clog.x.mb.refcnt = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBUF,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace a stream-queue delivery: the control being delivered plus, if
 * given, the queued entry it was checked against (poschk).  A NULL
 * control is reported to the console and ignored.
 */
void
sctp_log_strm_del(struct sctp_queued_to_read *control, struct sctp_queued_to_read *poschk, int from)
{
	struct sctp_cwnd_log sctp_clog;

	if (control == NULL) {
		SCTP_PRINTF("Gak log of NULL?\n");
		return;
	}
	sctp_clog.x.strlog.stcb = control->stcb;
	sctp_clog.x.strlog.n_tsn = control->sinfo_tsn;
	sctp_clog.x.strlog.n_sseq = control->sinfo_ssn;
	sctp_clog.x.strlog.strm = control->sinfo_stream;
	if (poschk != NULL) {
		sctp_clog.x.strlog.e_tsn = poschk->sinfo_tsn;
		sctp_clog.x.strlog.e_sseq = poschk->sinfo_ssn;
	} else {
		sctp_clog.x.strlog.e_tsn = 0;
		sctp_clog.x.strlog.e_sseq = 0;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_STRM,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace a congestion-window change.  Queue counts are clamped to 255 to
 * fit their 8-bit log fields.  For the PRESEND event the
 * meets_pseudo_cumack field is overloaded to carry the peer's rwnd.
 */
void
sctp_log_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net, int augment, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.cwnd.net = net;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;

	if (net) {
		sctp_clog.x.cwnd.cwnd_new_value = net->cwnd;
		sctp_clog.x.cwnd.inflight = net->flight_size;
		sctp_clog.x.cwnd.pseudo_cumack = net->pseudo_cumack;
		sctp_clog.x.cwnd.meets_pseudo_cumack = net->new_pseudo_cumack;
		sctp_clog.x.cwnd.need_new_pseudo_cumack = net->find_pseudo_cumack;
	}
	if (SCTP_CWNDLOG_PRESEND == from) {
		/* field overloaded: carries peers_rwnd for PRESEND events */
		sctp_clog.x.cwnd.meets_pseudo_cumack = stcb->asoc.peers_rwnd;
	}
	sctp_clog.x.cwnd.cwnd_augment = augment;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_CWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Snapshot the ownership state of every lock relevant to this inp/stcb
 * pair (tcb, inp, create, global info, socket buffers) into one record.
 * SCTP_LOCK_UNKNOWN marks locks whose owner object is not available.
 *
 * NOTE(review): sock_lock and sockrcvbuf_lock both sample
 * so_rcv.sb_mtx — looks like sock_lock was meant to sample the socket
 * lock itself; confirm against other FreeBSD versions.
 */
void
sctp_log_lock(struct sctp_inpcb *inp, struct sctp_tcb *stcb, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	if (inp) {
		sctp_clog.x.lock.sock = (void *)inp->sctp_socket;

	} else {
		sctp_clog.x.lock.sock = (void *)NULL;
	}
	sctp_clog.x.lock.inp = (void *)inp;
	if (stcb) {
		sctp_clog.x.lock.tcb_lock = mtx_owned(&stcb->tcb_mtx);
	} else {
		sctp_clog.x.lock.tcb_lock = SCTP_LOCK_UNKNOWN;
	}
	if (inp) {
		sctp_clog.x.lock.inp_lock = mtx_owned(&inp->inp_mtx);
		sctp_clog.x.lock.create_lock = mtx_owned(&inp->inp_create_mtx);
	} else {
		sctp_clog.x.lock.inp_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.create_lock = SCTP_LOCK_UNKNOWN;
	}
	sctp_clog.x.lock.info_lock = rw_wowned(&SCTP_BASE_INFO(ipi_ep_mtx));
	if (inp && (inp->sctp_socket)) {
		sctp_clog.x.lock.sock_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.sockrcvbuf_lock = mtx_owned(&(inp->sctp_socket->so_rcv.sb_mtx));
		sctp_clog.x.lock.socksndbuf_lock = mtx_owned(&(inp->sctp_socket->so_snd.sb_mtx));
	} else {
		sctp_clog.x.lock.sock_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.sockrcvbuf_lock = SCTP_LOCK_UNKNOWN;
		sctp_clog.x.lock.socksndbuf_lock = SCTP_LOCK_UNKNOWN;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_LOCK_EVENT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/*
 * Trace a max-burst limiting event; reuses the cwnd log arm with
 * cwnd_new_value carrying "error" and cwnd_augment carrying "burst".
 *
 * NOTE(review): unlike sctp_log_cwnd(), net is dereferenced without a
 * NULL check — callers presumably always pass a valid net; confirm.
 */
void
sctp_log_maxburst(struct sctp_tcb *stcb, struct sctp_nets *net, int error, int burst, uint8_t from)
{
	struct sctp_cwnd_log sctp_clog;

	memset(&sctp_clog, 0, sizeof(sctp_clog));
	sctp_clog.x.cwnd.net = net;
	sctp_clog.x.cwnd.cwnd_new_value = error;
	sctp_clog.x.cwnd.inflight = net->flight_size;
	sctp_clog.x.cwnd.cwnd_augment = burst;
	if (stcb->asoc.send_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_send = 255;
	else
		sctp_clog.x.cwnd.cnt_in_send = stcb->asoc.send_queue_cnt;
	if (stcb->asoc.stream_queue_cnt > 255)
		sctp_clog.x.cwnd.cnt_in_str = 255;
	else
		sctp_clog.x.cwnd.cnt_in_str = stcb->asoc.stream_queue_cnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MAXBURST,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/* Trace a peer-rwnd consumption check (send size + per-chunk overhead). */
void
sctp_log_rwnd(uint8_t from, uint32_t peers_rwnd, uint32_t snd_size, uint32_t overhead)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = snd_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = 0;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/* As sctp_log_rwnd(), but also records the newly computed rwnd value. */
void
sctp_log_rwnd_set(uint8_t from, uint32_t peers_rwnd, uint32_t flight_size, uint32_t overhead, uint32_t a_rwndval)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.rwnd.rwnd = peers_rwnd;
	sctp_clog.x.rwnd.send_size = flight_size;
	sctp_clog.x.rwnd.overhead = overhead;
	sctp_clog.x.rwnd.new_rwnd = a_rwndval;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_RWND,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/* Trace output-queue byte/mbuf accounting changes. */
void
sctp_log_mbcnt(uint8_t from, uint32_t total_oq, uint32_t book, uint32_t total_mbcnt_q, uint32_t mbcnt)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.mbcnt.total_queue_size = total_oq;
	sctp_clog.x.mbcnt.size_change = book;
	sctp_clog.x.mbcnt.total_queue_mb_size = total_mbcnt_q;
	sctp_clog.x.mbcnt.mbcnt_change = mbcnt;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_MBCNT,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/* Trace four caller-supplied 32-bit values under SCTP_LOG_MISC_EVENT. */
void
sctp_misc_ints(uint8_t from, uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_MISC_EVENT,
	    from,
	    a, b, c, d);
}

/*
 * Trace a socket-wakeup event: wake count, queue depths (clamped to an
 * 8-bit 0xff), the endpoint's deferred-wakeup flag bits, and the low
 * byte of the send sockbuf flags (0xff when no socket is attached).
 */
void
sctp_wakeup_log(struct sctp_tcb *stcb, uint32_t wake_cnt, int from)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.wake.stcb = (void *)stcb;
	sctp_clog.x.wake.wake_cnt = wake_cnt;
	sctp_clog.x.wake.flight = stcb->asoc.total_flight_count;
	sctp_clog.x.wake.send_q = stcb->asoc.send_queue_cnt;
	sctp_clog.x.wake.sent_q = stcb->asoc.sent_queue_cnt;

	if (stcb->asoc.stream_queue_cnt < 0xff)
		sctp_clog.x.wake.stream_qcnt = (uint8_t) stcb->asoc.stream_queue_cnt;
	else
		sctp_clog.x.wake.stream_qcnt = 0xff;

	if (stcb->asoc.chunks_on_out_queue < 0xff)
		sctp_clog.x.wake.chunks_on_oque = (uint8_t) stcb->asoc.chunks_on_out_queue;
	else
		sctp_clog.x.wake.chunks_on_oque = 0xff;

	sctp_clog.x.wake.sctpflags = 0;
	/* set in the deferred mode stuff */
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_DONT_WAKE)
		sctp_clog.x.wake.sctpflags |= 1;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEOUTPUT)
		sctp_clog.x.wake.sctpflags |= 2;
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_WAKEINPUT)
		sctp_clog.x.wake.sctpflags |= 4;
	/* what about the sb */
	if (stcb->sctp_socket) {
		struct socket *so = stcb->sctp_socket;

		sctp_clog.x.wake.sbflags = (uint8_t) ((so->so_snd.sb_flags & 0x00ff));
	} else {
		sctp_clog.x.wake.sbflags = 0xff;
	}
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_WAKE,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/* Trace a sender-blocking event with queue/flight/rwnd state (flight in KiB). */
void
sctp_log_block(uint8_t from, struct sctp_association *asoc, int sendlen)
{
	struct sctp_cwnd_log sctp_clog;

	sctp_clog.x.blk.onsb = asoc->total_output_queue_size;
	sctp_clog.x.blk.send_sent_qcnt = (uint16_t) (asoc->send_queue_cnt + asoc->sent_queue_cnt);
	sctp_clog.x.blk.peer_rwnd = asoc->peers_rwnd;
	sctp_clog.x.blk.stream_qcnt = (uint16_t) asoc->stream_queue_cnt;
	sctp_clog.x.blk.chunks_on_oque = (uint16_t) asoc->chunks_on_out_queue;
	sctp_clog.x.blk.flight_size = (uint16_t) (asoc->total_flight / 1024);
	sctp_clog.x.blk.sndlen = sendlen;
	SCTP_CTR6(KTR_SCTP, "SCTP:%d[%d]:%x-%x-%x-%x",
	    SCTP_LOG_EVENT_BLOCK,
	    from,
	    sctp_clog.x.misc.log1,
	    sctp_clog.x.misc.log2,
	    sctp_clog.x.misc.log3,
	    sctp_clog.x.misc.log4);
}

/* Stat-log fill stub: logging is retrieved via ktrdump, nothing to copy out. */
int
sctp_fill_stat_log(void *optval SCTP_UNUSED, size_t *optsize SCTP_UNUSED)
{
	/* May need to fix this if ktrdump does not work */
	return (0);
}

#ifdef SCTP_AUDITING_ENABLED
/* Circular audit trail of (event, detail) byte pairs; indx wraps at SIZE. */
uint8_t sctp_audit_data[SCTP_AUDIT_SIZE][2];
static int sctp_audit_indx = 0;

/*
 * Dump the audit ring to the console, oldest entry first (from
 * sctp_audit_indx to the end, then the wrapped prefix).  Records 0xe0/01,
 * 0xf0* and 0xc0/01 act as line breaks; otherwise 14 pairs per line.
 */
static
void
sctp_print_audit_report(void)
{
	int i;
	int cnt;

	cnt = 0;
	for (i = sctp_audit_indx; i < SCTP_AUDIT_SIZE; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
		    (uint32_t) sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	for (i = 0; i < sctp_audit_indx; i++) {
		if ((sctp_audit_data[i][0] == 0xe0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if (sctp_audit_data[i][0] == 0xf0) {
			cnt = 0;
			SCTP_PRINTF("\n");
		} else if ((sctp_audit_data[i][0] == 0xc0) &&
		    (sctp_audit_data[i][1] == 0x01)) {
			SCTP_PRINTF("\n");
			cnt = 0;
		}
		SCTP_PRINTF("%2.2x%2.2x ", (uint32_t) sctp_audit_data[i][0],
		    (uint32_t) sctp_audit_data[i][1]);
		cnt++;
		if ((cnt % 14) == 0)
			SCTP_PRINTF("\n");
	}
	SCTP_PRINTF("\n");
}

/*
 * Cross-check the association's bookkeeping (retransmit count, total
 * flight bytes/chunks, per-net flight sizes) against the actual sent
 * queue.  Discrepancies are recorded in the audit ring (0xAF markers),
 * printed, and corrected in place; a report is dumped if anything was
 * fixed.  Records 0xAF/0x01 and 0xAF/0x02 mark a NULL inp/stcb.
 */
void
sctp_auditing(int from, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int resend_cnt, tot_out, rep, tot_book_cnt;
	struct sctp_nets *lnet;
	struct sctp_tmit_chunk *chk;

	sctp_audit_data[sctp_audit_indx][0] = 0xAA;
	sctp_audit_data[sctp_audit_indx][1] = 0x000000ff & from;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	if (inp == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x01;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	if (stcb == NULL) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0x02;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		return;
	}
	sctp_audit_data[sctp_audit_indx][0] = 0xA1;
	sctp_audit_data[sctp_audit_indx][1] =
	    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
	rep = 0;
	tot_book_cnt = 0;
	resend_cnt = tot_out = 0;
	/* recount retransmits and in-flight bytes/chunks from the sent queue */
	TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			resend_cnt++;
		} else if (chk->sent < SCTP_DATAGRAM_RESEND) {
			tot_out += chk->book_size;
			tot_book_cnt++;
		}
	}
	if (resend_cnt != stcb->asoc.sent_queue_retran_cnt) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA1;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		SCTP_PRINTF("resend_cnt:%d asoc-tot:%d\n",
		    resend_cnt, stcb->asoc.sent_queue_retran_cnt);
		rep = 1;
		stcb->asoc.sent_queue_retran_cnt = resend_cnt;
		sctp_audit_data[sctp_audit_indx][0] = 0xA2;
		sctp_audit_data[sctp_audit_indx][1] =
		    (0x000000ff & stcb->asoc.sent_queue_retran_cnt);
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA2;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt:%d asoc_tot:%d\n", tot_out,
		    (int)stcb->asoc.total_flight);
		stcb->asoc.total_flight = tot_out;
	}
	if (tot_book_cnt != stcb->asoc.total_flight_count) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA5;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("tot_flt_book:%d\n", tot_book_cnt);

		stcb->asoc.total_flight_count = tot_book_cnt;
	}
	tot_out = 0;
	TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
		tot_out += lnet->flight_size;
	}
	if (tot_out != stcb->asoc.total_flight) {
		sctp_audit_data[sctp_audit_indx][0] = 0xAF;
		sctp_audit_data[sctp_audit_indx][1] = 0xA3;
		sctp_audit_indx++;
		if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
			sctp_audit_indx = 0;
		}
		rep = 1;
		SCTP_PRINTF("real flight:%d net total was %d\n",
		    stcb->asoc.total_flight, tot_out);
		/* now corrective action */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {

			tot_out = 0;
			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if ((chk->whoTo == lnet) &&
				    (chk->sent < SCTP_DATAGRAM_RESEND)) {
					tot_out += chk->book_size;
				}
			}
			if (lnet->flight_size != tot_out) {
				SCTP_PRINTF("net:%p flight was %d corrected to %d\n",
				    (void *)lnet, lnet->flight_size,
				    tot_out);
				lnet->flight_size = tot_out;
			}
		}
	}
	if (rep) {
		sctp_print_audit_report();
	}
}

/* Append one (event, detail) pair to the audit ring, wrapping the index. */
void
sctp_audit_log(uint8_t ev, uint8_t fd)
{

	sctp_audit_data[sctp_audit_indx][0] = ev;
	sctp_audit_data[sctp_audit_indx][1] = fd;
	sctp_audit_indx++;
	if (sctp_audit_indx >= SCTP_AUDIT_SIZE) {
		sctp_audit_indx = 0;
	}
}

#endif

/*
 * sctp_stop_timers_for_shutdown() should be called
 * when entering the SHUTDOWN_SENT or SHUTDOWN_ACK_SENT
 * state to make sure that all timers are stopped.
 */
void
sctp_stop_timers_for_shutdown(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_nets *net;

	asoc = &stcb->asoc;

	(void)SCTP_OS_TIMER_STOP(&asoc->dack_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->strreset_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->asconf_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->autoclose_timer.timer);
	(void)SCTP_OS_TIMER_STOP(&asoc->delayed_event_timer.timer);
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		(void)SCTP_OS_TIMER_STOP(&net->pmtu_timer.timer);
		(void)SCTP_OS_TIMER_STOP(&net->hb_timer.timer);
	}
}

/*
 * a list of sizes based on typical mtu's, used only if next hop size not
 * returned.  Kept in ascending order; both lookup routines below rely on
 * that ordering.
 */
static uint32_t sctp_mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

/*
 * Return the largest MTU smaller than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_prev_mtu(uint32_t val)
{
	uint32_t i;

	if (val <= sctp_mtu_sizes[0]) {
		return (val);
	}
	for (i = 1; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val <= sctp_mtu_sizes[i]) {
			break;
		}
	}
	return (sctp_mtu_sizes[i - 1]);
}

/*
 * Return the smallest MTU larger than val. If there is no
 * entry, just return val.
 */
uint32_t
sctp_get_next_mtu(uint32_t val)
{
	/* select another MTU that is just bigger than this one */
	uint32_t i;

	for (i = 0; i < (sizeof(sctp_mtu_sizes) / sizeof(uint32_t)); i++) {
		if (val < sctp_mtu_sizes[i]) {
			return (sctp_mtu_sizes[i]);
		}
	}
	return (val);
}

/*
 * Refill the endpoint's random-number store by HMACing the stored random
 * numbers with a monotonically increasing counter, and reset store_at.
 */
void
sctp_fill_random_store(struct sctp_pcb *m)
{
	/*
	 * Here we use the MD5/SHA-1 to hash with our good randomNumbers and
	 * our counter. The result becomes our good random numbers and we
	 * then setup to give these out. Note that we do no locking to
	 * protect this. This is ok, since if competing folks call this we
	 * will get more gobbled gook in the random store which is what we
	 * want. There is a danger that two guys will use the same random
	 * numbers, but thats ok too since that is random as well :->
	 */
	m->store_at = 0;
	(void)sctp_hmac(SCTP_HMAC, (uint8_t *) m->random_numbers,
	    sizeof(m->random_numbers), (uint8_t *) & m->random_counter,
	    sizeof(m->random_counter), (uint8_t *) m->random_store);
	m->random_counter++;
}

/*
 * Draw a 32-bit value from the endpoint's random store, advancing
 * store_at with an atomic compare-and-set (retrying on races) and
 * refilling the store when it wraps.  If initial_sequence_debug is set,
 * return a deterministic incrementing sequence instead (debug aid).
 */
uint32_t
sctp_select_initial_TSN(struct sctp_pcb *inp)
{
	/*
	 * A true implementation should use random selection process to get
	 * the initial stream sequence number, using RFC1750 as a good
	 * guideline
	 */
	uint32_t x, *xp;
	uint8_t *p;
	int store_at, new_store;

	if (inp->initial_sequence_debug != 0) {
		uint32_t ret;

		ret = inp->initial_sequence_debug;
		inp->initial_sequence_debug++;
		return (ret);
	}
retry:
	store_at = inp->store_at;
	new_store = store_at + sizeof(uint32_t);
	if (new_store >= (SCTP_SIGNATURE_SIZE - 3)) {
		new_store = 0;
	}
	if (!atomic_cmpset_int(&inp->store_at, store_at, new_store)) {
		goto retry;
	}
	if (new_store == 0) {
		/* Refill the random store */
		sctp_fill_random_store(inp);
	}
	p = &inp->random_store[store_at];
	xp = (uint32_t *) p;
	x = *xp;
	return (x);
}

/*
 * Select a non-zero verification tag.  When "check" is set, keep drawing
 * until sctp_is_vtag_good() accepts the tag for this lport/rport pair at
 * the current time.
 */
uint32_t
sctp_select_a_tag(struct sctp_inpcb *inp, uint16_t lport, uint16_t rport, int check)
{
	uint32_t x;
	struct timeval now;

	if (check) {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	for (;;) {
		x = sctp_select_initial_TSN(&inp->sctp_ep);
		if (x == 0) {
			/* we never use 0 */
			continue;
		}
		if (!check || sctp_is_vtag_good(x, lport, rport, &now)) {
			break;
		}
	}
	return (x);
}

/*
 * Initialize a freshly allocated association from endpoint defaults:
 * timers/RTO bounds, vtags, initial TSNs, address scoping, CC and
 * stream-scheduling modules, outgoing stream array, mapping arrays, and
 * all queues.  Returns 0 on success or ENOMEM (with partial allocations
 * freed) on allocation failure.  override_tag, when non-zero, is used as
 * my_vtag instead of a freshly selected one (e.g. for restarts).
 */
int
sctp_init_asoc(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    uint32_t override_tag, uint32_t vrf_id)
{
	struct sctp_association *asoc;

	/*
	 * Anything set to zero is taken care of by the allocation routine's
	 * bzero
	 */

	/*
	 * Up front select what scoping to apply on addresses I tell my peer
	 * Not sure what to do with these right now, we will need to come up
	 * with a way to set them. We may need to pass them through from the
	 * caller in the sctp_aloc_assoc() function.
	 */
	int i;

	asoc = &stcb->asoc;
	/* init all variables to a known value. */
	SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_INUSE);
	asoc->max_burst = inp->sctp_ep.max_burst;
	asoc->fr_max_burst = inp->sctp_ep.fr_max_burst;
	asoc->heart_beat_delay = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_HEARTBEAT]);
	asoc->cookie_life = inp->sctp_ep.def_cookie_life;
	asoc->sctp_cmt_on_off = inp->sctp_cmt_on_off;
	asoc->ecn_allowed = inp->sctp_ecn_enable;
	asoc->sctp_nr_sack_on_off = (uint8_t) SCTP_BASE_SYSCTL(sctp_nr_sack_on_off);
	asoc->sctp_cmt_pf = (uint8_t) 0;
	asoc->sctp_frag_point = inp->sctp_frag_point;
	asoc->sctp_features = inp->sctp_features;
	asoc->default_dscp = inp->sctp_ep.default_dscp;
#ifdef INET6
	if (inp->sctp_ep.default_flowlabel) {
		asoc->default_flowlabel = inp->sctp_ep.default_flowlabel;
	} else {
		if (inp->ip_inp.inp.inp_flags & IN6P_AUTOFLOWLABEL) {
			/* random flowlabel, low 20 bits, with marker bit set */
			asoc->default_flowlabel = sctp_select_initial_TSN(&inp->sctp_ep);
			asoc->default_flowlabel &= 0x000fffff;
			asoc->default_flowlabel |= 0x80000000;
		} else {
			asoc->default_flowlabel = 0;
		}
	}
#endif
	asoc->sb_send_resv = 0;
	if (override_tag) {
		asoc->my_vtag = override_tag;
	} else {
		asoc->my_vtag = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
	}
	/* Get the nonce tags */
	asoc->my_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->peer_vtag_nonce = sctp_select_a_tag(inp, stcb->sctp_ep->sctp_lport, stcb->rport, 0);
	asoc->vrf_id = vrf_id;

#ifdef SCTP_ASOCLOG_OF_TSNS
	asoc->tsn_in_at = 0;
	asoc->tsn_out_at = 0;
	asoc->tsn_in_wrapped = 0;
	asoc->tsn_out_wrapped = 0;
	asoc->cumack_log_at = 0;
	asoc->cumack_log_atsnt = 0;
#endif
#ifdef SCTP_FS_SPEC_LOG
	asoc->fs_index = 0;
#endif
	asoc->refcnt = 0;
	asoc->assoc_up_sent = 0;
	asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number = asoc->sending_seq =
	    sctp_select_initial_TSN(&inp->sctp_ep);
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	/* we are optimistic here */
	asoc->peer_supports_pktdrop = 1;
	asoc->peer_supports_nat = 0;
	asoc->sent_queue_retran_cnt = 0;

	/* for CMT */
	asoc->last_net_cmt_send_started = NULL;

	/* This will need to be adjusted */
	asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	asoc->asconf_seq_in = asoc->last_acked_seq;

	/* here we are different, we hold the next one we expect */
	asoc->str_reset_seq_in = asoc->last_acked_seq + 1;

	asoc->initial_init_rto_max = inp->sctp_ep.initial_init_rto_max;
	asoc->initial_rto = inp->sctp_ep.initial_rto;

	asoc->max_init_times = inp->sctp_ep.max_init_times;
	asoc->max_send_times = inp->sctp_ep.max_send_times;
	asoc->def_net_failure = inp->sctp_ep.def_net_failure;
	asoc->def_net_pf_threshold = inp->sctp_ep.def_net_pf_threshold;
	asoc->free_chunk_cnt = 0;

	asoc->iam_blocking = 0;
	asoc->context = inp->sctp_context;
	asoc->local_strreset_support = inp->local_strreset_support;
	asoc->def_send = inp->def_send;
	asoc->delayed_ack = TICKS_TO_MSEC(inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_RECV]);
	asoc->sack_freq = inp->sctp_ep.sctp_sack_freq;
	asoc->pr_sctp_cnt = 0;
	asoc->total_output_queue_size = 0;

	/* address scoping follows the endpoint's bound address family */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) {
		asoc->scope.ipv6_addr_legal = 1;
		if (SCTP_IPV6_V6ONLY(inp) == 0) {
			asoc->scope.ipv4_addr_legal = 1;
		} else {
			asoc->scope.ipv4_addr_legal = 0;
		}
	} else {
		asoc->scope.ipv6_addr_legal = 0;
		asoc->scope.ipv4_addr_legal = 1;
	}

	asoc->my_rwnd = max(SCTP_SB_LIMIT_RCV(inp->sctp_socket), SCTP_MINIMAL_RWND);
	asoc->peers_rwnd = SCTP_SB_LIMIT_RCV(inp->sctp_socket);

	asoc->smallest_mtu = inp->sctp_frag_point;
	asoc->minrto = inp->sctp_ep.sctp_minrto;
	asoc->maxrto = inp->sctp_ep.sctp_maxrto;

	asoc->locked_on_sending = NULL;
	asoc->stream_locked_on = 0;
	asoc->ecn_echo_cnt_onq = 0;
	asoc->stream_locked = 0;

	asoc->send_sack = 1;

	LIST_INIT(&asoc->sctp_restricted_addrs);

	TAILQ_INIT(&asoc->nets);
	TAILQ_INIT(&asoc->pending_reply_queue);
	TAILQ_INIT(&asoc->asconf_ack_sent);
	/* Setup to fill the hb random cache at first HB */
	asoc->hb_random_idx = 4;

	asoc->sctp_autoclose_ticks = inp->sctp_ep.auto_close_time;

	/* select CC and stream-scheduling modules from endpoint defaults */
	stcb->asoc.congestion_control_module = inp->sctp_ep.sctp_default_cc_module;
	stcb->asoc.cc_functions = sctp_cc_functions[inp->sctp_ep.sctp_default_cc_module];

	stcb->asoc.stream_scheduling_module = inp->sctp_ep.sctp_default_ss_module;
	stcb->asoc.ss_functions = sctp_ss_functions[inp->sctp_ep.sctp_default_ss_module];

	/*
	 * Now the stream parameters, here we allocate space for all streams
	 * that we request by default.
	 */
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams =
	    inp->sctp_ep.pre_open_stream_count;
	SCTP_MALLOC(asoc->strmout, struct sctp_stream_out *,
	    asoc->streamoutcnt * sizeof(struct sctp_stream_out),
	    SCTP_M_STRMO);
	if (asoc->strmout == NULL) {
		/* big trouble no memory */
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	for (i = 0; i < asoc->streamoutcnt; i++) {
		/*
		 * inbound side must be set to 0xffff, also NOTE when we get
		 * the INIT-ACK back (for INIT sender) we MUST reduce the
		 * count (streamoutcnt) but first check if we sent to any of
		 * the upper streams that were dropped (if some were). Those
		 * that were dropped must be notified to the upper layer as
		 * failed to send.
		 */
		asoc->strmout[i].next_sequence_send = 0x0;
		TAILQ_INIT(&asoc->strmout[i].outqueue);
		asoc->strmout[i].chunks_on_queues = 0;
		asoc->strmout[i].stream_no = i;
		asoc->strmout[i].last_msg_incomplete = 0;
		asoc->ss_functions.sctp_ss_init_stream(&asoc->strmout[i], NULL);
	}
	asoc->ss_functions.sctp_ss_init(stcb, asoc, 0);

	/* Now the mapping array */
	asoc->mapping_array_size = SCTP_INITIAL_MAPPING_ARRAY;
	SCTP_MALLOC(asoc->mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->mapping_array, 0, asoc->mapping_array_size);
	SCTP_MALLOC(asoc->nr_mapping_array, uint8_t *, asoc->mapping_array_size,
	    SCTP_M_MAP);
	if (asoc->nr_mapping_array == NULL) {
		SCTP_FREE(asoc->strmout, SCTP_M_STRMO);
		SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
		SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOMEM);
		return (ENOMEM);
	}
	memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);

	/* Now the init of the other outqueues */
	TAILQ_INIT(&asoc->free_chunks);
	TAILQ_INIT(&asoc->control_send_queue);
	TAILQ_INIT(&asoc->asconf_send_queue);
	TAILQ_INIT(&asoc->send_queue);
	TAILQ_INIT(&asoc->sent_queue);
	TAILQ_INIT(&asoc->reasmqueue);
	TAILQ_INIT(&asoc->resetHead);
	asoc->max_inbound_streams = inp->sctp_ep.max_open_streams_intome;
	TAILQ_INIT(&asoc->asconf_queue);
	/* authentication fields */
	asoc->authinfo.random = NULL;
	asoc->authinfo.active_keyid = 0;
	asoc->authinfo.assoc_key = NULL;
	asoc->authinfo.assoc_keyid = 0;
	asoc->authinfo.recv_key = NULL;
	asoc->authinfo.recv_keyid = 0;
	LIST_INIT(&asoc->shared_keys);
	asoc->marked_retrans = 0;
	asoc->port = inp->sctp_ep.port;
	asoc->timoinit = 0;
	asoc->timodata = 0;
	asoc->timosack = 0;
	asoc->timoshutdown = 0;
	asoc->timoheartbeat = 0;
	asoc->timocookie = 0;
	asoc->timoshutdownack = 0;
	(void)SCTP_GETTIME_TIMEVAL(&asoc->start_time);
	asoc->discontinuity_time = asoc->start_time;
	/*
	 * sa_ignore MEMLEAK {memory is put in the assoc mapping array and
	 * freed later when the association is freed.
	 */
	return (0);
}

/*
 * Dump both TSN mapping arrays (renegable and non-renegable) to the
 * console in hex, 16 bytes per line, suppressing trailing zero entries.
 * (Definition continues past the end of this chunk.)
 */
void
sctp_print_mapping_array(struct sctp_association *asoc)
{
	unsigned int i, limit;

	SCTP_PRINTF("Mapping array size: %d, baseTSN: %8.8x, cumAck: %8.8x, highestTSN: (%8.8x, %8.8x).\n",
	    asoc->mapping_array_size,
	    asoc->mapping_array_base_tsn,
	    asoc->cumulative_tsn,
	    asoc->highest_tsn_inside_map,
	    asoc->highest_tsn_inside_nr_map);
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->mapping_array[limit - 1] != 0) {
			break;
		}
	}
	SCTP_PRINTF("Renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->mapping_array[i], ((i + 1) % 16) ? ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
	for (limit = asoc->mapping_array_size; limit > 1; limit--) {
		if (asoc->nr_mapping_array[limit - 1]) {
			break;
		}
	}
	SCTP_PRINTF("Non renegable mapping array (last %d entries are zero):\n", asoc->mapping_array_size - limit);
	for (i = 0; i < limit; i++) {
		SCTP_PRINTF("%2.2x%c", asoc->nr_mapping_array[i], ((i + 1) % 16) ?
		    ' ' : '\n');
	}
	if (limit % 16)
		SCTP_PRINTF("\n");
}

/*
 * Grow both TSN mapping arrays so that at least "needed" additional bits
 * fit (rounded up to bytes, plus SCTP_MAPPING_ARRAY_INCR slack).  On
 * success the old arrays are copied into the new ones, freed, and
 * replaced; returns 0.  Returns -1 with the association untouched if
 * either allocation fails.
 */
int
sctp_expand_mapping_array(struct sctp_association *asoc, uint32_t needed)
{
	/* mapping array needs to grow */
	uint8_t *new_array1, *new_array2;
	uint32_t new_size;

	new_size = asoc->mapping_array_size + ((needed + 7) / 8 + SCTP_MAPPING_ARRAY_INCR);
	SCTP_MALLOC(new_array1, uint8_t *, new_size, SCTP_M_MAP);
	SCTP_MALLOC(new_array2, uint8_t *, new_size, SCTP_M_MAP);
	if ((new_array1 == NULL) || (new_array2 == NULL)) {
		/* can't get more, forget it */
		SCTP_PRINTF("No memory for expansion of SCTP mapping array %d\n", new_size);
		if (new_array1) {
			SCTP_FREE(new_array1, SCTP_M_MAP);
		}
		if (new_array2) {
			SCTP_FREE(new_array2, SCTP_M_MAP);
		}
		return (-1);
	}
	/* Zero the new arrays, then copy the old contents over the prefix. */
	memset(new_array1, 0, new_size);
	memset(new_array2, 0, new_size);
	memcpy(new_array1, asoc->mapping_array, asoc->mapping_array_size);
	memcpy(new_array2, asoc->nr_mapping_array, asoc->mapping_array_size);
	SCTP_FREE(asoc->mapping_array, SCTP_M_MAP);
	SCTP_FREE(asoc->nr_mapping_array, SCTP_M_MAP);
	asoc->mapping_array = new_array1;
	asoc->nr_mapping_array = new_array2;
	asoc->mapping_array_size = new_size;
	return (0);
}


/*
 * Core of the association iterator: walk every endpoint (or just one when
 * SCTP_ITERATOR_DO_SINGLE_INP is set) and every association matching the
 * requested pcb flags/features and asoc state, invoking the caller's
 * callbacks.  Every SCTP_ITERATOR_MAX_AT_ONCE associations the INP-info
 * and iterator locks are dropped and re-taken so other threads can make
 * progress; sctp_it_ctl.iterator_flags is then consulted in case someone
 * asked this iteration (or this endpoint) to be abandoned.  "it" is freed
 * when the walk completes.
 */
static void
sctp_iterator_work(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;
	int first_in = 1;
	struct sctp_inpcb *tinp;

	SCTP_INP_INFO_RLOCK();
	SCTP_ITERATOR_LOCK();
	if (it->inp) {
		SCTP_INP_RLOCK(it->inp);
		SCTP_INP_DECR_REF(it->inp);
	}
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_RUNLOCK();
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	if (first_in) {
		/* first endpoint is already read-locked above */
		first_in = 0;
	} else {
		SCTP_INP_RLOCK(it->inp);
	}
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_RUNLOCK(it->inp);
			goto done_with_iterator;
		}
		tinp = it->inp;
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_RUNLOCK(tinp);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_RLOCK(it->inp);
	}
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		SCTP_INP_RUNLOCK(it->inp);
		goto no_stcb;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
			/* Pause to let others grab the lock */
			atomic_add_int(&it->stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(it->stcb);
			SCTP_INP_INCR_REF(it->inp);
			SCTP_INP_RUNLOCK(it->inp);
			SCTP_ITERATOR_UNLOCK();
			SCTP_INP_INFO_RUNLOCK();
			SCTP_INP_INFO_RLOCK();
			SCTP_ITERATOR_LOCK();
			if (sctp_it_ctl.iterator_flags) {
				/* We won't be staying here */
				SCTP_INP_DECR_REF(it->inp);
				atomic_add_int(&it->stcb->asoc.refcnt, -1);
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_IT) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_IT;
					goto done_with_iterator;
				}
				if (sctp_it_ctl.iterator_flags &
				    SCTP_ITERATOR_STOP_CUR_INP) {
					sctp_it_ctl.iterator_flags &= ~SCTP_ITERATOR_STOP_CUR_INP;
					goto no_stcb;
				}
				/* If we reach here huh? */
				SCTP_PRINTF("Unknown it ctl flag %x\n",
				    sctp_it_ctl.iterator_flags);
				sctp_it_ctl.iterator_flags = 0;
			}
			/*
			 * Re-acquire the inp/tcb locks and drop the extra
			 * references taken before the pause.
			 */
			SCTP_INP_RLOCK(it->inp);
			SCTP_INP_DECR_REF(it->inp);
			SCTP_TCB_LOCK(it->stcb);
			atomic_add_int(&it->stcb->asoc.refcnt, -1);
			iteration_count = 0;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* Run last function */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
	SCTP_INP_RUNLOCK(it->inp);
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		it->inp = LIST_NEXT(it->inp, sctp_list);
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}

/*
 * Drain the queue of pending iterators, running each via
 * sctp_iterator_work() (which frees it).  Called, and returns, with the
 * iterator work-queue lock held; the lock is dropped around each run.
 */
void
sctp_iterator_worker(void)
{
	struct sctp_iterator *it, *nit;

	/* This function is called with the WQ lock in place */

	sctp_it_ctl.iterator_running = 1;
	TAILQ_FOREACH_SAFE(it, &sctp_it_ctl.iteratorhead, sctp_nxt_itr, nit) {
		sctp_it_ctl.cur_it = it;
		/* now lets work on this one */
		TAILQ_REMOVE(&sctp_it_ctl.iteratorhead, it, sctp_nxt_itr);
		SCTP_IPI_ITERATOR_WQ_UNLOCK();
		CURVNET_SET(it->vn);
		sctp_iterator_work(it);
		sctp_it_ctl.cur_it = NULL;
		CURVNET_RESTORE();
		SCTP_IPI_ITERATOR_WQ_LOCK();
		/* sa_ignore FREED_MEMORY */
	}
	sctp_it_ctl.iterator_running = 0;
	return;
}


/*
 * Drain the address work-queue filled by rtsock address-change events
 * into a fresh asconf iterator and run it over all bound-all endpoints.
 * If no memory is available, re-arm the ADDR_WQ timer and retry later.
 */
static void
sctp_handle_addr_wq(void)
{
	/* deal with the ADDR wq from the rtsock calls */
	struct sctp_laddr *wi, *nwi;
	struct sctp_asconf_iterator *asc;

	SCTP_MALLOC(asc, struct sctp_asconf_iterator *,
	    sizeof(struct sctp_asconf_iterator), SCTP_M_ASC_IT);
	if (asc == NULL) {
		/* Try later, no memory */
		sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ,
		    (struct sctp_inpcb *)NULL,
		    (struct sctp_tcb *)NULL,
		    (struct sctp_nets *)NULL);
		return;
	}
	LIST_INIT(&asc->list_of_work);
	asc->cnt = 0;

	/* Move every queued address entry onto our private work list. */
	SCTP_WQ_ADDR_LOCK();
	LIST_FOREACH_SAFE(wi, &SCTP_BASE_INFO(addr_wq), sctp_nxt_addr, nwi) {
		LIST_REMOVE(wi, sctp_nxt_addr);
		LIST_INSERT_HEAD(&asc->list_of_work, wi, sctp_nxt_addr);
		asc->cnt++;
	}
	SCTP_WQ_ADDR_UNLOCK();

	if (asc->cnt == 0) {
		SCTP_FREE(asc, SCTP_M_ASC_IT);
	} else {
		(void)sctp_initiate_iterator(sctp_asconf_iterator_ep,
		    sctp_asconf_iterator_stcb,
		    NULL,	/* No ep end for boundall */
		    SCTP_PCB_FLAGS_BOUNDALL,
		    SCTP_PCB_ANY_FEATURES,
		    SCTP_ASOC_ANY_STATE,
		    (void *)asc, 0,
		    sctp_asconf_iterator_end, NULL, 0);
	}
}

/*
 * Common callout handler for every SCTP timer type.  "t" is the
 * struct sctp_timer armed by sctp_timer_start().  The handler validates
 * the timer (self pointer, type, still active), takes references on the
 * endpoint/tcb it will touch, dispatches on tmr->type, and releases the
 * references and the tcb lock on the way out.  tmr->stopped_from is used
 * as a breadcrumb recording how far validation got.
 */
void
sctp_timeout_handler(void *t)
{
	struct sctp_inpcb *inp;
	struct sctp_tcb *stcb;
	struct sctp_nets *net;
	struct sctp_timer *tmr;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	int did_output, type;

	tmr = (struct sctp_timer *)t;
	inp = (struct sctp_inpcb *)tmr->ep;
	stcb = (struct sctp_tcb *)tmr->tcb;
	net = (struct sctp_nets *)tmr->net;
	CURVNET_SET((struct vnet *)tmr->vnet);
	did_output = 1;

#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF0, (uint8_t) tmr->type);
	sctp_auditing(3, inp, stcb, net);
#endif

	/* sanity checks...
	 */
	if (tmr->self != (void *)tmr) {
		/*
		 * SCTP_PRINTF("Stale SCTP timer fired (%p), ignoring...\n",
		 * (void *)tmr);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa001;
	if (!SCTP_IS_TIMER_TYPE_VALID(tmr->type)) {
		/*
		 * SCTP_PRINTF("SCTP timer fired with invalid type: 0x%x\n",
		 * tmr->type);
		 */
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa002;
	if ((tmr->type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL)) {
		CURVNET_RESTORE();
		return;
	}
	/* if this is an iterator timeout, get the struct and clear inp */
	tmr->stopped_from = 0xa003;
	type = tmr->type;
	if (inp) {
		SCTP_INP_INCR_REF(inp);
		/*
		 * A socketless endpoint only services the timer types that
		 * must still run while the socket is gone.
		 */
		if ((inp->sctp_socket == NULL) &&
		    ((tmr->type != SCTP_TIMER_TYPE_INPKILL) &&
		    (tmr->type != SCTP_TIMER_TYPE_INIT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SEND) &&
		    (tmr->type != SCTP_TIMER_TYPE_RECV) &&
		    (tmr->type != SCTP_TIMER_TYPE_HEARTBEAT) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWN) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNACK) &&
		    (tmr->type != SCTP_TIMER_TYPE_SHUTDOWNGUARD) &&
		    (tmr->type != SCTP_TIMER_TYPE_ASOCKILL))
		    ) {
			SCTP_INP_DECR_REF(inp);
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa004;
	if (stcb) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state == 0) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	tmr->stopped_from = 0xa005;
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer type %d goes off\n", tmr->type);
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		if (inp) {
			SCTP_INP_DECR_REF(inp);
		}
		if (stcb) {
			atomic_add_int(&stcb->asoc.refcnt, -1);
		}
		CURVNET_RESTORE();
		return;
	}
	tmr->stopped_from = 0xa006;

	if (stcb) {
		/* Trade the temporary refcount for the tcb lock. */
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt,
		    -1);
		if ((tmr->type != SCTP_TIMER_TYPE_ASOCKILL) &&
		    ((stcb->asoc.state == 0) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED))) {
			SCTP_TCB_UNLOCK(stcb);
			if (inp) {
				SCTP_INP_DECR_REF(inp);
			}
			CURVNET_RESTORE();
			return;
		}
	}
	/* record in stopped what t-o occurred */
	tmr->stopped_from = tmr->type;

	/* mark as being serviced now */
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * Callout has been rescheduled.
		 */
		goto get_out;
	}
	if (!SCTP_OS_TIMER_ACTIVE(&tmr->timer)) {
		/*
		 * Not active, so no action.
		 */
		goto get_out;
	}
	SCTP_OS_TIMER_DEACTIVATE(&tmr->timer);

	/* call the handler for the appropriate timer type */
	switch (tmr->type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		if (inp == NULL) {
			break;
		}
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_SENDQ_EVENT(inp, inp->sctp_socket);
		}
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		sctp_handle_addr_wq();
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* T3 retransmission timeout */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timodata);
		stcb->asoc.timodata++;
		stcb->asoc.num_send_timers_up--;
		if (stcb->asoc.num_send_timers_up < 0) {
			stcb->asoc.num_send_timers_up = 0;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
		if (sctp_t3rxt_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */

			goto out_decr;
		}
		SCTP_TCB_LOCK_ASSERT(stcb);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((stcb->asoc.num_send_timers_up == 0) &&
		    (stcb->asoc.sent_queue_cnt > 0)) {
			struct sctp_tmit_chunk *chk;

			/*
			 * safeguard. If there on some on the sent queue
			 * somewhere but no timers running something is
			 * wrong... so we start a timer on the first chunk
			 * on the send queue on whatever net it is sent to.
			 */
			chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb,
			    chk->whoTo);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/* T1-init retransmission timeout */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoinit);
		stcb->asoc.timoinit++;
		if (sctp_t1init_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		/* We do output but not here */
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_RECV:
		/* delayed-SACK timeout */
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timosack);
		stcb->asoc.timosack++;
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_shutdown_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdown);
		stcb->asoc.timoshutdown++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (inp == NULL) || (net == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoheartbeat);
		stcb->asoc.timoheartbeat++;
		if (sctp_heartbeat_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/* Re-arm HB unless heartbeats are disabled on this path. */
		if (!(net->dest_state & SCTP_ADDR_NOHB)) {
			sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_HB_TMR, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_cookie_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timocookie);
		stcb->asoc.timocookie++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		/*
		 * We consider T3 and Cookie timer pretty much the same with
		 * respect to where from in chunk_output.
		 */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		{
			/*
			 * Rotate the endpoint's cookie secret keys and
			 * re-arm for the next rotation interval.
			 */
			struct timeval tv;
			int i, secret;

			if (inp == NULL) {
				break;
			}
			SCTP_STAT_INCR(sctps_timosecret);
			(void)SCTP_GETTIME_TIMEVAL(&tv);
			SCTP_INP_WLOCK(inp);
			inp->sctp_ep.time_of_secret_change = tv.tv_sec;
			inp->sctp_ep.last_secret_number =
			    inp->sctp_ep.current_secret_number;
			inp->sctp_ep.current_secret_number++;
			if (inp->sctp_ep.current_secret_number >=
			    SCTP_HOW_MANY_SECRETS) {
				inp->sctp_ep.current_secret_number = 0;
			}
			secret = (int)inp->sctp_ep.current_secret_number;
			for (i = 0; i < SCTP_NUMBER_OF_SECRETS; i++) {
				inp->sctp_ep.secret_key[secret][i] =
				    sctp_select_initial_TSN(&inp->sctp_ep);
			}
			SCTP_INP_WUNLOCK(inp);
			sctp_timer_start(SCTP_TIMER_TYPE_NEWCOOKIE, inp, stcb, net);
		}
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timopathmtu);
		sctp_pathmtu_timer(inp, stcb, net);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		if ((stcb == NULL)
		    || (inp == NULL)) {
			break;
		}
		if (sctp_shutdownack_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoshutdownack);
		stcb->asoc.timoshutdownack++;
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_SHUT_ACK_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		/* The peer never finished SHUTDOWN: give up and abort. */
		SCTP_STAT_INCR(sctps_timoshutdownguard);
		sctp_abort_an_association(inp, stcb, NULL, SCTP_SO_NOT_LOCKED);
		/* no need to unlock on tcb its gone */
		goto out_decr;

	case SCTP_TIMER_TYPE_STRRESET:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_strreset_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timostrmrst);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_STRRST_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		if (sctp_asconf_timer(inp, stcb, net)) {
			/* no need to unlock on tcb its gone */
			goto out_decr;
		}
		SCTP_STAT_INCR(sctps_timoasconf);
#ifdef SCTP_AUDITING_ENABLED
		sctp_auditing(4, inp, stcb, net);
#endif
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_ASCONF_TMR, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		sctp_delete_prim_timer(inp, stcb, net);
		SCTP_STAT_INCR(sctps_timodelprim);
		break;

	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoautoclose);
		sctp_autoclose_timer(inp, stcb, net);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
		did_output = 0;
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if ((stcb == NULL) || (inp == NULL)) {
			break;
		}
		SCTP_STAT_INCR(sctps_timoassockill);
		/* Can we free it yet? */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_ASOCKILL, inp, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(inp);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_2);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		/*
		 * free asoc, always unlocks (or destroys) so prevent
		 * duplicate unlock or unlock of a free mtx :-0
		 */
		stcb = NULL;
		goto out_no_decr;
	case SCTP_TIMER_TYPE_INPKILL:
		SCTP_STAT_INCR(sctps_timoinpkill);
		if (inp == NULL) {
			break;
		}
		/*
		 * special case, take away our increment since WE are the
		 * killer
		 */
		SCTP_INP_DECR_REF(inp);
		sctp_timer_stop(SCTP_TIMER_TYPE_INPKILL, inp, NULL, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_3);
		sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT,
		    SCTP_CALLED_FROM_INPKILL_TIMER);
		inp = NULL;
		goto out_no_decr;
	default:
		SCTPDBG(SCTP_DEBUG_TIMER1, "sctp_timeout_handler:unknown timer %d\n",
		    tmr->type);
		break;
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xF1, (uint8_t) tmr->type);
	if (inp)
		sctp_auditing(5, inp, stcb, net);
#endif
	if ((did_output) && stcb) {
		/*
		 * Now we need to clean up the control chunk chain if an
		 * ECNE is on it. It must be marked as UNSENT again so next
		 * call will continue to send it until such time that we get
		 * a CWR, to remove it. It is, however, less likely that we
		 * will find a ecn echo on the chain though.
		 */
		sctp_fix_ecn_echo(&stcb->asoc);
	}
get_out:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
out_decr:
	if (inp) {
		SCTP_INP_DECR_REF(inp);
	}
out_no_decr:
	SCTPDBG(SCTP_DEBUG_TIMER1, "Timer now complete (type %d)\n",
	    type);
	CURVNET_RESTORE();
}

/*
 * Arm the timer of the given type.  Picks the struct sctp_timer and the
 * timeout (in ticks) from the endpoint/association/net according to
 * t_type, then starts the callout with sctp_timeout_handler() as the
 * handler.  If the timer is already pending it is left untouched.
 */
void
sctp_timer_start(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t to_ticks;
	struct sctp_timer *tmr;

	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) && (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		to_ticks = SCTP_ZERO_COPY_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		to_ticks = SCTP_ZERO_COPY_SENDQ_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		/* Only 1 tick away :-) */
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		to_ticks = SCTP_ADDRESS_TICK_DELAY;
		break;
	case SCTP_TIMER_TYPE_SEND:
		/* Here we use the RTO timer */
		{
			int rto_val;

			if ((stcb == NULL) || (net == NULL)) {
				return;
			}
			tmr = &net->rxt_timer;
			if (net->RTO == 0) {
				rto_val = stcb->asoc.initial_rto;
			} else {
				rto_val = net->RTO;
			}
			to_ticks = MSEC_TO_TICKS(rto_val);
		}
		break;
	case SCTP_TIMER_TYPE_INIT:
		/*
		 * Here we use the INIT timer default usually about 1
		 * minute.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		break;
	case SCTP_TIMER_TYPE_RECV:
		/*
		 * Here we use the Delayed-Ack timer value from the inp
		 * usually about 200ms.
		 */
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		to_ticks = MSEC_TO_TICKS(stcb->asoc.delayed_ack);
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		/* Here we use the RTO of the destination. */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		/*
		 * the net is used here so that we can add in the RTO. Even
		 * though we use a different timer. We also add the HB timer
		 * PLUS a random jitter.
		 */
		if ((inp == NULL) || (stcb == NULL) || (net == NULL)) {
			return;
		} else {
			uint32_t rndval;
			uint32_t jitter;

			if ((net->dest_state & SCTP_ADDR_NOHB) &&
			    !(net->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				return;
			}
			if (net->RTO == 0) {
				to_ticks = stcb->asoc.initial_rto;
			} else {
				to_ticks = net->RTO;
			}
			/* Jitter the delay by up to +/- RTO/2. */
			rndval = sctp_select_initial_TSN(&inp->sctp_ep);
			jitter = rndval % to_ticks;
			if (jitter >= (to_ticks >> 1)) {
				to_ticks = to_ticks + (jitter - (to_ticks >> 1));
			} else {
				to_ticks = to_ticks - jitter;
			}
			if (!(net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    !(net->dest_state & SCTP_ADDR_PF)) {
				to_ticks += net->heart_beat_delay;
			}
			/*
			 * Now we must convert the to_ticks that are now in
			 * ms to ticks.
			 */
			to_ticks = MSEC_TO_TICKS(to_ticks);
			tmr = &net->hb_timer;
		}
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		/*
		 * Here we can use the RTO timer from the network since one
		 * RTT was complete. If a retran happened then we will be
		 * using the RTO initial value.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_NEWCOOKIE:
		/*
		 * nothing needed but the endpoint here usually about 60
		 * minutes.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_SIGNATURE];
		break;
	case SCTP_TIMER_TYPE_ASOCKILL:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.strreset_timer;
		to_ticks = MSEC_TO_TICKS(SCTP_ASOC_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_INPKILL:
		/*
		 * The inp is setup to die. We re-use the signature_change
		 * timer since that has stopped and we are in the GONE
		 * state.
		 */
		if (inp == NULL) {
			return;
		}
		tmr = &inp->sctp_ep.signature_change;
		to_ticks = MSEC_TO_TICKS(SCTP_INP_KILL_TIMEOUT);
		break;
	case SCTP_TIMER_TYPE_PATHMTURAISE:
		/*
		 * Here we use the value found in the EP for PMTU usually
		 * about 10 minutes.
		 */
		if ((stcb == NULL) || (inp == NULL)) {
			return;
		}
		if (net == NULL) {
			return;
		}
		if (net->dest_state & SCTP_ADDR_NO_PMTUD) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_PMTU];
		tmr = &net->pmtu_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNACK:
		/* Here we use the RTO of the destination */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWNGUARD:
		/*
		 * Here we use the endpoints shutdown guard timer usually
		 * about 3 minutes.
		 */
		if ((inp == NULL) || (stcb == NULL)) {
			return;
		}
		to_ticks = inp->sctp_ep.sctp_timeoutticks[SCTP_TIMER_MAXSHUTDOWN];
		tmr = &stcb->asoc.shut_guard_timer;
		break;
	case SCTP_TIMER_TYPE_STRRESET:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.strreset_timer;
		break;
	case SCTP_TIMER_TYPE_ASCONF:
		/*
		 * Here the timer comes from the stcb but its value is from
		 * the net's RTO.
		 */
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		if (net->RTO == 0) {
			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		} else {
			to_ticks = MSEC_TO_TICKS(net->RTO);
		}
		tmr = &stcb->asoc.asconf_timer;
		break;
	case SCTP_TIMER_TYPE_PRIM_DELETED:
		/* Caller must pass net == NULL for this type. */
		if ((stcb == NULL) || (net != NULL)) {
			return;
		}
		to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
		tmr = &stcb->asoc.delete_prim_timer;
		break;
	case SCTP_TIMER_TYPE_AUTOCLOSE:
		if (stcb == NULL) {
			return;
		}
		if (stcb->asoc.sctp_autoclose_ticks == 0) {
			/*
			 * Really an error since stcb is NOT set to
			 * autoclose
			 */
			return;
		}
		to_ticks = stcb->asoc.sctp_autoclose_ticks;
		tmr = &stcb->asoc.autoclose_timer;
		break;
	default:
		/*
		 * NOTE(review): __FUNCTION__ is a GCC extension; the C99
		 * spelling is __func__.  Harmless here, just non-standard.
		 */
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n",
		    __FUNCTION__, t_type);
		return;
		/* NOTREACHED */
		break;
	}
	/*
	 * NOTE(review): to_ticks is unsigned, so "<= 0" can only ever be
	 * true for 0; "== 0" would express the same check without the
	 * tautological comparison.  Behavior is identical either way.
	 */
	if ((to_ticks <= 0) || (tmr == NULL)) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "%s: %d:software error to_ticks:%d tmr:%p not set ??\n",
		    __FUNCTION__, t_type, to_ticks, (void *)tmr);
		return;
	}
	if (SCTP_OS_TIMER_PENDING(&tmr->timer)) {
		/*
		 * we do NOT allow you to have it already running. if it is
		 * we leave the current one up unchanged
		 */
		return;
	}
	/* At this point we can proceed */
	if (t_type == SCTP_TIMER_TYPE_SEND) {
		stcb->asoc.num_send_timers_up++;
	}
	tmr->stopped_from = 0;
	tmr->type = t_type;
	tmr->ep = (void *)inp;
	tmr->tcb = (void *)stcb;
	tmr->net = (void *)net;
	tmr->self = (void *)tmr;
	tmr->vnet = (void *)curvnet;
	tmr->ticks = sctp_get_tick_count();
	(void)SCTP_OS_TIMER_START(&tmr->timer, to_ticks, sctp_timeout_handler, tmr);
	return;
}

/*
 * Stop the timer of the given type, recording "from" in
 * tmr->stopped_from for debugging.  Selects the same struct sctp_timer
 * that sctp_timer_start() would for this type.
 */
void
sctp_timer_stop(int t_type, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint32_t from)
{
	struct sctp_timer *tmr;

	if ((t_type != SCTP_TIMER_TYPE_ADDR_WQ) &&
	    (inp == NULL))
		return;

	tmr = NULL;
	if (stcb) {
		SCTP_TCB_LOCK_ASSERT(stcb);
	}
	switch (t_type) {
	case SCTP_TIMER_TYPE_ZERO_COPY:
		tmr = &inp->sctp_ep.zero_copy_timer;
		break;
	case SCTP_TIMER_TYPE_ZCOPY_SENDQ:
		tmr = &inp->sctp_ep.zero_copy_sendq_timer;
		break;
	case SCTP_TIMER_TYPE_ADDR_WQ:
		tmr = &SCTP_BASE_INFO(addr_wq_timer);
		break;
	case SCTP_TIMER_TYPE_SEND:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_INIT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_RECV:
		if (stcb == NULL) {
			return;
		}
		tmr = &stcb->asoc.dack_timer;
		break;
	case SCTP_TIMER_TYPE_SHUTDOWN:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
		break;
	case SCTP_TIMER_TYPE_HEARTBEAT:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->hb_timer;
		break;
	case SCTP_TIMER_TYPE_COOKIE:
		if ((stcb == NULL) || (net == NULL)) {
			return;
		}
		tmr = &net->rxt_timer;
2194 break; 2195 case SCTP_TIMER_TYPE_NEWCOOKIE: 2196 /* nothing needed but the endpoint here */ 2197 tmr = &inp->sctp_ep.signature_change; 2198 /* 2199 * We re-use the newcookie timer for the INP kill timer. We 2200 * must assure that we do not kill it by accident. 2201 */ 2202 break; 2203 case SCTP_TIMER_TYPE_ASOCKILL: 2204 /* 2205 * Stop the asoc kill timer. 2206 */ 2207 if (stcb == NULL) { 2208 return; 2209 } 2210 tmr = &stcb->asoc.strreset_timer; 2211 break; 2212 2213 case SCTP_TIMER_TYPE_INPKILL: 2214 /* 2215 * The inp is setup to die. We re-use the signature_chage 2216 * timer since that has stopped and we are in the GONE 2217 * state. 2218 */ 2219 tmr = &inp->sctp_ep.signature_change; 2220 break; 2221 case SCTP_TIMER_TYPE_PATHMTURAISE: 2222 if ((stcb == NULL) || (net == NULL)) { 2223 return; 2224 } 2225 tmr = &net->pmtu_timer; 2226 break; 2227 case SCTP_TIMER_TYPE_SHUTDOWNACK: 2228 if ((stcb == NULL) || (net == NULL)) { 2229 return; 2230 } 2231 tmr = &net->rxt_timer; 2232 break; 2233 case SCTP_TIMER_TYPE_SHUTDOWNGUARD: 2234 if (stcb == NULL) { 2235 return; 2236 } 2237 tmr = &stcb->asoc.shut_guard_timer; 2238 break; 2239 case SCTP_TIMER_TYPE_STRRESET: 2240 if (stcb == NULL) { 2241 return; 2242 } 2243 tmr = &stcb->asoc.strreset_timer; 2244 break; 2245 case SCTP_TIMER_TYPE_ASCONF: 2246 if (stcb == NULL) { 2247 return; 2248 } 2249 tmr = &stcb->asoc.asconf_timer; 2250 break; 2251 case SCTP_TIMER_TYPE_PRIM_DELETED: 2252 if (stcb == NULL) { 2253 return; 2254 } 2255 tmr = &stcb->asoc.delete_prim_timer; 2256 break; 2257 case SCTP_TIMER_TYPE_AUTOCLOSE: 2258 if (stcb == NULL) { 2259 return; 2260 } 2261 tmr = &stcb->asoc.autoclose_timer; 2262 break; 2263 default: 2264 SCTPDBG(SCTP_DEBUG_TIMER1, "%s: Unknown timer type %d\n", 2265 __FUNCTION__, t_type); 2266 break; 2267 } 2268 if (tmr == NULL) { 2269 return; 2270 } 2271 if ((tmr->type != t_type) && tmr->type) { 2272 /* 2273 * Ok we have a timer that is under joint use. Cookie timer 2274 * per chance with the SEND timer. 
We therefore are NOT 2275 * running the timer that the caller wants stopped. So just 2276 * return. 2277 */ 2278 return; 2279 } 2280 if ((t_type == SCTP_TIMER_TYPE_SEND) && (stcb != NULL)) { 2281 stcb->asoc.num_send_timers_up--; 2282 if (stcb->asoc.num_send_timers_up < 0) { 2283 stcb->asoc.num_send_timers_up = 0; 2284 } 2285 } 2286 tmr->self = NULL; 2287 tmr->stopped_from = from; 2288 (void)SCTP_OS_TIMER_STOP(&tmr->timer); 2289 return; 2290} 2291 2292uint32_t 2293sctp_calculate_len(struct mbuf *m) 2294{ 2295 uint32_t tlen = 0; 2296 struct mbuf *at; 2297 2298 at = m; 2299 while (at) { 2300 tlen += SCTP_BUF_LEN(at); 2301 at = SCTP_BUF_NEXT(at); 2302 } 2303 return (tlen); 2304} 2305 2306void 2307sctp_mtu_size_reset(struct sctp_inpcb *inp, 2308 struct sctp_association *asoc, uint32_t mtu) 2309{ 2310 /* 2311 * Reset the P-MTU size on this association, this involves changing 2312 * the asoc MTU, going through ANY chunk+overhead larger than mtu to 2313 * allow the DF flag to be cleared. 2314 */ 2315 struct sctp_tmit_chunk *chk; 2316 unsigned int eff_mtu, ovh; 2317 2318 asoc->smallest_mtu = mtu; 2319 if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) { 2320 ovh = SCTP_MIN_OVERHEAD; 2321 } else { 2322 ovh = SCTP_MIN_V4_OVERHEAD; 2323 } 2324 eff_mtu = mtu - ovh; 2325 TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 2326 if (chk->send_size > eff_mtu) { 2327 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2328 } 2329 } 2330 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 2331 if (chk->send_size > eff_mtu) { 2332 chk->flags |= CHUNK_FLAGS_FRAGMENT_OK; 2333 } 2334 } 2335} 2336 2337 2338/* 2339 * given an association and starting time of the current RTT period return 2340 * RTO in number of msecs net should point to the current network 2341 */ 2342 2343uint32_t 2344sctp_calculate_rto(struct sctp_tcb *stcb, 2345 struct sctp_association *asoc, 2346 struct sctp_nets *net, 2347 struct timeval *told, 2348 int safe, int rtt_from_sack) 2349{ 2350 /*- 2351 * given an association and the starting 
time of the current RTT
	 * period (in value1/value2) return RTO in number of msecs.
	 */
	int32_t rtt;		/* RTT in ms */
	uint32_t new_rto;
	int first_measure = 0;
	struct timeval now, then, *old;

	/* Copy it out for sparc64 (alignment-safe access to the timeval) */
	if (safe == sctp_align_unsafe_makecopy) {
		old = &then;
		memcpy(&then, told, sizeof(struct timeval));
	} else if (safe == sctp_align_safe_nocopy) {
		old = told;
	} else {
		/* error */
		SCTP_PRINTF("Huh, bad rto calc call\n");
		return (0);
	}
	/************************/
	/* 1. calculate new RTT */
	/************************/
	/* get the current time */
	if (stcb->asoc.use_precise_time) {
		(void)SCTP_GETPTIME_TIMEVAL(&now);
	} else {
		(void)SCTP_GETTIME_TIMEVAL(&now);
	}
	timevalsub(&now, old);
	/* store the current RTT in us */
	net->rtt = (uint64_t) 1000000 * (uint64_t) now.tv_sec +
	    (uint64_t) now.tv_usec;

	/* compute rtt in ms */
	rtt = net->rtt / 1000;
	if ((asoc->cc_functions.sctp_rtt_calculated) && (rtt_from_sack == SCTP_RTT_FROM_DATA)) {
		/*
		 * Tell the CC module that a new update has just occurred
		 * from a sack
		 */
		(*asoc->cc_functions.sctp_rtt_calculated) (stcb, net, &now);
	}
	/*
	 * Do we need to determine the lan? We do this only on sacks i.e.
	 * RTT being determined from data not non-data (HB/INIT->INITACK).
	 */
	if ((rtt_from_sack == SCTP_RTT_FROM_DATA) &&
	    (net->lan_type == SCTP_LAN_UNKNOWN)) {
		if (net->rtt > SCTP_LOCAL_LAN_RTT) {
			net->lan_type = SCTP_LAN_INTERNET;
		} else {
			net->lan_type = SCTP_LAN_LOCAL;
		}
	}
	/***************************/
	/* 2. update RTTVAR & SRTT */
	/***************************/
	/*-
	 * Compute the scaled average lastsa and the
	 * scaled variance lastsv as described in van Jacobson
	 * Paper "Congestion Avoidance and Control", Annex A.
	 *
	 * (net->lastsa >> SCTP_RTT_SHIFT) is the srtt
	 * (net->lastsa >> SCTP_RTT_VAR_SHIFT) is the rttvar
	 */
	if (net->RTO_measured) {
		/* rtt becomes the error term; lastsa/lastsv are scaled EWMAs */
		rtt -= (net->lastsa >> SCTP_RTT_SHIFT);
		net->lastsa += rtt;
		if (rtt < 0) {
			rtt = -rtt;
		}
		rtt -= (net->lastsv >> SCTP_RTT_VAR_SHIFT);
		net->lastsv += rtt;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_RTTVAR);
		}
	} else {
		/* First RTO measurement: seed srtt with rtt, rttvar with rtt/2 */
		net->RTO_measured = 1;
		first_measure = 1;
		net->lastsa = rtt << SCTP_RTT_SHIFT;
		net->lastsv = (rtt / 2) << SCTP_RTT_VAR_SHIFT;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RTTVAR_LOGGING_ENABLE) {
			rto_logging(net, SCTP_LOG_INITIAL_RTT);
		}
	}
	if (net->lastsv == 0) {
		net->lastsv = SCTP_CLOCK_GRANULARITY;
	}
	new_rto = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
	if ((new_rto > SCTP_SAT_NETWORK_MIN) &&
	    (stcb->asoc.sat_network_lockout == 0)) {
		stcb->asoc.sat_network = 1;
	} else if ((!first_measure) && stcb->asoc.sat_network) {
		stcb->asoc.sat_network = 0;
		stcb->asoc.sat_network_lockout = 1;
	}
	/* bound it, per C6/C7 in Section 5.3.1 */
	if (new_rto < stcb->asoc.minrto) {
		new_rto = stcb->asoc.minrto;
	}
	if (new_rto > stcb->asoc.maxrto) {
		new_rto = stcb->asoc.maxrto;
	}
	/* we are now returning the RTO */
	return (new_rto);
}

/*
 * return a pointer to a contiguous piece of data from the given mbuf chain
 * starting at 'off' for 'len' bytes. If the desired piece spans more than
 * one mbuf, a copy is made at 'ptr'. caller must ensure that the buffer size
 * is >= 'len'. returns NULL if there isn't 'len' bytes in the chain.
2464 */ 2465caddr_t 2466sctp_m_getptr(struct mbuf *m, int off, int len, uint8_t * in_ptr) 2467{ 2468 uint32_t count; 2469 uint8_t *ptr; 2470 2471 ptr = in_ptr; 2472 if ((off < 0) || (len <= 0)) 2473 return (NULL); 2474 2475 /* find the desired start location */ 2476 while ((m != NULL) && (off > 0)) { 2477 if (off < SCTP_BUF_LEN(m)) 2478 break; 2479 off -= SCTP_BUF_LEN(m); 2480 m = SCTP_BUF_NEXT(m); 2481 } 2482 if (m == NULL) 2483 return (NULL); 2484 2485 /* is the current mbuf large enough (eg. contiguous)? */ 2486 if ((SCTP_BUF_LEN(m) - off) >= len) { 2487 return (mtod(m, caddr_t)+off); 2488 } else { 2489 /* else, it spans more than one mbuf, so save a temp copy... */ 2490 while ((m != NULL) && (len > 0)) { 2491 count = min(SCTP_BUF_LEN(m) - off, len); 2492 bcopy(mtod(m, caddr_t)+off, ptr, count); 2493 len -= count; 2494 ptr += count; 2495 off = 0; 2496 m = SCTP_BUF_NEXT(m); 2497 } 2498 if ((m == NULL) && (len > 0)) 2499 return (NULL); 2500 else 2501 return ((caddr_t)in_ptr); 2502 } 2503} 2504 2505 2506 2507struct sctp_paramhdr * 2508sctp_get_next_param(struct mbuf *m, 2509 int offset, 2510 struct sctp_paramhdr *pull, 2511 int pull_limit) 2512{ 2513 /* This just provides a typed signature to Peter's Pull routine */ 2514 return ((struct sctp_paramhdr *)sctp_m_getptr(m, offset, pull_limit, 2515 (uint8_t *) pull)); 2516} 2517 2518 2519int 2520sctp_add_pad_tombuf(struct mbuf *m, int padlen) 2521{ 2522 /* 2523 * add padlen bytes of 0 filled padding to the end of the mbuf. If 2524 * padlen is > 3 this routine will fail. 2525 */ 2526 uint8_t *dp; 2527 int i; 2528 2529 if (padlen > 3) { 2530 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 2531 return (ENOBUFS); 2532 } 2533 if (padlen <= M_TRAILINGSPACE(m)) { 2534 /* 2535 * The easy way. 
We hope the majority of the time we hit 2536 * here :) 2537 */ 2538 dp = (uint8_t *) (mtod(m, caddr_t)+SCTP_BUF_LEN(m)); 2539 SCTP_BUF_LEN(m) += padlen; 2540 } else { 2541 /* Hard way we must grow the mbuf */ 2542 struct mbuf *tmp; 2543 2544 tmp = sctp_get_mbuf_for_msg(padlen, 0, M_NOWAIT, 1, MT_DATA); 2545 if (tmp == NULL) { 2546 /* Out of space GAK! we are in big trouble. */ 2547 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 2548 return (ENOBUFS); 2549 } 2550 /* setup and insert in middle */ 2551 SCTP_BUF_LEN(tmp) = padlen; 2552 SCTP_BUF_NEXT(tmp) = NULL; 2553 SCTP_BUF_NEXT(m) = tmp; 2554 dp = mtod(tmp, uint8_t *); 2555 } 2556 /* zero out the pad */ 2557 for (i = 0; i < padlen; i++) { 2558 *dp = 0; 2559 dp++; 2560 } 2561 return (0); 2562} 2563 2564int 2565sctp_pad_lastmbuf(struct mbuf *m, int padval, struct mbuf *last_mbuf) 2566{ 2567 /* find the last mbuf in chain and pad it */ 2568 struct mbuf *m_at; 2569 2570 if (last_mbuf) { 2571 return (sctp_add_pad_tombuf(last_mbuf, padval)); 2572 } else { 2573 for (m_at = m; m_at; m_at = SCTP_BUF_NEXT(m_at)) { 2574 if (SCTP_BUF_NEXT(m_at) == NULL) { 2575 return (sctp_add_pad_tombuf(m_at, padval)); 2576 } 2577 } 2578 } 2579 SCTP_LTRACE_ERR_RET_PKT(m, NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 2580 return (EFAULT); 2581} 2582 2583static void 2584sctp_notify_assoc_change(uint16_t state, struct sctp_tcb *stcb, 2585 uint16_t error, struct sctp_abort_chunk *abort, uint8_t from_peer, int so_locked 2586#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 2587 SCTP_UNUSED 2588#endif 2589) 2590{ 2591 struct mbuf *m_notify; 2592 struct sctp_assoc_change *sac; 2593 struct sctp_queued_to_read *control; 2594 size_t notif_len, abort_len; 2595 unsigned int i; 2596 2597#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2598 struct socket *so; 2599 2600#endif 2601 2602 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVASSOCEVNT)) { 2603 notif_len = sizeof(struct 
sctp_assoc_change);
		if (abort != NULL) {
			abort_len = ntohs(abort->ch.chunk_length);
		} else {
			abort_len = 0;
		}
		/* room for the supported-features list or the ABORT copy */
		if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
			notif_len += SCTP_ASSOC_SUPPORTS_MAX;
		} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
			notif_len += abort_len;
		}
		m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
		if (m_notify == NULL) {
			/* Retry with smaller value. */
			notif_len = sizeof(struct sctp_assoc_change);
			m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA);
			if (m_notify == NULL) {
				goto set_error;
			}
		}
		SCTP_BUF_NEXT(m_notify) = NULL;
		sac = mtod(m_notify, struct sctp_assoc_change *);
		memset(sac, 0, notif_len);
		sac->sac_type = SCTP_ASSOC_CHANGE;
		sac->sac_flags = 0;
		sac->sac_length = sizeof(struct sctp_assoc_change);
		sac->sac_state = state;
		sac->sac_error = error;
		/* XXX verify these stream counts */
		sac->sac_outbound_streams = stcb->asoc.streamoutcnt;
		sac->sac_inbound_streams = stcb->asoc.streamincnt;
		sac->sac_assoc_id = sctp_get_associd(stcb);
		/* the trailing info only fits if the large alloc succeeded */
		if (notif_len > sizeof(struct sctp_assoc_change)) {
			if ((state == SCTP_COMM_UP) || (state == SCTP_RESTART)) {
				i = 0;
				if (stcb->asoc.peer_supports_prsctp) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_PR;
				}
				if (stcb->asoc.peer_supports_auth) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_AUTH;
				}
				if (stcb->asoc.peer_supports_asconf) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_ASCONF;
				}
				sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_MULTIBUF;
				if (stcb->asoc.peer_supports_strreset) {
					sac->sac_info[i++] = SCTP_ASSOC_SUPPORTS_RE_CONFIG;
				}
				sac->sac_length += i;
			} else if ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC)) {
				memcpy(sac->sac_info, abort, abort_len);
				sac->sac_length += abort_len;
			}
		}
		SCTP_BUF_LEN(m_notify) = sac->sac_length;
		control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
		    0, 0, stcb->asoc.context, 0, 0, 0,
		    m_notify);
		if (control != NULL) {
			control->length = SCTP_BUF_LEN(m_notify);
			/* not that we need this */
			control->tail_mbuf = m_notify;
			control->spec_flags = M_NOTIFICATION;
			sctp_add_to_readq(stcb->sctp_ep, stcb,
			    control,
			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD,
			    so_locked);
		} else {
			sctp_m_freem(m_notify);
		}
	}
	/*
	 * For 1-to-1 style sockets, we send up an error when an ABORT
	 * comes in.
	 */
set_error:
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		SOCK_LOCK(stcb->sctp_socket);
		if (from_peer) {
			if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNREFUSED);
				stcb->sctp_socket->so_error = ECONNREFUSED;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET);
				stcb->sctp_socket->so_error = ECONNRESET;
			}
		} else {
			if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) ||
			    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ETIMEDOUT);
				stcb->sctp_socket->so_error = ETIMEDOUT;
			} else {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ECONNABORTED);
				stcb->sctp_socket->so_error = ECONNABORTED;
			}
		}
	}
	/* Wake ANY sleepers */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	so = SCTP_INP_SO(stcb->sctp_ep);
	if (!so_locked) {
		/* hold a ref across the TCB unlock/relock for the socket lock */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
	}
#endif
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    ((state == SCTP_COMM_LOST) || (state == SCTP_CANT_STR_ASSOC))) {
		socantrcvmore_locked(stcb->sctp_socket);
	}
	sorwakeup(stcb->sctp_socket);
	sowwakeup(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	if (!so_locked) {
		SCTP_SOCKET_UNLOCK(so, 1);
	}
#endif
}

/*
 * Queue an SCTP_PEER_ADDR_CHANGE notification for address 'sa' with the
 * given state/error, if the application enabled the event.
 */
static void
sctp_notify_peer_addr_change(struct sctp_tcb *stcb, uint32_t state,
    struct sockaddr *sa, uint32_t error)
{
	struct mbuf *m_notify;
	struct sctp_paddr_change *spc;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPADDREVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_paddr_change), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	spc = mtod(m_notify, struct sctp_paddr_change *);
	spc->spc_type = SCTP_PEER_ADDR_CHANGE;
	spc->spc_flags = 0;
	spc->spc_length = sizeof(struct sctp_paddr_change);
	switch (sa->sa_family) {
#ifdef INET
	case AF_INET:
		memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in));
		break;
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;

			memcpy(&spc->spc_aaddr, sa, sizeof(struct sockaddr_in6));

			sin6 = (struct sockaddr_in6 *)&spc->spc_aaddr;
			if (IN6_IS_SCOPE_LINKLOCAL(&sin6->sin6_addr)) {
				if (sin6->sin6_scope_id == 0) {
					/* recover scope_id for user */
					(void)sa6_recoverscope(sin6);
				} else {
					/* clear embedded scope_id for user */
in6_clearscope(&sin6->sin6_addr); 2773 } 2774 } 2775 break; 2776 } 2777#endif 2778 default: 2779 /* TSNH */ 2780 break; 2781 } 2782 spc->spc_state = state; 2783 spc->spc_error = error; 2784 spc->spc_assoc_id = sctp_get_associd(stcb); 2785 2786 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_paddr_change); 2787 SCTP_BUF_NEXT(m_notify) = NULL; 2788 2789 /* append to socket */ 2790 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 2791 0, 0, stcb->asoc.context, 0, 0, 0, 2792 m_notify); 2793 if (control == NULL) { 2794 /* no memory */ 2795 sctp_m_freem(m_notify); 2796 return; 2797 } 2798 control->length = SCTP_BUF_LEN(m_notify); 2799 control->spec_flags = M_NOTIFICATION; 2800 /* not that we need this */ 2801 control->tail_mbuf = m_notify; 2802 sctp_add_to_readq(stcb->sctp_ep, stcb, 2803 control, 2804 &stcb->sctp_socket->so_rcv, 1, 2805 SCTP_READ_LOCK_NOT_HELD, 2806 SCTP_SO_NOT_LOCKED); 2807} 2808 2809 2810static void 2811sctp_notify_send_failed(struct sctp_tcb *stcb, uint8_t sent, uint32_t error, 2812 struct sctp_tmit_chunk *chk, int so_locked 2813#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 2814 SCTP_UNUSED 2815#endif 2816) 2817{ 2818 struct mbuf *m_notify; 2819 struct sctp_send_failed *ssf; 2820 struct sctp_send_failed_event *ssfe; 2821 struct sctp_queued_to_read *control; 2822 int length; 2823 2824 if ((stcb == NULL) || 2825 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) && 2826 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) { 2827 /* event not enabled */ 2828 return; 2829 } 2830 if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 2831 length = sizeof(struct sctp_send_failed_event); 2832 } else { 2833 length = sizeof(struct sctp_send_failed); 2834 } 2835 m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA); 2836 if (m_notify == NULL) 2837 /* no space left */ 2838 return; 2839 SCTP_BUF_LEN(m_notify) = 0; 2840 if 
(sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) { 2841 ssfe = mtod(m_notify, struct sctp_send_failed_event *); 2842 memset(ssfe, 0, length); 2843 ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT; 2844 if (sent) { 2845 ssfe->ssfe_flags = SCTP_DATA_SENT; 2846 } else { 2847 ssfe->ssfe_flags = SCTP_DATA_UNSENT; 2848 } 2849 length += chk->send_size; 2850 length -= sizeof(struct sctp_data_chunk); 2851 ssfe->ssfe_length = length; 2852 ssfe->ssfe_error = error; 2853 /* not exactly what the user sent in, but should be close :) */ 2854 ssfe->ssfe_info.snd_sid = chk->rec.data.stream_number; 2855 ssfe->ssfe_info.snd_flags = chk->rec.data.rcv_flags; 2856 ssfe->ssfe_info.snd_ppid = chk->rec.data.payloadtype; 2857 ssfe->ssfe_info.snd_context = chk->rec.data.context; 2858 ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb); 2859 ssfe->ssfe_assoc_id = sctp_get_associd(stcb); 2860 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event); 2861 } else { 2862 ssf = mtod(m_notify, struct sctp_send_failed *); 2863 memset(ssf, 0, length); 2864 ssf->ssf_type = SCTP_SEND_FAILED; 2865 if (sent) { 2866 ssf->ssf_flags = SCTP_DATA_SENT; 2867 } else { 2868 ssf->ssf_flags = SCTP_DATA_UNSENT; 2869 } 2870 length += chk->send_size; 2871 length -= sizeof(struct sctp_data_chunk); 2872 ssf->ssf_length = length; 2873 ssf->ssf_error = error; 2874 /* not exactly what the user sent in, but should be close :) */ 2875 bzero(&ssf->ssf_info, sizeof(ssf->ssf_info)); 2876 ssf->ssf_info.sinfo_stream = chk->rec.data.stream_number; 2877 ssf->ssf_info.sinfo_ssn = chk->rec.data.stream_seq; 2878 ssf->ssf_info.sinfo_flags = chk->rec.data.rcv_flags; 2879 ssf->ssf_info.sinfo_ppid = chk->rec.data.payloadtype; 2880 ssf->ssf_info.sinfo_context = chk->rec.data.context; 2881 ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb); 2882 ssf->ssf_assoc_id = sctp_get_associd(stcb); 2883 SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed); 2884 } 2885 if (chk->data) { 2886 /* 2887 * trim off 
the sctp chunk header (it should be there)
		 */
		if (chk->send_size >= sizeof(struct sctp_data_chunk)) {
			m_adj(chk->data, sizeof(struct sctp_data_chunk));
			sctp_mbuf_crush(chk->data);
			chk->send_size -= sizeof(struct sctp_data_chunk);
		}
	}
	SCTP_BUF_NEXT(m_notify) = chk->data;
	/* Steal off the mbuf */
	chk->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1,
	    SCTP_READ_LOCK_NOT_HELD,
	    so_locked);
}


/*
 * Like sctp_notify_send_failed(), but for a stream-queue-pending entry
 * that never became a chunk (always reported as SCTP_DATA_UNSENT).
 * The pending data (sp->data) is stolen and appended to the notification.
 */
static void
sctp_notify_send_failed2(struct sctp_tcb *stcb, uint32_t error,
    struct sctp_stream_queue_pending *sp, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_send_failed *ssf;
	struct sctp_send_failed_event *ssfe;
	struct sctp_queued_to_read *control;
	int length;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSENDFAILEVNT) &&
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT))) {
		/* event not enabled */
		return;
	}
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		length = sizeof(struct sctp_send_failed_event);
	} else {
		length = sizeof(struct sctp_send_failed);
	}
	m_notify = sctp_get_mbuf_for_msg(length, 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	if (sctp_stcb_is_feature_on(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVNSENDFAILEVNT)) {
		ssfe = mtod(m_notify, struct sctp_send_failed_event *);
		memset(ssfe, 0, length);
		ssfe->ssfe_type = SCTP_SEND_FAILED_EVENT;
		ssfe->ssfe_flags = SCTP_DATA_UNSENT;
		length += sp->length;
		ssfe->ssfe_length = length;
		ssfe->ssfe_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssfe->ssfe_info.snd_sid = sp->stream;
		if (sp->some_taken) {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssfe->ssfe_info.snd_flags = SCTP_DATA_NOT_FRAG;
		}
		ssfe->ssfe_info.snd_ppid = sp->ppid;
		ssfe->ssfe_info.snd_context = sp->context;
		ssfe->ssfe_info.snd_assoc_id = sctp_get_associd(stcb);
		ssfe->ssfe_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed_event);
	} else {
		ssf = mtod(m_notify, struct sctp_send_failed *);
		memset(ssf, 0, length);
		ssf->ssf_type = SCTP_SEND_FAILED;
		ssf->ssf_flags = SCTP_DATA_UNSENT;
		length += sp->length;
		ssf->ssf_length = length;
		ssf->ssf_error = error;
		/* not exactly what the user sent in, but should be close :) */
		ssf->ssf_info.sinfo_stream = sp->stream;
		ssf->ssf_info.sinfo_ssn = 0;
		if (sp->some_taken) {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_LAST_FRAG;
		} else {
			ssf->ssf_info.sinfo_flags = SCTP_DATA_NOT_FRAG;
		}
		ssf->ssf_info.sinfo_ppid = sp->ppid;
		ssf->ssf_info.sinfo_context = sp->context;
		ssf->ssf_info.sinfo_assoc_id = sctp_get_associd(stcb);
		ssf->ssf_assoc_id = sctp_get_associd(stcb);
		SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_send_failed);
	}
	SCTP_BUF_NEXT(m_notify) = sp->data;

	/* Steal
off the mbuf */
	sp->data = NULL;
	/*
	 * For this case, we check the actual socket buffer, since the assoc
	 * is going away we don't want to overfill the socket buffer for a
	 * non-reader
	 */
	if (sctp_sbspace_failedmsgs(&stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}



/*
 * Queue an SCTP_ADAPTATION_INDICATION notification carrying the peer's
 * adaptation-layer indication, if the application enabled the event.
 */
static void
sctp_notify_adaptation_layer(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_adaptation_event *sai;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ADAPTATIONEVNT)) {
		/* event not enabled */
		return;
	}
	/*
	 * NOTE(review): the allocation uses the legacy sctp_adaption_event
	 * name while the memset below uses sctp_adaptation_event —
	 * presumably the two are the same size; verify in sctp_uio.h.
	 */
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_adaption_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	sai = mtod(m_notify, struct sctp_adaptation_event *);
	memset(sai, 0, sizeof(struct sctp_adaptation_event));
	sai->sai_type = SCTP_ADAPTATION_INDICATION;
	sai->sai_flags = 0;
	sai->sai_length = sizeof(struct sctp_adaptation_event);
	sai->sai_adaptation_ind = stcb->asoc.peers_adaptation;
	sai->sai_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_adaptation_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}

/* This always must be called with the read-queue LOCKED in the INP */
static void
sctp_notify_partial_delivery_indication(struct sctp_tcb *stcb, uint32_t error,
    uint32_t val, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_pdapi_event *pdapi;
	struct sctp_queued_to_read *control;
	struct sockbuf *sb;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_PDAPIEVNT)) {
		/* event not enabled */
		return;
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_pdapi_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	pdapi = mtod(m_notify, struct sctp_pdapi_event *);
	memset(pdapi, 0, sizeof(struct sctp_pdapi_event));
	pdapi->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT;
	pdapi->pdapi_flags = 0;
	pdapi->pdapi_length = sizeof(struct sctp_pdapi_event);
	pdapi->pdapi_indication = error;
	/* 'val' packs stream number (high 16 bits) and sequence (low 16) */
	pdapi->pdapi_stream = (val >> 16);
	pdapi->pdapi_seq = (val & 0x0000ffff);
	pdapi->pdapi_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_pdapi_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/*
no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	control->held_length = 0;
	control->length = 0;
	sb = &stcb->sctp_socket->so_rcv;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m_notify));
	}
	sctp_sballoc(stcb, sb, m_notify);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
		sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
	}
	atomic_add_int(&control->length, SCTP_BUF_LEN(m_notify));
	control->end_added = 1;
	/* insert right behind the partially-delivered message, if any */
	if (stcb->asoc.control_pdapi)
		TAILQ_INSERT_AFTER(&stcb->sctp_ep->read_queue, stcb->asoc.control_pdapi, control, next);
	else {
		/* we really should not see this case */
		TAILQ_INSERT_TAIL(&stcb->sctp_ep->read_queue, control, next);
	}
	if (stcb->sctp_ep && stcb->sctp_socket) {
		/* This should always be the case */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		if (!so_locked) {
			/* hold a ref across the TCB unlock/relock */
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		if (!so_locked) {
			SCTP_SOCKET_UNLOCK(so, 1);
		}
#endif
	}
}

/*
 * Queue an SCTP_SHUTDOWN_EVENT notification.  For 1-to-1 style (and
 * connected UDP-model) sockets, also mark the socket unable to send
 * and wake up any writers.
 */
static void
sctp_notify_shutdown_event(struct sctp_tcb *stcb)
{
	struct mbuf *m_notify;
	struct sctp_shutdown_event *sse;
	struct sctp_queued_to_read *control;

	/*
	 * For TCP model AND UDP connected sockets we will send an error up
	 * when an SHUTDOWN completes
	 */
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
		/* mark socket closed for read/write and wakeup! */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		struct socket *so;

		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		socantsendmore(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	if (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVSHUTDOWNEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_shutdown_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	sse = mtod(m_notify, struct sctp_shutdown_event *);
	memset(sse, 0, sizeof(struct sctp_shutdown_event));
	sse->sse_type = SCTP_SHUTDOWN_EVENT;
	sse->sse_flags = 0;
	sse->sse_length = sizeof(struct sctp_shutdown_event);
	sse->sse_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_shutdown_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf =
m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}

/*
 * Queue an SCTP_SENDER_DRY_EVENT notification (all outstanding user data
 * has been acknowledged), if the application enabled the event.
 */
static void
sctp_notify_sender_dry_event(struct sctp_tcb *stcb,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct mbuf *m_notify;
	struct sctp_sender_dry_event *event;
	struct sctp_queued_to_read *control;

	if ((stcb == NULL) ||
	    sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_DRYEVNT)) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_sender_dry_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL) {
		/* no space left */
		return;
	}
	SCTP_BUF_LEN(m_notify) = 0;
	event = mtod(m_notify, struct sctp_sender_dry_event *);
	memset(event, 0, sizeof(struct sctp_sender_dry_event));
	event->sender_dry_type = SCTP_SENDER_DRY_EVENT;
	event->sender_dry_flags = 0;
	event->sender_dry_length = sizeof(struct sctp_sender_dry_event);
	event->sender_dry_assoc_id = sctp_get_associd(stcb);

	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_sender_dry_event);
	SCTP_BUF_NEXT(m_notify) = NULL;

	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->length = SCTP_BUF_LEN(m_notify);
	control->spec_flags = M_NOTIFICATION;
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb, control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, so_locked);
}


/*
 * Queue an SCTP_STREAM_CHANGE_EVENT notification after a stream add/reset
 * (numberin/numberout are the resulting in/out stream counts).  If the
 * peer made the request, the local user is not told.
 */
void
sctp_notify_stream_reset_add(struct sctp_tcb *stcb, uint16_t numberin, uint16_t numberout, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_stream_change_event *stradd;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_CHANGEEVNT))) {
		/* event not enabled */
		return;
	}
	if ((stcb->asoc.peer_req_out) && flag) {
		/* Peer made the request, don't tell the local user */
		stcb->asoc.peer_req_out = 0;
		return;
	}
	stcb->asoc.peer_req_out = 0;
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_stream_change_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	stradd = mtod(m_notify, struct sctp_stream_change_event *);
	memset(stradd, 0, sizeof(struct sctp_stream_change_event));
	stradd->strchange_type = SCTP_STREAM_CHANGE_EVENT;
	stradd->strchange_flags = flag;
	stradd->strchange_length = sizeof(struct sctp_stream_change_event);
	stradd->strchange_assoc_id = sctp_get_associd(stcb);
	stradd->strchange_instrms = numberin;
	stradd->strchange_outstrms = numberout;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_stream_change_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}

/*
 * Queue an SCTP_ASSOC_RESET_EVENT notification carrying the new local and
 * remote TSNs after an association-level (TSN) reset.
 */
void
sctp_notify_stream_reset_tsn(struct sctp_tcb *stcb,
    uint32_t sending_tsn, uint32_t recv_tsn, int flag)
{
	struct mbuf *m_notify;
	struct sctp_queued_to_read *control;
	struct sctp_assoc_reset_event *strasoc;

	if ((stcb == NULL) ||
	    (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_ASSOC_RESETEVNT))) {
		/* event not enabled */
		return;
	}
	m_notify = sctp_get_mbuf_for_msg(sizeof(struct sctp_assoc_reset_event), 0, M_NOWAIT, 1, MT_DATA);
	if (m_notify == NULL)
		/* no space left */
		return;
	SCTP_BUF_LEN(m_notify) = 0;
	strasoc = mtod(m_notify, struct sctp_assoc_reset_event *);
	memset(strasoc, 0, sizeof(struct sctp_assoc_reset_event));
	strasoc->assocreset_type = SCTP_ASSOC_RESET_EVENT;
	strasoc->assocreset_flags = flag;
	strasoc->assocreset_length = sizeof(struct sctp_assoc_reset_event);
	strasoc->assocreset_assoc_id = sctp_get_associd(stcb);
	strasoc->assocreset_local_tsn = sending_tsn;
	strasoc->assocreset_remote_tsn = recv_tsn;
	SCTP_BUF_LEN(m_notify) = sizeof(struct sctp_assoc_reset_event);
	SCTP_BUF_NEXT(m_notify) = NULL;
	if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) {
		/* no space */
		sctp_m_freem(m_notify);
		return;
	}
	/* append to socket */
	control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination,
	    0, 0, stcb->asoc.context, 0, 0, 0,
	    m_notify);
	if (control == NULL) {
		/* no memory */
		sctp_m_freem(m_notify);
		return;
	}
	control->spec_flags = M_NOTIFICATION;
	control->length = SCTP_BUF_LEN(m_notify);
	/* not that we need this */
	control->tail_mbuf = m_notify;
	sctp_add_to_readq(stcb->sctp_ep, stcb,
	    control,
	    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
}



static void
sctp_notify_stream_reset(struct sctp_tcb *stcb,
    int number_entries, uint16_t * list, int flag)
{
	struct mbuf *m_notify;
struct sctp_queued_to_read *control; 3404 struct sctp_stream_reset_event *strreset; 3405 int len; 3406 3407 if ((stcb == NULL) || 3408 (sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_STREAM_RESETEVNT))) { 3409 /* event not enabled */ 3410 return; 3411 } 3412 m_notify = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA); 3413 if (m_notify == NULL) 3414 /* no space left */ 3415 return; 3416 SCTP_BUF_LEN(m_notify) = 0; 3417 len = sizeof(struct sctp_stream_reset_event) + (number_entries * sizeof(uint16_t)); 3418 if (len > M_TRAILINGSPACE(m_notify)) { 3419 /* never enough room */ 3420 sctp_m_freem(m_notify); 3421 return; 3422 } 3423 strreset = mtod(m_notify, struct sctp_stream_reset_event *); 3424 memset(strreset, 0, len); 3425 strreset->strreset_type = SCTP_STREAM_RESET_EVENT; 3426 strreset->strreset_flags = flag; 3427 strreset->strreset_length = len; 3428 strreset->strreset_assoc_id = sctp_get_associd(stcb); 3429 if (number_entries) { 3430 int i; 3431 3432 for (i = 0; i < number_entries; i++) { 3433 strreset->strreset_stream_list[i] = ntohs(list[i]); 3434 } 3435 } 3436 SCTP_BUF_LEN(m_notify) = len; 3437 SCTP_BUF_NEXT(m_notify) = NULL; 3438 if (sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv) < SCTP_BUF_LEN(m_notify)) { 3439 /* no space */ 3440 sctp_m_freem(m_notify); 3441 return; 3442 } 3443 /* append to socket */ 3444 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3445 0, 0, stcb->asoc.context, 0, 0, 0, 3446 m_notify); 3447 if (control == NULL) { 3448 /* no memory */ 3449 sctp_m_freem(m_notify); 3450 return; 3451 } 3452 control->spec_flags = M_NOTIFICATION; 3453 control->length = SCTP_BUF_LEN(m_notify); 3454 /* not that we need this */ 3455 control->tail_mbuf = m_notify; 3456 sctp_add_to_readq(stcb->sctp_ep, stcb, 3457 control, 3458 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3459} 3460 3461 3462static void 3463sctp_notify_remote_error(struct sctp_tcb *stcb, uint16_t error, struct 
sctp_error_chunk *chunk) 3464{ 3465 struct mbuf *m_notify; 3466 struct sctp_remote_error *sre; 3467 struct sctp_queued_to_read *control; 3468 size_t notif_len, chunk_len; 3469 3470 if ((stcb == NULL) || 3471 sctp_stcb_is_feature_off(stcb->sctp_ep, stcb, SCTP_PCB_FLAGS_RECVPEERERR)) { 3472 return; 3473 } 3474 if (chunk != NULL) { 3475 chunk_len = ntohs(chunk->ch.chunk_length); 3476 } else { 3477 chunk_len = 0; 3478 } 3479 notif_len = sizeof(struct sctp_remote_error) + chunk_len; 3480 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3481 if (m_notify == NULL) { 3482 /* Retry with smaller value. */ 3483 notif_len = sizeof(struct sctp_remote_error); 3484 m_notify = sctp_get_mbuf_for_msg(notif_len, 0, M_NOWAIT, 1, MT_DATA); 3485 if (m_notify == NULL) { 3486 return; 3487 } 3488 } 3489 SCTP_BUF_NEXT(m_notify) = NULL; 3490 sre = mtod(m_notify, struct sctp_remote_error *); 3491 sre->sre_type = SCTP_REMOTE_ERROR; 3492 sre->sre_flags = 0; 3493 sre->sre_length = sizeof(struct sctp_remote_error); 3494 sre->sre_error = error; 3495 sre->sre_assoc_id = sctp_get_associd(stcb); 3496 if (notif_len > sizeof(struct sctp_remote_error)) { 3497 memcpy(sre->sre_data, chunk, chunk_len); 3498 sre->sre_length += chunk_len; 3499 } 3500 SCTP_BUF_LEN(m_notify) = sre->sre_length; 3501 control = sctp_build_readq_entry(stcb, stcb->asoc.primary_destination, 3502 0, 0, stcb->asoc.context, 0, 0, 0, 3503 m_notify); 3504 if (control != NULL) { 3505 control->length = SCTP_BUF_LEN(m_notify); 3506 /* not that we need this */ 3507 control->tail_mbuf = m_notify; 3508 control->spec_flags = M_NOTIFICATION; 3509 sctp_add_to_readq(stcb->sctp_ep, stcb, 3510 control, 3511 &stcb->sctp_socket->so_rcv, 1, 3512 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 3513 } else { 3514 sctp_m_freem(m_notify); 3515 } 3516} 3517 3518 3519void 3520sctp_ulp_notify(uint32_t notification, struct sctp_tcb *stcb, 3521 uint32_t error, void *data, int so_locked 3522#if !defined(__APPLE__) && 
!defined(SCTP_SO_LOCK_TESTING) 3523 SCTP_UNUSED 3524#endif 3525) 3526{ 3527 if ((stcb == NULL) || 3528 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3529 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3530 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3531 /* If the socket is gone we are out of here */ 3532 return; 3533 } 3534 if (stcb->sctp_socket->so_rcv.sb_state & SBS_CANTRCVMORE) { 3535 return; 3536 } 3537 if ((stcb->asoc.state & SCTP_STATE_COOKIE_WAIT) || 3538 (stcb->asoc.state & SCTP_STATE_COOKIE_ECHOED)) { 3539 if ((notification == SCTP_NOTIFY_INTERFACE_DOWN) || 3540 (notification == SCTP_NOTIFY_INTERFACE_UP) || 3541 (notification == SCTP_NOTIFY_INTERFACE_CONFIRMED)) { 3542 /* Don't report these in front states */ 3543 return; 3544 } 3545 } 3546 switch (notification) { 3547 case SCTP_NOTIFY_ASSOC_UP: 3548 if (stcb->asoc.assoc_up_sent == 0) { 3549 sctp_notify_assoc_change(SCTP_COMM_UP, stcb, error, NULL, 0, so_locked); 3550 stcb->asoc.assoc_up_sent = 1; 3551 } 3552 if (stcb->asoc.adaptation_needed && (stcb->asoc.adaptation_sent == 0)) { 3553 sctp_notify_adaptation_layer(stcb); 3554 } 3555 if (stcb->asoc.peer_supports_auth == 0) { 3556 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 3557 NULL, so_locked); 3558 } 3559 break; 3560 case SCTP_NOTIFY_ASSOC_DOWN: 3561 sctp_notify_assoc_change(SCTP_SHUTDOWN_COMP, stcb, error, NULL, 0, so_locked); 3562 break; 3563 case SCTP_NOTIFY_INTERFACE_DOWN: 3564 { 3565 struct sctp_nets *net; 3566 3567 net = (struct sctp_nets *)data; 3568 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_UNREACHABLE, 3569 (struct sockaddr *)&net->ro._l_addr, error); 3570 break; 3571 } 3572 case SCTP_NOTIFY_INTERFACE_UP: 3573 { 3574 struct sctp_nets *net; 3575 3576 net = (struct sctp_nets *)data; 3577 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_AVAILABLE, 3578 (struct sockaddr *)&net->ro._l_addr, error); 3579 break; 3580 } 3581 case SCTP_NOTIFY_INTERFACE_CONFIRMED: 3582 { 3583 struct sctp_nets *net; 3584 3585 net = 
(struct sctp_nets *)data; 3586 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_CONFIRMED, 3587 (struct sockaddr *)&net->ro._l_addr, error); 3588 break; 3589 } 3590 case SCTP_NOTIFY_SPECIAL_SP_FAIL: 3591 sctp_notify_send_failed2(stcb, error, 3592 (struct sctp_stream_queue_pending *)data, so_locked); 3593 break; 3594 case SCTP_NOTIFY_SENT_DG_FAIL: 3595 sctp_notify_send_failed(stcb, 1, error, 3596 (struct sctp_tmit_chunk *)data, so_locked); 3597 break; 3598 case SCTP_NOTIFY_UNSENT_DG_FAIL: 3599 sctp_notify_send_failed(stcb, 0, error, 3600 (struct sctp_tmit_chunk *)data, so_locked); 3601 break; 3602 case SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION: 3603 { 3604 uint32_t val; 3605 3606 val = *((uint32_t *) data); 3607 3608 sctp_notify_partial_delivery_indication(stcb, error, val, so_locked); 3609 break; 3610 } 3611 case SCTP_NOTIFY_ASSOC_LOC_ABORTED: 3612 if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) || 3613 ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) { 3614 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 0, so_locked); 3615 } else { 3616 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 0, so_locked); 3617 } 3618 break; 3619 case SCTP_NOTIFY_ASSOC_REM_ABORTED: 3620 if (((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_WAIT) || 3621 ((stcb->asoc.state & SCTP_STATE_MASK) == SCTP_STATE_COOKIE_ECHOED)) { 3622 sctp_notify_assoc_change(SCTP_CANT_STR_ASSOC, stcb, error, data, 1, so_locked); 3623 } else { 3624 sctp_notify_assoc_change(SCTP_COMM_LOST, stcb, error, data, 1, so_locked); 3625 } 3626 break; 3627 case SCTP_NOTIFY_ASSOC_RESTART: 3628 sctp_notify_assoc_change(SCTP_RESTART, stcb, error, NULL, 0, so_locked); 3629 if (stcb->asoc.peer_supports_auth == 0) { 3630 sctp_ulp_notify(SCTP_NOTIFY_NO_PEER_AUTH, stcb, 0, 3631 NULL, so_locked); 3632 } 3633 break; 3634 case SCTP_NOTIFY_STR_RESET_SEND: 3635 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_OUTGOING_SSN); 3636 break; 
3637 case SCTP_NOTIFY_STR_RESET_RECV: 3638 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), SCTP_STREAM_RESET_INCOMING); 3639 break; 3640 case SCTP_NOTIFY_STR_RESET_FAILED_OUT: 3641 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3642 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_FAILED)); 3643 break; 3644 case SCTP_NOTIFY_STR_RESET_DENIED_OUT: 3645 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3646 (SCTP_STREAM_RESET_OUTGOING_SSN | SCTP_STREAM_RESET_DENIED)); 3647 break; 3648 case SCTP_NOTIFY_STR_RESET_FAILED_IN: 3649 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3650 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_FAILED)); 3651 break; 3652 case SCTP_NOTIFY_STR_RESET_DENIED_IN: 3653 sctp_notify_stream_reset(stcb, error, ((uint16_t *) data), 3654 (SCTP_STREAM_RESET_INCOMING | SCTP_STREAM_RESET_DENIED)); 3655 break; 3656 case SCTP_NOTIFY_ASCONF_ADD_IP: 3657 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_ADDED, data, 3658 error); 3659 break; 3660 case SCTP_NOTIFY_ASCONF_DELETE_IP: 3661 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_REMOVED, data, 3662 error); 3663 break; 3664 case SCTP_NOTIFY_ASCONF_SET_PRIMARY: 3665 sctp_notify_peer_addr_change(stcb, SCTP_ADDR_MADE_PRIM, data, 3666 error); 3667 break; 3668 case SCTP_NOTIFY_PEER_SHUTDOWN: 3669 sctp_notify_shutdown_event(stcb); 3670 break; 3671 case SCTP_NOTIFY_AUTH_NEW_KEY: 3672 sctp_notify_authentication(stcb, SCTP_AUTH_NEW_KEY, error, 3673 (uint16_t) (uintptr_t) data, 3674 so_locked); 3675 break; 3676 case SCTP_NOTIFY_AUTH_FREE_KEY: 3677 sctp_notify_authentication(stcb, SCTP_AUTH_FREE_KEY, error, 3678 (uint16_t) (uintptr_t) data, 3679 so_locked); 3680 break; 3681 case SCTP_NOTIFY_NO_PEER_AUTH: 3682 sctp_notify_authentication(stcb, SCTP_AUTH_NO_AUTH, error, 3683 (uint16_t) (uintptr_t) data, 3684 so_locked); 3685 break; 3686 case SCTP_NOTIFY_SENDER_DRY: 3687 sctp_notify_sender_dry_event(stcb, so_locked); 3688 break; 3689 case SCTP_NOTIFY_REMOTE_ERROR: 3690 
sctp_notify_remote_error(stcb, error, data); 3691 break; 3692 default: 3693 SCTPDBG(SCTP_DEBUG_UTIL1, "%s: unknown notification %xh (%u)\n", 3694 __FUNCTION__, notification, notification); 3695 break; 3696 } /* end switch */ 3697} 3698 3699void 3700sctp_report_all_outbound(struct sctp_tcb *stcb, uint16_t error, int holds_lock, int so_locked 3701#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3702 SCTP_UNUSED 3703#endif 3704) 3705{ 3706 struct sctp_association *asoc; 3707 struct sctp_stream_out *outs; 3708 struct sctp_tmit_chunk *chk, *nchk; 3709 struct sctp_stream_queue_pending *sp, *nsp; 3710 int i; 3711 3712 if (stcb == NULL) { 3713 return; 3714 } 3715 asoc = &stcb->asoc; 3716 if (asoc->state & SCTP_STATE_ABOUT_TO_BE_FREED) { 3717 /* already being freed */ 3718 return; 3719 } 3720 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3721 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3722 (asoc->state & SCTP_STATE_CLOSED_SOCKET)) { 3723 return; 3724 } 3725 /* now through all the gunk freeing chunks */ 3726 if (holds_lock == 0) { 3727 SCTP_TCB_SEND_LOCK(stcb); 3728 } 3729 /* sent queue SHOULD be empty */ 3730 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 3731 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 3732 asoc->sent_queue_cnt--; 3733 if (chk->sent != SCTP_DATAGRAM_NR_ACKED) { 3734 if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) { 3735 asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--; 3736#ifdef INVARIANTS 3737 } else { 3738 panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number); 3739#endif 3740 } 3741 } 3742 if (chk->data != NULL) { 3743 sctp_free_bufspace(stcb, asoc, chk, 1); 3744 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 3745 error, chk, so_locked); 3746 if (chk->data) { 3747 sctp_m_freem(chk->data); 3748 chk->data = NULL; 3749 } 3750 } 3751 sctp_free_a_chunk(stcb, chk, so_locked); 3752 /* sa_ignore FREED_MEMORY */ 3753 } 3754 /* pending send 
queue SHOULD be empty */ 3755 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 3756 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 3757 asoc->send_queue_cnt--; 3758 if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) { 3759 asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--; 3760#ifdef INVARIANTS 3761 } else { 3762 panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number); 3763#endif 3764 } 3765 if (chk->data != NULL) { 3766 sctp_free_bufspace(stcb, asoc, chk, 1); 3767 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 3768 error, chk, so_locked); 3769 if (chk->data) { 3770 sctp_m_freem(chk->data); 3771 chk->data = NULL; 3772 } 3773 } 3774 sctp_free_a_chunk(stcb, chk, so_locked); 3775 /* sa_ignore FREED_MEMORY */ 3776 } 3777 for (i = 0; i < asoc->streamoutcnt; i++) { 3778 /* For each stream */ 3779 outs = &asoc->strmout[i]; 3780 /* clean up any sends there */ 3781 asoc->locked_on_sending = NULL; 3782 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 3783 asoc->stream_queue_cnt--; 3784 TAILQ_REMOVE(&outs->outqueue, sp, next); 3785 sctp_free_spbufspace(stcb, asoc, sp); 3786 if (sp->data) { 3787 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, stcb, 3788 error, (void *)sp, so_locked); 3789 if (sp->data) { 3790 sctp_m_freem(sp->data); 3791 sp->data = NULL; 3792 sp->tail_mbuf = NULL; 3793 sp->length = 0; 3794 } 3795 } 3796 if (sp->net) { 3797 sctp_free_remote_addr(sp->net); 3798 sp->net = NULL; 3799 } 3800 /* Free the chunk */ 3801 sctp_free_a_strmoq(stcb, sp, so_locked); 3802 /* sa_ignore FREED_MEMORY */ 3803 } 3804 } 3805 3806 if (holds_lock == 0) { 3807 SCTP_TCB_SEND_UNLOCK(stcb); 3808 } 3809} 3810 3811void 3812sctp_abort_notification(struct sctp_tcb *stcb, uint8_t from_peer, uint16_t error, 3813 struct sctp_abort_chunk *abort, int so_locked 3814#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3815 SCTP_UNUSED 3816#endif 3817) 3818{ 3819 if (stcb == NULL) { 3820 return; 3821 } 3822 if 
((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL) || 3823 ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 3824 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_CONNECTED))) { 3825 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_WAS_ABORTED; 3826 } 3827 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 3828 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 3829 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 3830 return; 3831 } 3832 /* Tell them we lost the asoc */ 3833 sctp_report_all_outbound(stcb, error, 1, so_locked); 3834 if (from_peer) { 3835 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_REM_ABORTED, stcb, error, abort, so_locked); 3836 } else { 3837 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_LOC_ABORTED, stcb, error, abort, so_locked); 3838 } 3839} 3840 3841void 3842sctp_abort_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3843 struct mbuf *m, int iphlen, 3844 struct sockaddr *src, struct sockaddr *dst, 3845 struct sctphdr *sh, struct mbuf *op_err, 3846 uint8_t use_mflowid, uint32_t mflowid, 3847 uint32_t vrf_id, uint16_t port) 3848{ 3849 uint32_t vtag; 3850 3851#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3852 struct socket *so; 3853 3854#endif 3855 3856 vtag = 0; 3857 if (stcb != NULL) { 3858 /* We have a TCB to abort, send notification too */ 3859 vtag = stcb->asoc.peer_vtag; 3860 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 3861 /* get the assoc vrf id and table id */ 3862 vrf_id = stcb->asoc.vrf_id; 3863 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED; 3864 } 3865 sctp_send_abort(m, iphlen, src, dst, sh, vtag, op_err, 3866 use_mflowid, mflowid, 3867 vrf_id, port); 3868 if (stcb != NULL) { 3869 /* Ok, now lets free it */ 3870#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3871 so = SCTP_INP_SO(inp); 3872 atomic_add_int(&stcb->asoc.refcnt, 1); 3873 SCTP_TCB_UNLOCK(stcb); 3874 SCTP_SOCKET_LOCK(so, 1); 3875 SCTP_TCB_LOCK(stcb); 3876 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3877#endif 3878 
SCTP_STAT_INCR_COUNTER32(sctps_aborted); 3879 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 3880 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 3881 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3882 } 3883 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_4); 3884#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3885 SCTP_SOCKET_UNLOCK(so, 1); 3886#endif 3887 } 3888} 3889 3890#ifdef SCTP_ASOCLOG_OF_TSNS 3891void 3892sctp_print_out_track_log(struct sctp_tcb *stcb) 3893{ 3894#ifdef NOSIY_PRINTS 3895 int i; 3896 3897 SCTP_PRINTF("Last ep reason:%x\n", stcb->sctp_ep->last_abort_code); 3898 SCTP_PRINTF("IN bound TSN log-aaa\n"); 3899 if ((stcb->asoc.tsn_in_at == 0) && (stcb->asoc.tsn_in_wrapped == 0)) { 3900 SCTP_PRINTF("None rcvd\n"); 3901 goto none_in; 3902 } 3903 if (stcb->asoc.tsn_in_wrapped) { 3904 for (i = stcb->asoc.tsn_in_at; i < SCTP_TSN_LOG_SIZE; i++) { 3905 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 3906 stcb->asoc.in_tsnlog[i].tsn, 3907 stcb->asoc.in_tsnlog[i].strm, 3908 stcb->asoc.in_tsnlog[i].seq, 3909 stcb->asoc.in_tsnlog[i].flgs, 3910 stcb->asoc.in_tsnlog[i].sz); 3911 } 3912 } 3913 if (stcb->asoc.tsn_in_at) { 3914 for (i = 0; i < stcb->asoc.tsn_in_at; i++) { 3915 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 3916 stcb->asoc.in_tsnlog[i].tsn, 3917 stcb->asoc.in_tsnlog[i].strm, 3918 stcb->asoc.in_tsnlog[i].seq, 3919 stcb->asoc.in_tsnlog[i].flgs, 3920 stcb->asoc.in_tsnlog[i].sz); 3921 } 3922 } 3923none_in: 3924 SCTP_PRINTF("OUT bound TSN log-aaa\n"); 3925 if ((stcb->asoc.tsn_out_at == 0) && 3926 (stcb->asoc.tsn_out_wrapped == 0)) { 3927 SCTP_PRINTF("None sent\n"); 3928 } 3929 if (stcb->asoc.tsn_out_wrapped) { 3930 for (i = stcb->asoc.tsn_out_at; i < SCTP_TSN_LOG_SIZE; i++) { 3931 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 3932 stcb->asoc.out_tsnlog[i].tsn, 3933 stcb->asoc.out_tsnlog[i].strm, 3934 stcb->asoc.out_tsnlog[i].seq, 3935 stcb->asoc.out_tsnlog[i].flgs, 3936 
stcb->asoc.out_tsnlog[i].sz); 3937 } 3938 } 3939 if (stcb->asoc.tsn_out_at) { 3940 for (i = 0; i < stcb->asoc.tsn_out_at; i++) { 3941 SCTP_PRINTF("TSN:%x strm:%d seq:%d flags:%x sz:%d\n", 3942 stcb->asoc.out_tsnlog[i].tsn, 3943 stcb->asoc.out_tsnlog[i].strm, 3944 stcb->asoc.out_tsnlog[i].seq, 3945 stcb->asoc.out_tsnlog[i].flgs, 3946 stcb->asoc.out_tsnlog[i].sz); 3947 } 3948 } 3949#endif 3950} 3951 3952#endif 3953 3954void 3955sctp_abort_an_association(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 3956 struct mbuf *op_err, 3957 int so_locked 3958#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING) 3959 SCTP_UNUSED 3960#endif 3961) 3962{ 3963#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3964 struct socket *so; 3965 3966#endif 3967 3968#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3969 so = SCTP_INP_SO(inp); 3970#endif 3971 if (stcb == NULL) { 3972 /* Got to have a TCB */ 3973 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3974 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 3975 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 3976 SCTP_CALLED_DIRECTLY_NOCMPSET); 3977 } 3978 } 3979 return; 3980 } else { 3981 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED; 3982 } 3983 /* notify the ulp */ 3984 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) == 0) { 3985 sctp_abort_notification(stcb, 0, 0, NULL, so_locked); 3986 } 3987 /* notify the peer */ 3988 sctp_send_abort_tcb(stcb, op_err, so_locked); 3989 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 3990 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 3991 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 3992 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 3993 } 3994 /* now free the asoc */ 3995#ifdef SCTP_ASOCLOG_OF_TSNS 3996 sctp_print_out_track_log(stcb); 3997#endif 3998#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3999 if (!so_locked) { 4000 atomic_add_int(&stcb->asoc.refcnt, 1); 4001 SCTP_TCB_UNLOCK(stcb); 4002 SCTP_SOCKET_LOCK(so, 1); 4003 SCTP_TCB_LOCK(stcb); 4004 
atomic_subtract_int(&stcb->asoc.refcnt, 1); 4005 } 4006#endif 4007 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTPUTIL + SCTP_LOC_5); 4008#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4009 if (!so_locked) { 4010 SCTP_SOCKET_UNLOCK(so, 1); 4011 } 4012#endif 4013} 4014 4015void 4016sctp_handle_ootb(struct mbuf *m, int iphlen, int offset, 4017 struct sockaddr *src, struct sockaddr *dst, 4018 struct sctphdr *sh, struct sctp_inpcb *inp, 4019 uint8_t use_mflowid, uint32_t mflowid, 4020 uint32_t vrf_id, uint16_t port) 4021{ 4022 struct sctp_chunkhdr *ch, chunk_buf; 4023 unsigned int chk_length; 4024 int contains_init_chunk; 4025 4026 SCTP_STAT_INCR_COUNTER32(sctps_outoftheblue); 4027 /* Generate a TO address for future reference */ 4028 if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE)) { 4029 if (LIST_EMPTY(&inp->sctp_asoc_list)) { 4030 sctp_inpcb_free(inp, SCTP_FREE_SHOULD_USE_ABORT, 4031 SCTP_CALLED_DIRECTLY_NOCMPSET); 4032 } 4033 } 4034 contains_init_chunk = 0; 4035 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4036 sizeof(*ch), (uint8_t *) & chunk_buf); 4037 while (ch != NULL) { 4038 chk_length = ntohs(ch->chunk_length); 4039 if (chk_length < sizeof(*ch)) { 4040 /* break to abort land */ 4041 break; 4042 } 4043 switch (ch->chunk_type) { 4044 case SCTP_INIT: 4045 contains_init_chunk = 1; 4046 break; 4047 case SCTP_COOKIE_ECHO: 4048 /* We hit here only if the assoc is being freed */ 4049 return; 4050 case SCTP_PACKET_DROPPED: 4051 /* we don't respond to pkt-dropped */ 4052 return; 4053 case SCTP_ABORT_ASSOCIATION: 4054 /* we don't respond with an ABORT to an ABORT */ 4055 return; 4056 case SCTP_SHUTDOWN_COMPLETE: 4057 /* 4058 * we ignore it since we are not waiting for it and 4059 * peer is gone 4060 */ 4061 return; 4062 case SCTP_SHUTDOWN_ACK: 4063 sctp_send_shutdown_complete2(src, dst, sh, 4064 use_mflowid, mflowid, 4065 vrf_id, port); 4066 return; 4067 default: 4068 break; 4069 } 4070 offset += SCTP_SIZE32(chk_length); 
4071 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4072 sizeof(*ch), (uint8_t *) & chunk_buf); 4073 } 4074 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 4075 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 4076 (contains_init_chunk == 0))) { 4077 sctp_send_abort(m, iphlen, src, dst, sh, 0, NULL, 4078 use_mflowid, mflowid, 4079 vrf_id, port); 4080 } 4081} 4082 4083/* 4084 * check the inbound datagram to make sure there is not an abort inside it, 4085 * if there is return 1, else return 0. 4086 */ 4087int 4088sctp_is_there_an_abort_here(struct mbuf *m, int iphlen, uint32_t * vtagfill) 4089{ 4090 struct sctp_chunkhdr *ch; 4091 struct sctp_init_chunk *init_chk, chunk_buf; 4092 int offset; 4093 unsigned int chk_length; 4094 4095 offset = iphlen + sizeof(struct sctphdr); 4096 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, sizeof(*ch), 4097 (uint8_t *) & chunk_buf); 4098 while (ch != NULL) { 4099 chk_length = ntohs(ch->chunk_length); 4100 if (chk_length < sizeof(*ch)) { 4101 /* packet is probably corrupt */ 4102 break; 4103 } 4104 /* we seem to be ok, is it an abort? */ 4105 if (ch->chunk_type == SCTP_ABORT_ASSOCIATION) { 4106 /* yep, tell them */ 4107 return (1); 4108 } 4109 if (ch->chunk_type == SCTP_INITIATION) { 4110 /* need to update the Vtag */ 4111 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 4112 offset, sizeof(*init_chk), (uint8_t *) & chunk_buf); 4113 if (init_chk != NULL) { 4114 *vtagfill = ntohl(init_chk->init.initiate_tag); 4115 } 4116 } 4117 /* Nope, move to the next chunk */ 4118 offset += SCTP_SIZE32(chk_length); 4119 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, offset, 4120 sizeof(*ch), (uint8_t *) & chunk_buf); 4121 } 4122 return (0); 4123} 4124 4125/* 4126 * currently (2/02), ifa_addr embeds scope_id's and don't have sin6_scope_id 4127 * set (i.e. 
it's 0) so, create this function to compare link local scopes 4128 */ 4129#ifdef INET6 4130uint32_t 4131sctp_is_same_scope(struct sockaddr_in6 *addr1, struct sockaddr_in6 *addr2) 4132{ 4133 struct sockaddr_in6 a, b; 4134 4135 /* save copies */ 4136 a = *addr1; 4137 b = *addr2; 4138 4139 if (a.sin6_scope_id == 0) 4140 if (sa6_recoverscope(&a)) { 4141 /* can't get scope, so can't match */ 4142 return (0); 4143 } 4144 if (b.sin6_scope_id == 0) 4145 if (sa6_recoverscope(&b)) { 4146 /* can't get scope, so can't match */ 4147 return (0); 4148 } 4149 if (a.sin6_scope_id != b.sin6_scope_id) 4150 return (0); 4151 4152 return (1); 4153} 4154 4155/* 4156 * returns a sockaddr_in6 with embedded scope recovered and removed 4157 */ 4158struct sockaddr_in6 * 4159sctp_recover_scope(struct sockaddr_in6 *addr, struct sockaddr_in6 *store) 4160{ 4161 /* check and strip embedded scope junk */ 4162 if (addr->sin6_family == AF_INET6) { 4163 if (IN6_IS_SCOPE_LINKLOCAL(&addr->sin6_addr)) { 4164 if (addr->sin6_scope_id == 0) { 4165 *store = *addr; 4166 if (!sa6_recoverscope(store)) { 4167 /* use the recovered scope */ 4168 addr = store; 4169 } 4170 } else { 4171 /* else, return the original "to" addr */ 4172 in6_clearscope(&addr->sin6_addr); 4173 } 4174 } 4175 } 4176 return (addr); 4177} 4178 4179#endif 4180 4181/* 4182 * are the two addresses the same? 
currently a "scopeless" check returns: 1 4183 * if same, 0 if not 4184 */ 4185int 4186sctp_cmpaddr(struct sockaddr *sa1, struct sockaddr *sa2) 4187{ 4188 4189 /* must be valid */ 4190 if (sa1 == NULL || sa2 == NULL) 4191 return (0); 4192 4193 /* must be the same family */ 4194 if (sa1->sa_family != sa2->sa_family) 4195 return (0); 4196 4197 switch (sa1->sa_family) { 4198#ifdef INET6 4199 case AF_INET6: 4200 { 4201 /* IPv6 addresses */ 4202 struct sockaddr_in6 *sin6_1, *sin6_2; 4203 4204 sin6_1 = (struct sockaddr_in6 *)sa1; 4205 sin6_2 = (struct sockaddr_in6 *)sa2; 4206 return (SCTP6_ARE_ADDR_EQUAL(sin6_1, 4207 sin6_2)); 4208 } 4209#endif 4210#ifdef INET 4211 case AF_INET: 4212 { 4213 /* IPv4 addresses */ 4214 struct sockaddr_in *sin_1, *sin_2; 4215 4216 sin_1 = (struct sockaddr_in *)sa1; 4217 sin_2 = (struct sockaddr_in *)sa2; 4218 return (sin_1->sin_addr.s_addr == sin_2->sin_addr.s_addr); 4219 } 4220#endif 4221 default: 4222 /* we don't do these... */ 4223 return (0); 4224 } 4225} 4226 4227void 4228sctp_print_address(struct sockaddr *sa) 4229{ 4230#ifdef INET6 4231 char ip6buf[INET6_ADDRSTRLEN]; 4232 4233#endif 4234 4235 switch (sa->sa_family) { 4236#ifdef INET6 4237 case AF_INET6: 4238 { 4239 struct sockaddr_in6 *sin6; 4240 4241 sin6 = (struct sockaddr_in6 *)sa; 4242 SCTP_PRINTF("IPv6 address: %s:port:%d scope:%u\n", 4243 ip6_sprintf(ip6buf, &sin6->sin6_addr), 4244 ntohs(sin6->sin6_port), 4245 sin6->sin6_scope_id); 4246 break; 4247 } 4248#endif 4249#ifdef INET 4250 case AF_INET: 4251 { 4252 struct sockaddr_in *sin; 4253 unsigned char *p; 4254 4255 sin = (struct sockaddr_in *)sa; 4256 p = (unsigned char *)&sin->sin_addr; 4257 SCTP_PRINTF("IPv4 address: %u.%u.%u.%u:%d\n", 4258 p[0], p[1], p[2], p[3], ntohs(sin->sin_port)); 4259 break; 4260 } 4261#endif 4262 default: 4263 SCTP_PRINTF("?\n"); 4264 break; 4265 } 4266} 4267 4268void 4269sctp_pull_off_control_to_new_inp(struct sctp_inpcb *old_inp, 4270 struct sctp_inpcb *new_inp, 4271 struct sctp_tcb *stcb, 4272 int 
    waitflags)
{
	/*
	 * go through our old INP and pull off any control structures that
	 * belong to stcb and move them to the new inp.
	 */
	struct socket *old_so, *new_so;
	struct sctp_queued_to_read *control, *nctl;
	struct sctp_readhead tmp_queue;
	struct mbuf *m;
	int error = 0;

	old_so = old_inp->sctp_socket;
	new_so = new_inp->sctp_socket;
	TAILQ_INIT(&tmp_queue);
	/* take the sb-lock on the old receive buffer; waitflags controls blocking */
	error = sblock(&old_so->so_rcv, waitflags);
	if (error) {
		/*
		 * Gak, can't get sblock, we have a problem. data will be
		 * left stranded.. and we don't dare look at it since the
		 * other thread may be reading something. Oh well, its a
		 * screwed up app that does a peeloff OR a accept while
		 * reading from the main socket... actually its only the
		 * peeloff() case, since I think read will fail on a
		 * listening socket..
		 */
		return;
	}
	/* lock the socket buffers */
	SCTP_INP_READ_LOCK(old_inp);
	TAILQ_FOREACH_SAFE(control, &old_inp->read_queue, next, nctl) {
		/* Pull off all for our target stcb */
		if (control->stcb == stcb) {
			/* remove it we want it */
			TAILQ_REMOVE(&old_inp->read_queue, control, next);
			TAILQ_INSERT_TAIL(&tmp_queue, control, next);
			m = control->data;
			/* un-account every mbuf from the old socket's receive buffer */
			while (m) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m));
				}
				sctp_sbfree(control, stcb, &old_so->so_rcv, m);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
					sctp_sblog(&old_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
				}
				m = SCTP_BUF_NEXT(m);
			}
		}
	}
	SCTP_INP_READ_UNLOCK(old_inp);
	/* Remove the sb-lock on the old socket */

	sbunlock(&old_so->so_rcv);
	/* Now we move them over to the new socket buffer */
	SCTP_INP_READ_LOCK(new_inp);
	TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
		TAILQ_INSERT_TAIL(&new_inp->read_queue, control, next);
		m = control->data;
		/* account every mbuf into the new socket's receive buffer */
		while (m) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
			}
			sctp_sballoc(stcb, &new_so->so_rcv, m);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(&new_so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
			m = SCTP_BUF_NEXT(m);
		}
	}
	SCTP_INP_READ_UNLOCK(new_inp);
}

/*
 * Queue a complete (or notification) message on the endpoint's read queue
 * and charge its mbufs to the socket receive buffer so select()/read() see
 * the data.  Frees the control (and its data) instead if the socket can no
 * longer be read from.
 */
void
sctp_add_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct sockbuf *sb,
    int end,
    int inp_read_lock_held,
    int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	/*
	 * Here we must place the control on the end of the socket read
	 * queue AND increment sb_cc so that select will work properly on
	 * read.
	 */
	struct mbuf *m, *prev = NULL;

	if (inp == NULL) {
		/* Gak, TSNH!!
		 */
#ifdef INVARIANTS
		panic("Gak, inp NULL on add_to_readq");
#endif
		return;
	}
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_LOCK(inp);
	if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ) {
		/* reader is gone: drop the message instead of queueing it */
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (!(control->spec_flags & M_NOTIFICATION)) {
		atomic_add_int(&inp->total_recvs, 1);
		if (!control->do_not_ref_stcb) {
			atomic_add_int(&stcb->total_recvs, 1);
		}
	}
	m = control->data;
	control->held_length = 0;
	control->length = 0;
	/* walk the chain: drop empty mbufs, charge the rest to the sockbuf */
	while (m) {
		if (SCTP_BUF_LEN(m) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				control->data = sctp_m_free(m);
				m = control->data;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(m);
				m = SCTP_BUF_NEXT(prev);
			}
			if (m == NULL) {
				control->tail_mbuf = prev;
			}
			continue;
		}
		prev = m;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(m));
		}
		sctp_sballoc(stcb, sb, m);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
			sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
		}
		atomic_add_int(&control->length, SCTP_BUF_LEN(m));
		m = SCTP_BUF_NEXT(m);
	}
	if (prev != NULL) {
		control->tail_mbuf = prev;
	} else {
		/* Everything got collapsed out??
		 */
		sctp_free_remote_addr(control->whoFrom);
		SCTP_ZONE_FREE(SCTP_BASE_INFO(ipi_zone_readq), control);
		if (inp_read_lock_held == 0)
			SCTP_INP_READ_UNLOCK(inp);
		return;
	}
	if (end) {
		control->end_added = 1;
	}
	TAILQ_INSERT_TAIL(&inp->read_queue, control, next);
	if (inp_read_lock_held == 0)
		SCTP_INP_READ_UNLOCK(inp);
	/* NOTE: inp is already known non-NULL at this point */
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (!so_locked) {
				/* drop the TCB lock around taking the socket lock */
				if (stcb) {
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
				}
				SCTP_SOCKET_LOCK(so, 1);
				if (stcb) {
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
				}
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return;
				}
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			if (!so_locked) {
				SCTP_SOCKET_UNLOCK(so, 1);
			}
#endif
		}
	}
}

/*
 * Append mbuf chain m to an existing read-queue entry (partial delivery /
 * reassembly).  Returns 0 on success, -1 if the control is missing, already
 * complete, or m is empty.
 */
int
sctp_append_to_readq(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_queued_to_read *control,
    struct mbuf *m,
    int end,
    int ctls_cumack,
    struct sockbuf *sb)
{
	/*
	 * A partial delivery API event is underway. OR we are appending on
	 * the reassembly queue.
	 *
	 * If PDAPI this means we need to add m to the end of the data.
	 * Increase the length in the control AND increment the sb_cc.
	 * Otherwise sb is NULL and all we need to do is put it at the end
	 * of the mbuf chain.
	 */
	int len = 0;
	struct mbuf *mm, *tail = NULL, *prev = NULL;

	if (inp) {
		SCTP_INP_READ_LOCK(inp);
	}
	if (control == NULL) {
get_out:
		if (inp) {
			SCTP_INP_READ_UNLOCK(inp);
		}
		return (-1);
	}
	if (inp && (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_CANT_READ)) {
		SCTP_INP_READ_UNLOCK(inp);
		return (0);
	}
	if (control->end_added) {
		/* huh this one is complete? */
		goto get_out;
	}
	mm = m;
	if (mm == NULL) {
		goto get_out;
	}
	/* strip empty mbufs and (if sb given) charge the rest to the sockbuf */
	while (mm) {
		if (SCTP_BUF_LEN(mm) == 0) {
			/* Skip mbufs with NO length */
			if (prev == NULL) {
				/* First one */
				m = sctp_m_free(mm);
				mm = m;
			} else {
				SCTP_BUF_NEXT(prev) = sctp_m_free(mm);
				mm = SCTP_BUF_NEXT(prev);
			}
			continue;
		}
		prev = mm;
		len += SCTP_BUF_LEN(mm);
		if (sb) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBALLOC, SCTP_BUF_LEN(mm));
			}
			sctp_sballoc(stcb, sb, mm);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) {
				sctp_sblog(sb, control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0);
			}
		}
		mm = SCTP_BUF_NEXT(mm);
	}
	if (prev) {
		tail = prev;
	} else {
		/* Really there should always be a prev */
		if (m == NULL) {
			/* Huh nothing left?
			 */
#ifdef INVARIANTS
			panic("Nothing left to add?");
#else
			goto get_out;
#endif
		}
		tail = m;
	}
	if (control->tail_mbuf) {
		/* append */
		SCTP_BUF_NEXT(control->tail_mbuf) = m;
		control->tail_mbuf = tail;
	} else {
		/* nothing there */
#ifdef INVARIANTS
		if (control->data != NULL) {
			panic("This should NOT happen");
		}
#endif
		control->data = m;
		control->tail_mbuf = tail;
	}
	atomic_add_int(&control->length, len);
	if (end) {
		/* message is complete */
		if (stcb && (control == stcb->asoc.control_pdapi)) {
			stcb->asoc.control_pdapi = NULL;
		}
		control->held_length = 0;
		control->end_added = 1;
	}
	if (stcb == NULL) {
		control->do_not_ref_stcb = 1;
	}
	/*
	 * When we are appending in partial delivery, the cum-ack is used
	 * for the actual pd-api highest tsn on this mbuf. The true cum-ack
	 * is populated in the outbound sinfo structure from the true cumack
	 * if the association exists...
	 */
	control->sinfo_tsn = control->sinfo_cumtsn = ctls_cumack;
	if (inp) {
		SCTP_INP_READ_UNLOCK(inp);
	}
	if (inp && inp->sctp_socket) {
		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_ZERO_COPY_ACTIVE)) {
			SCTP_ZERO_COPY_EVENT(inp, inp->sctp_socket);
		} else {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

			so = SCTP_INP_SO(inp);
			if (stcb) {
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
			}
			SCTP_SOCKET_LOCK(so, 1);
			if (stcb) {
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				SCTP_SOCKET_UNLOCK(so, 1);
				return (0);
			}
#endif
			sctp_sorwakeup(inp, inp->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
	}
	return (0);
}



/*************HOLD THIS COMMENT FOR PATCH FILE OF
 *************ALTERNATE ROUTING CODE
 */

/*************HOLD THIS COMMENT FOR END OF PATCH FILE OF
 *************ALTERNATE ROUTING CODE
 */

/*
 * Build and return an mbuf containing an "invalid mandatory parameter"
 * parameter header of the given type, or NULL if allocation fails.
 * Caller owns the returned mbuf.
 */
struct mbuf *
sctp_generate_invmanparam(int err)
{
	/* Return a MBUF with an invalid mandatory parameter */
	struct mbuf *m;

	m = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 0, M_NOWAIT, 1, MT_DATA);
	if (m) {
		struct sctp_paramhdr *ph;

		SCTP_BUF_LEN(m) = sizeof(struct sctp_paramhdr);
		ph = mtod(m, struct sctp_paramhdr *);
		ph->param_length = htons(sizeof(struct sctp_paramhdr));
		ph->param_type = htons(err);
	}
	return (m);
}

#ifdef SCTP_MBCNT_LOGGING
/*
 * Release accounting for chk_cnt chunks worth of outbound data (tp1):
 * decrement the association's output-queue byte/chunk counters and, for
 * TCP-model sockets, the socket send-buffer byte count.
 */
void
sctp_free_bufspace(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *tp1, int chk_cnt)
{
	if (tp1->data == NULL) {
		return;
	}
	asoc->chunks_on_out_queue -= chk_cnt;
	if
	    (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBCNT_LOGGING_ENABLE) {
		sctp_log_mbcnt(SCTP_LOG_MBCNT_DECREASE,
		    asoc->total_output_queue_size,
		    tp1->book_size,
		    0,
		    tp1->mbcnt);
	}
	/* clamp at zero rather than underflowing the queue size */
	if (asoc->total_output_queue_size >= tp1->book_size) {
		atomic_add_int(&asoc->total_output_queue_size, -tp1->book_size);
	} else {
		asoc->total_output_queue_size = 0;
	}

	if (stcb->sctp_socket && (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) ||
	    ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE)))) {
		if (stcb->sctp_socket->so_snd.sb_cc >= tp1->book_size) {
			stcb->sctp_socket->so_snd.sb_cc -= tp1->book_size;
		} else {
			stcb->sctp_socket->so_snd.sb_cc = 0;

		}
	}
}

#endif

/*
 * Abandon a PR-SCTP message starting at chunk tp1: mark every fragment of
 * the message (across the sent, send and stream-out queues) as
 * SCTP_FORWARD_TSN_SKIP, notify the ULP of the failure, free the data and
 * return the number of booked bytes released.  'sent' selects the
 * SENT vs UNSENT failure notification.
 */
int
sctp_release_pr_sctp_chunk(struct sctp_tcb *stcb, struct sctp_tmit_chunk *tp1,
    uint8_t sent, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	struct sctp_stream_out *strq;
	struct sctp_tmit_chunk *chk = NULL, *tp2;
	struct sctp_stream_queue_pending *sp;
	uint16_t stream = 0, seq = 0;
	uint8_t foundeom = 0;
	int ret_sz = 0;
	int notdone;
	int do_wakeup_routine = 0;

	stream = tp1->rec.data.stream_number;
	seq = tp1->rec.data.stream_seq;
	/* first pass: walk forward from tp1 on the sent queue */
	do {
		ret_sz += tp1->book_size;
		if (tp1->data != NULL) {
			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
				sctp_flight_size_decrease(tp1);
				sctp_total_flight_decrease(stcb, tp1);
			}
			sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
			stcb->asoc.peers_rwnd += tp1->send_size;
			stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			if (sent) {
				sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked);
			} else {
				sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked);
			}
			if (tp1->data) {
				sctp_m_freem(tp1->data);
				tp1->data = NULL;
			}
do_wakeup_routine = 1; 4723 if (PR_SCTP_BUF_ENABLED(tp1->flags)) { 4724 stcb->asoc.sent_queue_cnt_removeable--; 4725 } 4726 } 4727 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4728 if ((tp1->rec.data.rcv_flags & SCTP_DATA_NOT_FRAG) == 4729 SCTP_DATA_NOT_FRAG) { 4730 /* not frag'ed we ae done */ 4731 notdone = 0; 4732 foundeom = 1; 4733 } else if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4734 /* end of frag, we are done */ 4735 notdone = 0; 4736 foundeom = 1; 4737 } else { 4738 /* 4739 * Its a begin or middle piece, we must mark all of 4740 * it 4741 */ 4742 notdone = 1; 4743 tp1 = TAILQ_NEXT(tp1, sctp_next); 4744 } 4745 } while (tp1 && notdone); 4746 if (foundeom == 0) { 4747 /* 4748 * The multi-part message was scattered across the send and 4749 * sent queue. 4750 */ 4751 TAILQ_FOREACH_SAFE(tp1, &stcb->asoc.send_queue, sctp_next, tp2) { 4752 if ((tp1->rec.data.stream_number != stream) || 4753 (tp1->rec.data.stream_seq != seq)) { 4754 break; 4755 } 4756 /* 4757 * save to chk in case we have some on stream out 4758 * queue. If so and we have an un-transmitted one we 4759 * don't have to fudge the TSN. 4760 */ 4761 chk = tp1; 4762 ret_sz += tp1->book_size; 4763 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 4764 if (sent) { 4765 sctp_ulp_notify(SCTP_NOTIFY_SENT_DG_FAIL, stcb, 0, tp1, so_locked); 4766 } else { 4767 sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb, 0, tp1, so_locked); 4768 } 4769 if (tp1->data) { 4770 sctp_m_freem(tp1->data); 4771 tp1->data = NULL; 4772 } 4773 /* No flight involved here book the size to 0 */ 4774 tp1->book_size = 0; 4775 if (tp1->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 4776 foundeom = 1; 4777 } 4778 do_wakeup_routine = 1; 4779 tp1->sent = SCTP_FORWARD_TSN_SKIP; 4780 TAILQ_REMOVE(&stcb->asoc.send_queue, tp1, sctp_next); 4781 /* 4782 * on to the sent queue so we can wait for it to be 4783 * passed by. 
4784 */ 4785 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, tp1, 4786 sctp_next); 4787 stcb->asoc.send_queue_cnt--; 4788 stcb->asoc.sent_queue_cnt++; 4789 } 4790 } 4791 if (foundeom == 0) { 4792 /* 4793 * Still no eom found. That means there is stuff left on the 4794 * stream out queue.. yuck. 4795 */ 4796 SCTP_TCB_SEND_LOCK(stcb); 4797 strq = &stcb->asoc.strmout[stream]; 4798 sp = TAILQ_FIRST(&strq->outqueue); 4799 if (sp != NULL) { 4800 sp->discard_rest = 1; 4801 /* 4802 * We may need to put a chunk on the queue that 4803 * holds the TSN that would have been sent with the 4804 * LAST bit. 4805 */ 4806 if (chk == NULL) { 4807 /* Yep, we have to */ 4808 sctp_alloc_a_chunk(stcb, chk); 4809 if (chk == NULL) { 4810 /* 4811 * we are hosed. All we can do is 4812 * nothing.. which will cause an 4813 * abort if the peer is paying 4814 * attention. 4815 */ 4816 goto oh_well; 4817 } 4818 memset(chk, 0, sizeof(*chk)); 4819 chk->rec.data.rcv_flags = SCTP_DATA_LAST_FRAG; 4820 chk->sent = SCTP_FORWARD_TSN_SKIP; 4821 chk->asoc = &stcb->asoc; 4822 chk->rec.data.stream_seq = strq->next_sequence_send; 4823 chk->rec.data.stream_number = sp->stream; 4824 chk->rec.data.payloadtype = sp->ppid; 4825 chk->rec.data.context = sp->context; 4826 chk->flags = sp->act_flags; 4827 if (sp->net) 4828 chk->whoTo = sp->net; 4829 else 4830 chk->whoTo = stcb->asoc.primary_destination; 4831 atomic_add_int(&chk->whoTo->ref_count, 1); 4832 chk->rec.data.TSN_seq = atomic_fetchadd_int(&stcb->asoc.sending_seq, 1); 4833 stcb->asoc.pr_sctp_cnt++; 4834 TAILQ_INSERT_TAIL(&stcb->asoc.sent_queue, chk, sctp_next); 4835 stcb->asoc.sent_queue_cnt++; 4836 stcb->asoc.pr_sctp_cnt++; 4837 } else { 4838 chk->rec.data.rcv_flags |= SCTP_DATA_LAST_FRAG; 4839 } 4840 strq->next_sequence_send++; 4841 oh_well: 4842 if (sp->data) { 4843 /* 4844 * Pull any data to free up the SB and allow 4845 * sender to "add more" while we will throw 4846 * away :-) 4847 */ 4848 sctp_free_spbufspace(stcb, &stcb->asoc, sp); 4849 ret_sz += 
sp->length; 4850 do_wakeup_routine = 1; 4851 sp->some_taken = 1; 4852 sctp_m_freem(sp->data); 4853 sp->data = NULL; 4854 sp->tail_mbuf = NULL; 4855 sp->length = 0; 4856 } 4857 } 4858 SCTP_TCB_SEND_UNLOCK(stcb); 4859 } 4860 if (do_wakeup_routine) { 4861#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4862 struct socket *so; 4863 4864 so = SCTP_INP_SO(stcb->sctp_ep); 4865 if (!so_locked) { 4866 atomic_add_int(&stcb->asoc.refcnt, 1); 4867 SCTP_TCB_UNLOCK(stcb); 4868 SCTP_SOCKET_LOCK(so, 1); 4869 SCTP_TCB_LOCK(stcb); 4870 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4871 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4872 /* assoc was freed while we were unlocked */ 4873 SCTP_SOCKET_UNLOCK(so, 1); 4874 return (ret_sz); 4875 } 4876 } 4877#endif 4878 sctp_sowwakeup(stcb->sctp_ep, stcb->sctp_socket); 4879#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4880 if (!so_locked) { 4881 SCTP_SOCKET_UNLOCK(so, 1); 4882 } 4883#endif 4884 } 4885 return (ret_sz); 4886} 4887 4888/* 4889 * checks to see if the given address, sa, is one that is currently known by 4890 * the kernel note: can't distinguish the same address on multiple interfaces 4891 * and doesn't handle multiple addresses with different zone/scope id's note: 4892 * ifa_ifwithaddr() compares the entire sockaddr struct 4893 */ 4894struct sctp_ifa * 4895sctp_find_ifa_in_ep(struct sctp_inpcb *inp, struct sockaddr *addr, 4896 int holds_lock) 4897{ 4898 struct sctp_laddr *laddr; 4899 4900 if (holds_lock == 0) { 4901 SCTP_INP_RLOCK(inp); 4902 } 4903 LIST_FOREACH(laddr, &inp->sctp_addr_list, sctp_nxt_addr) { 4904 if (laddr->ifa == NULL) 4905 continue; 4906 if (addr->sa_family != laddr->ifa->address.sa.sa_family) 4907 continue; 4908#ifdef INET 4909 if (addr->sa_family == AF_INET) { 4910 if (((struct sockaddr_in *)addr)->sin_addr.s_addr == 4911 laddr->ifa->address.sin.sin_addr.s_addr) { 4912 /* found him. 
				 */
				if (holds_lock == 0) {
					SCTP_INP_RUNLOCK(inp);
				}
				return (laddr->ifa);
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &laddr->ifa->address.sin6)) {
				/* found him. */
				if (holds_lock == 0) {
					SCTP_INP_RUNLOCK(inp);
				}
				return (laddr->ifa);
				break;
			}
		}
#endif
	}
	if (holds_lock == 0) {
		SCTP_INP_RUNLOCK(inp);
	}
	return (NULL);
}

/*
 * Hash the given address into a 32-bit value used to pick a bucket in the
 * VRF address hash table.  Unknown families hash to 0.
 */
uint32_t
sctp_get_ifa_hash_val(struct sockaddr *addr)
{
	switch (addr->sa_family) {
#ifdef INET
	case AF_INET:
		{
			struct sockaddr_in *sin;

			sin = (struct sockaddr_in *)addr;
			/* fold the upper half of the address into the lower */
			return (sin->sin_addr.s_addr ^ (sin->sin_addr.s_addr >> 16));
		}
#endif
#ifdef INET6
	case AF_INET6:
		{
			struct sockaddr_in6 *sin6;
			uint32_t hash_of_addr;

			sin6 = (struct sockaddr_in6 *)addr;
			/* sum the four 32-bit words, then fold */
			hash_of_addr = (sin6->sin6_addr.s6_addr32[0] +
			    sin6->sin6_addr.s6_addr32[1] +
			    sin6->sin6_addr.s6_addr32[2] +
			    sin6->sin6_addr.s6_addr32[3]);
			hash_of_addr = (hash_of_addr ^ (hash_of_addr >> 16));
			return (hash_of_addr);
		}
#endif
	default:
		break;
	}
	return (0);
}

/*
 * Look up the sctp_ifa for the given address in the given VRF's address
 * hash table.  Returns NULL if the VRF or address is unknown.
 */
struct sctp_ifa *
sctp_find_ifa_by_addr(struct sockaddr *addr, uint32_t vrf_id, int holds_lock)
{
	struct sctp_ifa *sctp_ifap;
	struct sctp_vrf *vrf;
	struct sctp_ifalist *hash_head;
	uint32_t hash_of_addr;

	if (holds_lock == 0)
		SCTP_IPI_ADDR_RLOCK();

	vrf = sctp_find_vrf(vrf_id);
	if (vrf == NULL) {
stage_right:
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();
		return (NULL);
	}
	hash_of_addr = sctp_get_ifa_hash_val(addr);

	hash_head = &vrf->vrf_addr_hash[(hash_of_addr & vrf->vrf_addr_hashmark)];
	if (hash_head == NULL) {
		SCTP_PRINTF("hash_of_addr:%x mask:%x table:%x - ",
		    hash_of_addr, (uint32_t) vrf->vrf_addr_hashmark,
		    (uint32_t) (hash_of_addr & vrf->vrf_addr_hashmark));
		sctp_print_address(addr);
		SCTP_PRINTF("No such bucket for address\n");
		if (holds_lock == 0)
			SCTP_IPI_ADDR_RUNLOCK();

		return (NULL);
	}
	LIST_FOREACH(sctp_ifap, hash_head, next_bucket) {
		if (sctp_ifap == NULL) {
			/* defensive: a NULL entry means the list is corrupt */
#ifdef INVARIANTS
			panic("Huh LIST_FOREACH corrupt");
			goto stage_right;
#else
			SCTP_PRINTF("LIST corrupt of sctp_ifap's?\n");
			goto stage_right;
#endif
		}
		if (addr->sa_family != sctp_ifap->address.sa.sa_family)
			continue;
#ifdef INET
		if (addr->sa_family == AF_INET) {
			if (((struct sockaddr_in *)addr)->sin_addr.s_addr ==
			    sctp_ifap->address.sin.sin_addr.s_addr) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;
			}
		}
#endif
#ifdef INET6
		if (addr->sa_family == AF_INET6) {
			if (SCTP6_ARE_ADDR_EQUAL((struct sockaddr_in6 *)addr,
			    &sctp_ifap->address.sin6)) {
				/* found him. */
				if (holds_lock == 0)
					SCTP_IPI_ADDR_RUNLOCK();
				return (sctp_ifap);
				break;
			}
		}
#endif
	}
	if (holds_lock == 0)
		SCTP_IPI_ADDR_RUNLOCK();
	return (NULL);
}

/*
 * Called after the user has read data off the socket: decide whether the
 * receive window has opened up enough (by rwnd_req bytes) to warrant
 * sending a window-update SACK now.
 */
static void
sctp_user_rcvd(struct sctp_tcb *stcb, uint32_t * freed_so_far, int hold_rlock,
    uint32_t rwnd_req)
{
	/* User pulled some data, do we need a rwnd update?
	 */
	int r_unlocked = 0;
	uint32_t dif, rwnd;
	struct socket *so = NULL;

	if (stcb == NULL)
		return;

	/* hold a reference on the association while we work */
	atomic_add_int(&stcb->asoc.refcnt, 1);

	if (stcb->asoc.state & (SCTP_STATE_ABOUT_TO_BE_FREED |
	    SCTP_STATE_SHUTDOWN_RECEIVED |
	    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Pre-check If we are freeing no update */
		goto no_lock;
	}
	SCTP_INP_INCR_REF(stcb->sctp_ep);
	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) {
		goto out;
	}
	so = stcb->sctp_socket;
	if (so == NULL) {
		goto out;
	}
	atomic_add_int(&stcb->freed_by_sorcv_sincelast, *freed_so_far);
	/* Have you have freed enough to look */
	*freed_so_far = 0;
	/* Yep, its worth a look and the lock overhead */

	/* Figure out what the rwnd would be */
	rwnd = sctp_calc_rwnd(stcb, &stcb->asoc);
	if (rwnd >= stcb->asoc.my_last_reported_rwnd) {
		dif = rwnd - stcb->asoc.my_last_reported_rwnd;
	} else {
		dif = 0;
	}
	if (dif >= rwnd_req) {
		/* window opened by at least rwnd_req: send a window-update SACK */
		if (hold_rlock) {
			SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
			r_unlocked = 1;
		}
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/*
			 * One last check before we allow the guy possibly
			 * to get in. There is a race, where the guy has not
			 * reached the gate. In that case
			 */
			goto out;
		}
		SCTP_TCB_LOCK(stcb);
		if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
			/* No reports here */
			SCTP_TCB_UNLOCK(stcb);
			goto out;
		}
		SCTP_STAT_INCR(sctps_wu_sacks_sent);
		sctp_send_sack(stcb, SCTP_SO_LOCKED);

		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_USR_RCVD, SCTP_SO_LOCKED);
		/* make sure no timer is running */
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTPUTIL + SCTP_LOC_6);
		SCTP_TCB_UNLOCK(stcb);
	} else {
		/* Update how much we have pending */
		stcb->freed_by_sorcv_sincelast = dif;
	}
out:
	/* re-acquire the read lock if we dropped it above */
	if (so && r_unlocked && hold_rlock) {
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
	}
	SCTP_INP_DECR_REF(stcb->sctp_ep);
no_lock:
	atomic_add_int(&stcb->asoc.refcnt, -1);
	return;
}

int
sctp_sorecvmsg(struct socket *so,
    struct uio *uio,
    struct mbuf **mp,
    struct sockaddr *from,
    int fromlen,
    int *msg_flags,
    struct sctp_sndrcvinfo *sinfo,
    int filling_sinfo)
{
	/*
	 * MSG flags we will look at MSG_DONTWAIT - non-blocking IO.
	 * MSG_PEEK - Look don't touch :-D (only valid with OUT mbuf copy
	 * mp=NULL thus uio is the copy method to userland) MSG_WAITALL - ??
5145 * On the way out we may send out any combination of: 5146 * MSG_NOTIFICATION MSG_EOR 5147 * 5148 */ 5149 struct sctp_inpcb *inp = NULL; 5150 int my_len = 0; 5151 int cp_len = 0, error = 0; 5152 struct sctp_queued_to_read *control = NULL, *ctl = NULL, *nxt = NULL; 5153 struct mbuf *m = NULL; 5154 struct sctp_tcb *stcb = NULL; 5155 int wakeup_read_socket = 0; 5156 int freecnt_applied = 0; 5157 int out_flags = 0, in_flags = 0; 5158 int block_allowed = 1; 5159 uint32_t freed_so_far = 0; 5160 uint32_t copied_so_far = 0; 5161 int in_eeor_mode = 0; 5162 int no_rcv_needed = 0; 5163 uint32_t rwnd_req = 0; 5164 int hold_sblock = 0; 5165 int hold_rlock = 0; 5166 int slen = 0; 5167 uint32_t held_length = 0; 5168 int sockbuf_lock = 0; 5169 5170 if (uio == NULL) { 5171 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5172 return (EINVAL); 5173 } 5174 if (msg_flags) { 5175 in_flags = *msg_flags; 5176 if (in_flags & MSG_PEEK) 5177 SCTP_STAT_INCR(sctps_read_peeks); 5178 } else { 5179 in_flags = 0; 5180 } 5181 slen = uio->uio_resid; 5182 5183 /* Pull in and set up our int flags */ 5184 if (in_flags & MSG_OOB) { 5185 /* Out of band's NOT supported */ 5186 return (EOPNOTSUPP); 5187 } 5188 if ((in_flags & MSG_PEEK) && (mp != NULL)) { 5189 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 5190 return (EINVAL); 5191 } 5192 if ((in_flags & (MSG_DONTWAIT 5193 | MSG_NBIO 5194 )) || 5195 SCTP_SO_IS_NBIO(so)) { 5196 block_allowed = 0; 5197 } 5198 /* setup the endpoint */ 5199 inp = (struct sctp_inpcb *)so->so_pcb; 5200 if (inp == NULL) { 5201 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, EFAULT); 5202 return (EFAULT); 5203 } 5204 rwnd_req = (SCTP_SB_LIMIT_RCV(so) >> SCTP_RWND_HIWAT_SHIFT); 5205 /* Must be at least a MTU's worth */ 5206 if (rwnd_req < SCTP_MIN_RWND) 5207 rwnd_req = SCTP_MIN_RWND; 5208 in_eeor_mode = sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXPLICIT_EOR); 5209 if (SCTP_BASE_SYSCTL(sctp_logging_level) & 
SCTP_RECV_RWND_LOGGING_ENABLE) { 5210 sctp_misc_ints(SCTP_SORECV_ENTER, 5211 rwnd_req, in_eeor_mode, so->so_rcv.sb_cc, uio->uio_resid); 5212 } 5213 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_RECV_RWND_LOGGING_ENABLE) { 5214 sctp_misc_ints(SCTP_SORECV_ENTERPL, 5215 rwnd_req, block_allowed, so->so_rcv.sb_cc, uio->uio_resid); 5216 } 5217 error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0)); 5218 if (error) { 5219 goto release_unlocked; 5220 } 5221 sockbuf_lock = 1; 5222restart: 5223 5224 5225restart_nosblocks: 5226 if (hold_sblock == 0) { 5227 SOCKBUF_LOCK(&so->so_rcv); 5228 hold_sblock = 1; 5229 } 5230 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5231 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5232 goto out; 5233 } 5234 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && (so->so_rcv.sb_cc == 0)) { 5235 if (so->so_error) { 5236 error = so->so_error; 5237 if ((in_flags & MSG_PEEK) == 0) 5238 so->so_error = 0; 5239 goto out; 5240 } else { 5241 if (so->so_rcv.sb_cc == 0) { 5242 /* indicate EOF */ 5243 error = 0; 5244 goto out; 5245 } 5246 } 5247 } 5248 if ((so->so_rcv.sb_cc <= held_length) && block_allowed) { 5249 /* we need to wait for data */ 5250 if ((so->so_rcv.sb_cc == 0) && 5251 ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5252 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL))) { 5253 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5254 /* 5255 * For active open side clear flags for 5256 * re-use passive open is blocked by 5257 * connect. 
5258 */ 5259 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5260 /* 5261 * You were aborted, passive side 5262 * always hits here 5263 */ 5264 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5265 error = ECONNRESET; 5266 } 5267 so->so_state &= ~(SS_ISCONNECTING | 5268 SS_ISDISCONNECTING | 5269 SS_ISCONFIRMING | 5270 SS_ISCONNECTED); 5271 if (error == 0) { 5272 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5273 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5274 error = ENOTCONN; 5275 } 5276 } 5277 goto out; 5278 } 5279 } 5280 error = sbwait(&so->so_rcv); 5281 if (error) { 5282 goto out; 5283 } 5284 held_length = 0; 5285 goto restart_nosblocks; 5286 } else if (so->so_rcv.sb_cc == 0) { 5287 if (so->so_error) { 5288 error = so->so_error; 5289 if ((in_flags & MSG_PEEK) == 0) 5290 so->so_error = 0; 5291 } else { 5292 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 5293 (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 5294 if ((inp->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0) { 5295 /* 5296 * For active open side clear flags 5297 * for re-use passive open is 5298 * blocked by connect. 
5299 */ 5300 if (inp->sctp_flags & SCTP_PCB_FLAGS_WAS_ABORTED) { 5301 /* 5302 * You were aborted, passive 5303 * side always hits here 5304 */ 5305 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ECONNRESET); 5306 error = ECONNRESET; 5307 } 5308 so->so_state &= ~(SS_ISCONNECTING | 5309 SS_ISDISCONNECTING | 5310 SS_ISCONFIRMING | 5311 SS_ISCONNECTED); 5312 if (error == 0) { 5313 if ((inp->sctp_flags & SCTP_PCB_FLAGS_WAS_CONNECTED) == 0) { 5314 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOTCONN); 5315 error = ENOTCONN; 5316 } 5317 } 5318 goto out; 5319 } 5320 } 5321 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EWOULDBLOCK); 5322 error = EWOULDBLOCK; 5323 } 5324 goto out; 5325 } 5326 if (hold_sblock == 1) { 5327 SOCKBUF_UNLOCK(&so->so_rcv); 5328 hold_sblock = 0; 5329 } 5330 /* we possibly have data we can read */ 5331 /* sa_ignore FREED_MEMORY */ 5332 control = TAILQ_FIRST(&inp->read_queue); 5333 if (control == NULL) { 5334 /* 5335 * This could be happening since the appender did the 5336 * increment but as not yet did the tailq insert onto the 5337 * read_queue 5338 */ 5339 if (hold_rlock == 0) { 5340 SCTP_INP_READ_LOCK(inp); 5341 } 5342 control = TAILQ_FIRST(&inp->read_queue); 5343 if ((control == NULL) && (so->so_rcv.sb_cc != 0)) { 5344#ifdef INVARIANTS 5345 panic("Huh, its non zero and nothing on control?"); 5346#endif 5347 so->so_rcv.sb_cc = 0; 5348 } 5349 SCTP_INP_READ_UNLOCK(inp); 5350 hold_rlock = 0; 5351 goto restart; 5352 } 5353 if ((control->length == 0) && 5354 (control->do_not_ref_stcb)) { 5355 /* 5356 * Clean up code for freeing assoc that left behind a 5357 * pdapi.. maybe a peer in EEOR that just closed after 5358 * sending and never indicated a EOR. 5359 */ 5360 if (hold_rlock == 0) { 5361 hold_rlock = 1; 5362 SCTP_INP_READ_LOCK(inp); 5363 } 5364 control->held_length = 0; 5365 if (control->data) { 5366 /* Hmm there is data here .. 
fix */ 5367 struct mbuf *m_tmp; 5368 int cnt = 0; 5369 5370 m_tmp = control->data; 5371 while (m_tmp) { 5372 cnt += SCTP_BUF_LEN(m_tmp); 5373 if (SCTP_BUF_NEXT(m_tmp) == NULL) { 5374 control->tail_mbuf = m_tmp; 5375 control->end_added = 1; 5376 } 5377 m_tmp = SCTP_BUF_NEXT(m_tmp); 5378 } 5379 control->length = cnt; 5380 } else { 5381 /* remove it */ 5382 TAILQ_REMOVE(&inp->read_queue, control, next); 5383 /* Add back any hiddend data */ 5384 sctp_free_remote_addr(control->whoFrom); 5385 sctp_free_a_readq(stcb, control); 5386 } 5387 if (hold_rlock) { 5388 hold_rlock = 0; 5389 SCTP_INP_READ_UNLOCK(inp); 5390 } 5391 goto restart; 5392 } 5393 if ((control->length == 0) && 5394 (control->end_added == 1)) { 5395 /* 5396 * Do we also need to check for (control->pdapi_aborted == 5397 * 1)? 5398 */ 5399 if (hold_rlock == 0) { 5400 hold_rlock = 1; 5401 SCTP_INP_READ_LOCK(inp); 5402 } 5403 TAILQ_REMOVE(&inp->read_queue, control, next); 5404 if (control->data) { 5405#ifdef INVARIANTS 5406 panic("control->data not null but control->length == 0"); 5407#else 5408 SCTP_PRINTF("Strange, data left in the control buffer. 
Cleaning up.\n"); 5409 sctp_m_freem(control->data); 5410 control->data = NULL; 5411#endif 5412 } 5413 if (control->aux_data) { 5414 sctp_m_free(control->aux_data); 5415 control->aux_data = NULL; 5416 } 5417 sctp_free_remote_addr(control->whoFrom); 5418 sctp_free_a_readq(stcb, control); 5419 if (hold_rlock) { 5420 hold_rlock = 0; 5421 SCTP_INP_READ_UNLOCK(inp); 5422 } 5423 goto restart; 5424 } 5425 if (control->length == 0) { 5426 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE)) && 5427 (filling_sinfo)) { 5428 /* find a more suitable one then this */ 5429 ctl = TAILQ_NEXT(control, next); 5430 while (ctl) { 5431 if ((ctl->stcb != control->stcb) && (ctl->length) && 5432 (ctl->some_taken || 5433 (ctl->spec_flags & M_NOTIFICATION) || 5434 ((ctl->do_not_ref_stcb == 0) && 5435 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0))) 5436 ) { 5437 /*- 5438 * If we have a different TCB next, and there is data 5439 * present. If we have already taken some (pdapi), OR we can 5440 * ref the tcb and no delivery as started on this stream, we 5441 * take it. Note we allow a notification on a different 5442 * assoc to be delivered.. 5443 */ 5444 control = ctl; 5445 goto found_one; 5446 } else if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_INTERLEAVE_STRMS)) && 5447 (ctl->length) && 5448 ((ctl->some_taken) || 5449 ((ctl->do_not_ref_stcb == 0) && 5450 ((ctl->spec_flags & M_NOTIFICATION) == 0) && 5451 (ctl->stcb->asoc.strmin[ctl->sinfo_stream].delivery_started == 0)))) { 5452 /*- 5453 * If we have the same tcb, and there is data present, and we 5454 * have the strm interleave feature present. Then if we have 5455 * taken some (pdapi) or we can refer to tht tcb AND we have 5456 * not started a delivery for this stream, we can take it. 5457 * Note we do NOT allow a notificaiton on the same assoc to 5458 * be delivered. 
5459 */ 5460 control = ctl; 5461 goto found_one; 5462 } 5463 ctl = TAILQ_NEXT(ctl, next); 5464 } 5465 } 5466 /* 5467 * if we reach here, not suitable replacement is available 5468 * <or> fragment interleave is NOT on. So stuff the sb_cc 5469 * into the our held count, and its time to sleep again. 5470 */ 5471 held_length = so->so_rcv.sb_cc; 5472 control->held_length = so->so_rcv.sb_cc; 5473 goto restart; 5474 } 5475 /* Clear the held length since there is something to read */ 5476 control->held_length = 0; 5477 if (hold_rlock) { 5478 SCTP_INP_READ_UNLOCK(inp); 5479 hold_rlock = 0; 5480 } 5481found_one: 5482 /* 5483 * If we reach here, control has a some data for us to read off. 5484 * Note that stcb COULD be NULL. 5485 */ 5486 control->some_taken++; 5487 if (hold_sblock) { 5488 SOCKBUF_UNLOCK(&so->so_rcv); 5489 hold_sblock = 0; 5490 } 5491 stcb = control->stcb; 5492 if (stcb) { 5493 if ((control->do_not_ref_stcb == 0) && 5494 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5495 if (freecnt_applied == 0) 5496 stcb = NULL; 5497 } else if (control->do_not_ref_stcb == 0) { 5498 /* you can't free it on me please */ 5499 /* 5500 * The lock on the socket buffer protects us so the 5501 * free code will stop. But since we used the 5502 * socketbuf lock and the sender uses the tcb_lock 5503 * to increment, we need to use the atomic add to 5504 * the refcnt 5505 */ 5506 if (freecnt_applied) { 5507#ifdef INVARIANTS 5508 panic("refcnt already incremented"); 5509#else 5510 SCTP_PRINTF("refcnt already incremented?\n"); 5511#endif 5512 } else { 5513 atomic_add_int(&stcb->asoc.refcnt, 1); 5514 freecnt_applied = 1; 5515 } 5516 /* 5517 * Setup to remember how much we have not yet told 5518 * the peer our rwnd has opened up. Note we grab the 5519 * value from the tcb from last time. Note too that 5520 * sack sending clears this when a sack is sent, 5521 * which is fine. 
Once we hit the rwnd_req, we then 5522 * will go to the sctp_user_rcvd() that will not 5523 * lock until it KNOWs it MUST send a WUP-SACK. 5524 */ 5525 freed_so_far = stcb->freed_by_sorcv_sincelast; 5526 stcb->freed_by_sorcv_sincelast = 0; 5527 } 5528 } 5529 if (stcb && 5530 ((control->spec_flags & M_NOTIFICATION) == 0) && 5531 control->do_not_ref_stcb == 0) { 5532 stcb->asoc.strmin[control->sinfo_stream].delivery_started = 1; 5533 } 5534 /* First lets get off the sinfo and sockaddr info */ 5535 if ((sinfo) && filling_sinfo) { 5536 memcpy(sinfo, control, sizeof(struct sctp_nonpad_sndrcvinfo)); 5537 nxt = TAILQ_NEXT(control, next); 5538 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 5539 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 5540 struct sctp_extrcvinfo *s_extra; 5541 5542 s_extra = (struct sctp_extrcvinfo *)sinfo; 5543 if ((nxt) && 5544 (nxt->length)) { 5545 s_extra->sreinfo_next_flags = SCTP_NEXT_MSG_AVAIL; 5546 if (nxt->sinfo_flags & SCTP_UNORDERED) { 5547 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_UNORDERED; 5548 } 5549 if (nxt->spec_flags & M_NOTIFICATION) { 5550 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_IS_NOTIFICATION; 5551 } 5552 s_extra->sreinfo_next_aid = nxt->sinfo_assoc_id; 5553 s_extra->sreinfo_next_length = nxt->length; 5554 s_extra->sreinfo_next_ppid = nxt->sinfo_ppid; 5555 s_extra->sreinfo_next_stream = nxt->sinfo_stream; 5556 if (nxt->tail_mbuf != NULL) { 5557 if (nxt->end_added) { 5558 s_extra->sreinfo_next_flags |= SCTP_NEXT_MSG_ISCOMPLETE; 5559 } 5560 } 5561 } else { 5562 /* 5563 * we explicitly 0 this, since the memcpy 5564 * got some other things beyond the older 5565 * sinfo_ that is on the control's structure 5566 * :-D 5567 */ 5568 nxt = NULL; 5569 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG; 5570 s_extra->sreinfo_next_aid = 0; 5571 s_extra->sreinfo_next_length = 0; 5572 s_extra->sreinfo_next_ppid = 0; 5573 s_extra->sreinfo_next_stream = 0; 5574 } 5575 } 5576 /* 5577 * update off the real current 
cum-ack, if we have an stcb. 5578 */ 5579 if ((control->do_not_ref_stcb == 0) && stcb) 5580 sinfo->sinfo_cumtsn = stcb->asoc.cumulative_tsn; 5581 /* 5582 * mask off the high bits, we keep the actual chunk bits in 5583 * there. 5584 */ 5585 sinfo->sinfo_flags &= 0x00ff; 5586 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 5587 sinfo->sinfo_flags |= SCTP_UNORDERED; 5588 } 5589 } 5590#ifdef SCTP_ASOCLOG_OF_TSNS 5591 { 5592 int index, newindex; 5593 struct sctp_pcbtsn_rlog *entry; 5594 5595 do { 5596 index = inp->readlog_index; 5597 newindex = index + 1; 5598 if (newindex >= SCTP_READ_LOG_SIZE) { 5599 newindex = 0; 5600 } 5601 } while (atomic_cmpset_int(&inp->readlog_index, index, newindex) == 0); 5602 entry = &inp->readlog[index]; 5603 entry->vtag = control->sinfo_assoc_id; 5604 entry->strm = control->sinfo_stream; 5605 entry->seq = control->sinfo_ssn; 5606 entry->sz = control->length; 5607 entry->flgs = control->sinfo_flags; 5608 } 5609#endif 5610 if (fromlen && from) { 5611 cp_len = min((size_t)fromlen, (size_t)control->whoFrom->ro._l_addr.sa.sa_len); 5612 switch (control->whoFrom->ro._l_addr.sa.sa_family) { 5613#ifdef INET6 5614 case AF_INET6: 5615 ((struct sockaddr_in6 *)from)->sin6_port = control->port_from; 5616 break; 5617#endif 5618#ifdef INET 5619 case AF_INET: 5620 ((struct sockaddr_in *)from)->sin_port = control->port_from; 5621 break; 5622#endif 5623 default: 5624 break; 5625 } 5626 memcpy(from, &control->whoFrom->ro._l_addr, cp_len); 5627 5628#if defined(INET) && defined(INET6) 5629 if ((sctp_is_feature_on(inp, SCTP_PCB_FLAGS_NEEDS_MAPPED_V4)) && 5630 (from->sa_family == AF_INET) && 5631 ((size_t)fromlen >= sizeof(struct sockaddr_in6))) { 5632 struct sockaddr_in *sin; 5633 struct sockaddr_in6 sin6; 5634 5635 sin = (struct sockaddr_in *)from; 5636 bzero(&sin6, sizeof(sin6)); 5637 sin6.sin6_family = AF_INET6; 5638 sin6.sin6_len = sizeof(struct sockaddr_in6); 5639 sin6.sin6_addr.s6_addr32[2] = htonl(0xffff); 5640 bcopy(&sin->sin_addr, 5641 
&sin6.sin6_addr.s6_addr32[3], 5642 sizeof(sin6.sin6_addr.s6_addr32[3])); 5643 sin6.sin6_port = sin->sin_port; 5644 memcpy(from, &sin6, sizeof(struct sockaddr_in6)); 5645 } 5646#endif 5647#ifdef INET6 5648 { 5649 struct sockaddr_in6 lsa6, *from6; 5650 5651 from6 = (struct sockaddr_in6 *)from; 5652 sctp_recover_scope_mac(from6, (&lsa6)); 5653 } 5654#endif 5655 } 5656 /* now copy out what data we can */ 5657 if (mp == NULL) { 5658 /* copy out each mbuf in the chain up to length */ 5659get_more_data: 5660 m = control->data; 5661 while (m) { 5662 /* Move out all we can */ 5663 cp_len = (int)uio->uio_resid; 5664 my_len = (int)SCTP_BUF_LEN(m); 5665 if (cp_len > my_len) { 5666 /* not enough in this buf */ 5667 cp_len = my_len; 5668 } 5669 if (hold_rlock) { 5670 SCTP_INP_READ_UNLOCK(inp); 5671 hold_rlock = 0; 5672 } 5673 if (cp_len > 0) 5674 error = uiomove(mtod(m, char *), cp_len, uio); 5675 /* re-read */ 5676 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5677 goto release; 5678 } 5679 if ((control->do_not_ref_stcb == 0) && stcb && 5680 stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) { 5681 no_rcv_needed = 1; 5682 } 5683 if (error) { 5684 /* error we are out of here */ 5685 goto release; 5686 } 5687 if ((SCTP_BUF_NEXT(m) == NULL) && 5688 (cp_len >= SCTP_BUF_LEN(m)) && 5689 ((control->end_added == 0) || 5690 (control->end_added && 5691 (TAILQ_NEXT(control, next) == NULL))) 5692 ) { 5693 SCTP_INP_READ_LOCK(inp); 5694 hold_rlock = 1; 5695 } 5696 if (cp_len == SCTP_BUF_LEN(m)) { 5697 if ((SCTP_BUF_NEXT(m) == NULL) && 5698 (control->end_added)) { 5699 out_flags |= MSG_EOR; 5700 if ((control->do_not_ref_stcb == 0) && 5701 (control->stcb != NULL) && 5702 ((control->spec_flags & M_NOTIFICATION) == 0)) 5703 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5704 } 5705 if (control->spec_flags & M_NOTIFICATION) { 5706 out_flags |= MSG_NOTIFICATION; 5707 } 5708 /* we ate up the mbuf */ 5709 if (in_flags & MSG_PEEK) { 5710 /* just looking */ 5711 m = 
SCTP_BUF_NEXT(m); 5712 copied_so_far += cp_len; 5713 } else { 5714 /* dispose of the mbuf */ 5715 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5716 sctp_sblog(&so->so_rcv, 5717 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 5718 } 5719 sctp_sbfree(control, stcb, &so->so_rcv, m); 5720 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5721 sctp_sblog(&so->so_rcv, 5722 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBRESULT, 0); 5723 } 5724 copied_so_far += cp_len; 5725 freed_so_far += cp_len; 5726 freed_so_far += MSIZE; 5727 atomic_subtract_int(&control->length, cp_len); 5728 control->data = sctp_m_free(m); 5729 m = control->data; 5730 /* 5731 * been through it all, must hold sb 5732 * lock ok to null tail 5733 */ 5734 if (control->data == NULL) { 5735#ifdef INVARIANTS 5736 if ((control->end_added == 0) || 5737 (TAILQ_NEXT(control, next) == NULL)) { 5738 /* 5739 * If the end is not 5740 * added, OR the 5741 * next is NOT null 5742 * we MUST have the 5743 * lock. 5744 */ 5745 if (mtx_owned(&inp->inp_rdata_mtx) == 0) { 5746 panic("Hmm we don't own the lock?"); 5747 } 5748 } 5749#endif 5750 control->tail_mbuf = NULL; 5751#ifdef INVARIANTS 5752 if ((control->end_added) && ((out_flags & MSG_EOR) == 0)) { 5753 panic("end_added, nothing left and no MSG_EOR"); 5754 } 5755#endif 5756 } 5757 } 5758 } else { 5759 /* Do we need to trim the mbuf? */ 5760 if (control->spec_flags & M_NOTIFICATION) { 5761 out_flags |= MSG_NOTIFICATION; 5762 } 5763 if ((in_flags & MSG_PEEK) == 0) { 5764 SCTP_BUF_RESV_UF(m, cp_len); 5765 SCTP_BUF_LEN(m) -= cp_len; 5766 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5767 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBFREE, cp_len); 5768 } 5769 atomic_subtract_int(&so->so_rcv.sb_cc, cp_len); 5770 if ((control->do_not_ref_stcb == 0) && 5771 stcb) { 5772 atomic_subtract_int(&stcb->asoc.sb_cc, cp_len); 5773 } 5774 copied_so_far += cp_len; 5775 freed_so_far += cp_len; 5776 freed_so_far += MSIZE; 5777 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 5778 sctp_sblog(&so->so_rcv, control->do_not_ref_stcb ? NULL : stcb, 5779 SCTP_LOG_SBRESULT, 0); 5780 } 5781 atomic_subtract_int(&control->length, cp_len); 5782 } else { 5783 copied_so_far += cp_len; 5784 } 5785 } 5786 if ((out_flags & MSG_EOR) || (uio->uio_resid == 0)) { 5787 break; 5788 } 5789 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5790 (control->do_not_ref_stcb == 0) && 5791 (freed_so_far >= rwnd_req)) { 5792 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5793 } 5794 } /* end while(m) */ 5795 /* 5796 * At this point we have looked at it all and we either have 5797 * a MSG_EOR/or read all the user wants... <OR> 5798 * control->length == 0. 5799 */ 5800 if ((out_flags & MSG_EOR) && ((in_flags & MSG_PEEK) == 0)) { 5801 /* we are done with this control */ 5802 if (control->length == 0) { 5803 if (control->data) { 5804#ifdef INVARIANTS 5805 panic("control->data not null at read eor?"); 5806#else 5807 SCTP_PRINTF("Strange, data left in the control buffer .. invarients would panic?\n"); 5808 sctp_m_freem(control->data); 5809 control->data = NULL; 5810#endif 5811 } 5812 done_with_control: 5813 if (TAILQ_NEXT(control, next) == NULL) { 5814 /* 5815 * If we don't have a next we need a 5816 * lock, if there is a next 5817 * interrupt is filling ahead of us 5818 * and we don't need a lock to 5819 * remove this guy (which is the 5820 * head of the queue). 
5821 */ 5822 if (hold_rlock == 0) { 5823 SCTP_INP_READ_LOCK(inp); 5824 hold_rlock = 1; 5825 } 5826 } 5827 TAILQ_REMOVE(&inp->read_queue, control, next); 5828 /* Add back any hiddend data */ 5829 if (control->held_length) { 5830 held_length = 0; 5831 control->held_length = 0; 5832 wakeup_read_socket = 1; 5833 } 5834 if (control->aux_data) { 5835 sctp_m_free(control->aux_data); 5836 control->aux_data = NULL; 5837 } 5838 no_rcv_needed = control->do_not_ref_stcb; 5839 sctp_free_remote_addr(control->whoFrom); 5840 control->data = NULL; 5841 sctp_free_a_readq(stcb, control); 5842 control = NULL; 5843 if ((freed_so_far >= rwnd_req) && 5844 (no_rcv_needed == 0)) 5845 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5846 5847 } else { 5848 /* 5849 * The user did not read all of this 5850 * message, turn off the returned MSG_EOR 5851 * since we are leaving more behind on the 5852 * control to read. 5853 */ 5854#ifdef INVARIANTS 5855 if (control->end_added && 5856 (control->data == NULL) && 5857 (control->tail_mbuf == NULL)) { 5858 panic("Gak, control->length is corrupt?"); 5859 } 5860#endif 5861 no_rcv_needed = control->do_not_ref_stcb; 5862 out_flags &= ~MSG_EOR; 5863 } 5864 } 5865 if (out_flags & MSG_EOR) { 5866 goto release; 5867 } 5868 if ((uio->uio_resid == 0) || 5869 ((in_eeor_mode) && (copied_so_far >= max(so->so_rcv.sb_lowat, 1))) 5870 ) { 5871 goto release; 5872 } 5873 /* 5874 * If I hit here the receiver wants more and this message is 5875 * NOT done (pd-api). So two questions. Can we block? if not 5876 * we are done. Did the user NOT set MSG_WAITALL? 5877 */ 5878 if (block_allowed == 0) { 5879 goto release; 5880 } 5881 /* 5882 * We need to wait for more data a few things: - We don't 5883 * sbunlock() so we don't get someone else reading. - We 5884 * must be sure to account for the case where what is added 5885 * is NOT to our control when we wakeup. 
5886 */ 5887 5888 /* 5889 * Do we need to tell the transport a rwnd update might be 5890 * needed before we go to sleep? 5891 */ 5892 if (((stcb) && (in_flags & MSG_PEEK) == 0) && 5893 ((freed_so_far >= rwnd_req) && 5894 (control->do_not_ref_stcb == 0) && 5895 (no_rcv_needed == 0))) { 5896 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 5897 } 5898wait_some_more: 5899 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 5900 goto release; 5901 } 5902 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) 5903 goto release; 5904 5905 if (hold_rlock == 1) { 5906 SCTP_INP_READ_UNLOCK(inp); 5907 hold_rlock = 0; 5908 } 5909 if (hold_sblock == 0) { 5910 SOCKBUF_LOCK(&so->so_rcv); 5911 hold_sblock = 1; 5912 } 5913 if ((copied_so_far) && (control->length == 0) && 5914 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_FRAG_INTERLEAVE))) { 5915 goto release; 5916 } 5917 if (so->so_rcv.sb_cc <= control->held_length) { 5918 error = sbwait(&so->so_rcv); 5919 if (error) { 5920 goto release; 5921 } 5922 control->held_length = 0; 5923 } 5924 if (hold_sblock) { 5925 SOCKBUF_UNLOCK(&so->so_rcv); 5926 hold_sblock = 0; 5927 } 5928 if (control->length == 0) { 5929 /* still nothing here */ 5930 if (control->end_added == 1) { 5931 /* he aborted, or is done i.e.did a shutdown */ 5932 out_flags |= MSG_EOR; 5933 if (control->pdapi_aborted) { 5934 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5935 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5936 5937 out_flags |= MSG_TRUNC; 5938 } else { 5939 if ((control->do_not_ref_stcb == 0) && ((control->spec_flags & M_NOTIFICATION) == 0)) 5940 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5941 } 5942 goto done_with_control; 5943 } 5944 if (so->so_rcv.sb_cc > held_length) { 5945 control->held_length = so->so_rcv.sb_cc; 5946 held_length = 0; 5947 } 5948 goto wait_some_more; 5949 } else if (control->data == NULL) { 5950 /* 5951 * we must re-sync since data is probably being 
5952 * added 5953 */ 5954 SCTP_INP_READ_LOCK(inp); 5955 if ((control->length > 0) && (control->data == NULL)) { 5956 /* 5957 * big trouble.. we have the lock and its 5958 * corrupt? 5959 */ 5960#ifdef INVARIANTS 5961 panic("Impossible data==NULL length !=0"); 5962#endif 5963 out_flags |= MSG_EOR; 5964 out_flags |= MSG_TRUNC; 5965 control->length = 0; 5966 SCTP_INP_READ_UNLOCK(inp); 5967 goto done_with_control; 5968 } 5969 SCTP_INP_READ_UNLOCK(inp); 5970 /* We will fall around to get more data */ 5971 } 5972 goto get_more_data; 5973 } else { 5974 /*- 5975 * Give caller back the mbuf chain, 5976 * store in uio_resid the length 5977 */ 5978 wakeup_read_socket = 0; 5979 if ((control->end_added == 0) || 5980 (TAILQ_NEXT(control, next) == NULL)) { 5981 /* Need to get rlock */ 5982 if (hold_rlock == 0) { 5983 SCTP_INP_READ_LOCK(inp); 5984 hold_rlock = 1; 5985 } 5986 } 5987 if (control->end_added) { 5988 out_flags |= MSG_EOR; 5989 if ((control->do_not_ref_stcb == 0) && 5990 (control->stcb != NULL) && 5991 ((control->spec_flags & M_NOTIFICATION) == 0)) 5992 control->stcb->asoc.strmin[control->sinfo_stream].delivery_started = 0; 5993 } 5994 if (control->spec_flags & M_NOTIFICATION) { 5995 out_flags |= MSG_NOTIFICATION; 5996 } 5997 uio->uio_resid = control->length; 5998 *mp = control->data; 5999 m = control->data; 6000 while (m) { 6001 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6002 sctp_sblog(&so->so_rcv, 6003 control->do_not_ref_stcb ? NULL : stcb, SCTP_LOG_SBFREE, SCTP_BUF_LEN(m)); 6004 } 6005 sctp_sbfree(control, stcb, &so->so_rcv, m); 6006 freed_so_far += SCTP_BUF_LEN(m); 6007 freed_so_far += MSIZE; 6008 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SB_LOGGING_ENABLE) { 6009 sctp_sblog(&so->so_rcv, 6010 control->do_not_ref_stcb ? 
NULL : stcb, SCTP_LOG_SBRESULT, 0); 6011 } 6012 m = SCTP_BUF_NEXT(m); 6013 } 6014 control->data = control->tail_mbuf = NULL; 6015 control->length = 0; 6016 if (out_flags & MSG_EOR) { 6017 /* Done with this control */ 6018 goto done_with_control; 6019 } 6020 } 6021release: 6022 if (hold_rlock == 1) { 6023 SCTP_INP_READ_UNLOCK(inp); 6024 hold_rlock = 0; 6025 } 6026 if (hold_sblock == 1) { 6027 SOCKBUF_UNLOCK(&so->so_rcv); 6028 hold_sblock = 0; 6029 } 6030 sbunlock(&so->so_rcv); 6031 sockbuf_lock = 0; 6032 6033release_unlocked: 6034 if (hold_sblock) { 6035 SOCKBUF_UNLOCK(&so->so_rcv); 6036 hold_sblock = 0; 6037 } 6038 if ((stcb) && (in_flags & MSG_PEEK) == 0) { 6039 if ((freed_so_far >= rwnd_req) && 6040 (control && (control->do_not_ref_stcb == 0)) && 6041 (no_rcv_needed == 0)) 6042 sctp_user_rcvd(stcb, &freed_so_far, hold_rlock, rwnd_req); 6043 } 6044out: 6045 if (msg_flags) { 6046 *msg_flags = out_flags; 6047 } 6048 if (((out_flags & MSG_EOR) == 0) && 6049 ((in_flags & MSG_PEEK) == 0) && 6050 (sinfo) && 6051 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO) || 6052 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO))) { 6053 struct sctp_extrcvinfo *s_extra; 6054 6055 s_extra = (struct sctp_extrcvinfo *)sinfo; 6056 s_extra->sreinfo_next_flags = SCTP_NO_NEXT_MSG; 6057 } 6058 if (hold_rlock == 1) { 6059 SCTP_INP_READ_UNLOCK(inp); 6060 } 6061 if (hold_sblock) { 6062 SOCKBUF_UNLOCK(&so->so_rcv); 6063 } 6064 if (sockbuf_lock) { 6065 sbunlock(&so->so_rcv); 6066 } 6067 if (freecnt_applied) { 6068 /* 6069 * The lock on the socket buffer protects us so the free 6070 * code will stop. But since we used the socketbuf lock and 6071 * the sender uses the tcb_lock to increment, we need to use 6072 * the atomic add to the refcnt. 
#ifdef SCTP_MBUF_LOGGING
struct mbuf *
sctp_m_free(struct mbuf *m)
{
	/*
	 * Logging wrapper around m_free(): when mbuf logging is enabled,
	 * record the release of any mbuf carrying external storage before
	 * handing it back to the allocator.  Returns the next mbuf in the
	 * chain, exactly like m_free().
	 */
	if ((SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) &&
	    SCTP_BUF_IS_EXTENDED(m)) {
		sctp_log_mb(m, SCTP_MBUF_IFREE);
	}
	return (m_free(m));
}

void
sctp_m_freem(struct mbuf *mb)
{
	/*
	 * Free a whole chain one link at a time so that every mbuf passes
	 * through the logging wrapper sctp_m_free().
	 */
	struct mbuf *next;

	next = mb;
	while (next != NULL) {
		next = sctp_m_free(next);
	}
}

#endif
6148 */ 6149 wi = SCTP_ZONE_GET(SCTP_BASE_INFO(ipi_zone_laddr), struct sctp_laddr); 6150 if (wi == NULL) { 6151 SCTP_LTRACE_ERR_RET(NULL, NULL, NULL, SCTP_FROM_SCTPUTIL, ENOMEM); 6152 return (ENOMEM); 6153 } 6154 /* Now incr the count and int wi structure */ 6155 SCTP_INCR_LADDR_COUNT(); 6156 bzero(wi, sizeof(*wi)); 6157 (void)SCTP_GETTIME_TIMEVAL(&wi->start_time); 6158 wi->ifa = ifa; 6159 wi->action = SCTP_SET_PRIM_ADDR; 6160 atomic_add_int(&ifa->refcount, 1); 6161 6162 /* Now add it to the work queue */ 6163 SCTP_WQ_ADDR_LOCK(); 6164 /* 6165 * Should this really be a tailq? As it is we will process the 6166 * newest first :-0 6167 */ 6168 LIST_INSERT_HEAD(&SCTP_BASE_INFO(addr_wq), wi, sctp_nxt_addr); 6169 SCTP_WQ_ADDR_UNLOCK(); 6170 sctp_timer_start(SCTP_TIMER_TYPE_ADDR_WQ, 6171 (struct sctp_inpcb *)NULL, 6172 (struct sctp_tcb *)NULL, 6173 (struct sctp_nets *)NULL); 6174 return (0); 6175} 6176 6177 6178int 6179sctp_soreceive(struct socket *so, 6180 struct sockaddr **psa, 6181 struct uio *uio, 6182 struct mbuf **mp0, 6183 struct mbuf **controlp, 6184 int *flagsp) 6185{ 6186 int error, fromlen; 6187 uint8_t sockbuf[256]; 6188 struct sockaddr *from; 6189 struct sctp_extrcvinfo sinfo; 6190 int filling_sinfo = 1; 6191 struct sctp_inpcb *inp; 6192 6193 inp = (struct sctp_inpcb *)so->so_pcb; 6194 /* pickup the assoc we are reading from */ 6195 if (inp == NULL) { 6196 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6197 return (EINVAL); 6198 } 6199 if ((sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 6200 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 6201 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) || 6202 (controlp == NULL)) { 6203 /* user does not want the sndrcv ctl */ 6204 filling_sinfo = 0; 6205 } 6206 if (psa) { 6207 from = (struct sockaddr *)sockbuf; 6208 fromlen = sizeof(sockbuf); 6209 from->sa_len = 0; 6210 } else { 6211 from = NULL; 6212 fromlen = 0; 6213 } 6214 6215 if (filling_sinfo) { 6216 memset(&sinfo, 0, 
sizeof(struct sctp_extrcvinfo)); 6217 } 6218 error = sctp_sorecvmsg(so, uio, mp0, from, fromlen, flagsp, 6219 (struct sctp_sndrcvinfo *)&sinfo, filling_sinfo); 6220 if (controlp != NULL) { 6221 /* copy back the sinfo in a CMSG format */ 6222 if (filling_sinfo) 6223 *controlp = sctp_build_ctl_nchunk(inp, 6224 (struct sctp_sndrcvinfo *)&sinfo); 6225 else 6226 *controlp = NULL; 6227 } 6228 if (psa) { 6229 /* copy back the address info */ 6230 if (from && from->sa_len) { 6231 *psa = sodupsockaddr(from, M_NOWAIT); 6232 } else { 6233 *psa = NULL; 6234 } 6235 } 6236 return (error); 6237} 6238 6239 6240 6241 6242 6243int 6244sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr, 6245 int totaddr, int *error) 6246{ 6247 int added = 0; 6248 int i; 6249 struct sctp_inpcb *inp; 6250 struct sockaddr *sa; 6251 size_t incr = 0; 6252 6253#ifdef INET 6254 struct sockaddr_in *sin; 6255 6256#endif 6257#ifdef INET6 6258 struct sockaddr_in6 *sin6; 6259 6260#endif 6261 6262 sa = addr; 6263 inp = stcb->sctp_ep; 6264 *error = 0; 6265 for (i = 0; i < totaddr; i++) { 6266 switch (sa->sa_family) { 6267#ifdef INET 6268 case AF_INET: 6269 incr = sizeof(struct sockaddr_in); 6270 sin = (struct sockaddr_in *)sa; 6271 if ((sin->sin_addr.s_addr == INADDR_ANY) || 6272 (sin->sin_addr.s_addr == INADDR_BROADCAST) || 6273 IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) { 6274 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6275 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7); 6276 *error = EINVAL; 6277 goto out_now; 6278 } 6279 if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) { 6280 /* assoc gone no un-lock */ 6281 SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS); 6282 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7); 6283 *error = ENOBUFS; 6284 goto out_now; 6285 } 6286 added++; 6287 break; 6288#endif 6289#ifdef INET6 6290 case AF_INET6: 6291 
/*
 * sctp_connectx_helper_add() - walk a packed array of "totaddr" sockaddrs
 * and add each one to association "stcb" as a confirmed remote address.
 *
 * Returns the number of addresses successfully added.  On a bad address
 * or an allocation failure the association is torn down via
 * sctp_free_assoc() (stcb must not be used afterwards), *error is set to
 * EINVAL/ENOBUFS and the walk stops.
 */
int
sctp_connectx_helper_add(struct sctp_tcb *stcb, struct sockaddr *addr,
    int totaddr, int *error)
{
	int added = 0;
	int i;
	struct sctp_inpcb *inp;
	struct sockaddr *sa;
	size_t incr = 0;	/* byte step to the next packed sockaddr */

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	sa = addr;
	inp = stcb->sctp_ep;
	*error = 0;
	for (i = 0; i < totaddr; i++) {
		switch (sa->sa_family) {
#ifdef INET
		case AF_INET:
			incr = sizeof(struct sockaddr_in);
			sin = (struct sockaddr_in *)sa;
			/* Reject wildcard, broadcast and multicast peers. */
			if ((sin->sin_addr.s_addr == INADDR_ANY) ||
			    (sin->sin_addr.s_addr == INADDR_BROADCAST) ||
			    IN_MULTICAST(ntohl(sin->sin_addr.s_addr))) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				/* Frees the assoc; stcb is gone after this. */
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_7);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			incr = sizeof(struct sockaddr_in6);
			sin6 = (struct sockaddr_in6 *)sa;
			/* Reject unspecified and multicast peers. */
			if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr) ||
			    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				/* Frees the assoc; stcb is gone after this. */
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = EINVAL;
				goto out_now;
			}
			if (sctp_add_remote_addr(stcb, sa, NULL, SCTP_DONOT_SETSCOPE, SCTP_ADDR_IS_CONFIRMED)) {
				/* assoc gone no un-lock */
				SCTP_LTRACE_ERR_RET(NULL, stcb, NULL, SCTP_FROM_SCTPUTIL, ENOBUFS);
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_USRREQ + SCTP_LOC_8);
				*error = ENOBUFS;
				goto out_now;
			}
			added++;
			break;
#endif
		default:
			/*
			 * NOTE(review): an unknown family leaves "incr"
			 * unchanged (0 on the first iteration), so the loop
			 * re-examines the same bytes until i reaches
			 * totaddr.  Confirm callers only ever pass
			 * AF_INET/AF_INET6 entries.
			 */
			break;
		}
		sa = (struct sockaddr *)((caddr_t)sa + incr);
	}
out_now:
	return (added);
}
1; 6358 return (NULL); 6359 } 6360 (*num_v6) += 1; 6361 incr = sizeof(struct sockaddr_in6); 6362 if (sa->sa_len != incr) { 6363 SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL); 6364 *error = EINVAL; 6365 *bad_addr = 1; 6366 return (NULL); 6367 } 6368 break; 6369 } 6370#endif 6371 default: 6372 *totaddr = i; 6373 /* we are done */ 6374 break; 6375 } 6376 if (i == (size_t)*totaddr) { 6377 break; 6378 } 6379 SCTP_INP_INCR_REF(inp); 6380 stcb = sctp_findassociation_ep_addr(&inp, sa, NULL, NULL, NULL); 6381 if (stcb != NULL) { 6382 /* Already have or am bring up an association */ 6383 return (stcb); 6384 } else { 6385 SCTP_INP_DECR_REF(inp); 6386 } 6387 if ((at + incr) > (size_t)limit) { 6388 *totaddr = i; 6389 break; 6390 } 6391 sa = (struct sockaddr *)((caddr_t)sa + incr); 6392 } 6393 return ((struct sctp_tcb *)NULL); 6394} 6395 6396/* 6397 * sctp_bindx(ADD) for one address. 6398 * assumes all arguments are valid/checked by caller. 6399 */ 6400void 6401sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp, 6402 struct sockaddr *sa, sctp_assoc_t assoc_id, 6403 uint32_t vrf_id, int *error, void *p) 6404{ 6405 struct sockaddr *addr_touse; 6406 6407#ifdef INET6 6408 struct sockaddr_in sin; 6409 6410#endif 6411 6412 /* see if we're bound all already! 
/*
 * sctp_bindx(ADD) for one address.
 * assumes all arguments are valid/checked by caller.
 *
 * Validates the family/length of "sa" against the endpoint's mode
 * (v6-only, bound-v6, etc.), converts a v4-mapped v6 address to plain
 * v4, and then either performs the initial bind (unbound endpoint) or
 * adds the address to an already-bound endpoint.  Errors are reported
 * through *error; nothing is returned.
 */
void
sctp_bindx_add_address(struct socket *so, struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error, void *p)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;	/* scratch for v4-mapped -> v4 conversion */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* sa_len must match a full sockaddr_in6 exactly. */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind v4-mapped on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Work with the embedded v4 address from here on. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	if (inp->sctp_flags & SCTP_PCB_FLAGS_UNBOUND) {
		/* Endpoint not yet bound: this becomes the initial bind. */
		if (p == NULL) {
			/* Can't get proc for Net/Open BSD */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		*error = sctp_inpcb_bind(so, addr_touse, NULL, p);
		return;
	}
	/*
	 * No locks required here since bind and mgmt_ep_sa all do their own
	 * locking. If we do something for the FIX: below we may need to
	 * lock in that case.
	 */
	if (assoc_id == 0) {
		/* add the address */
		struct sctp_inpcb *lep;
		/*
		 * NOTE(review): the port is read through a sockaddr_in
		 * pointer even when addr_touse is AF_INET6; sin_port and
		 * sin6_port share the same offset -- confirm if any new
		 * address family is ever added.
		 */
		struct sockaddr_in *lsin = (struct sockaddr_in *)addr_touse;

		/* validate the incoming port */
		if ((lsin->sin_port != 0) &&
		    (lsin->sin_port != inp->sctp_lport)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		} else {
			/* user specified 0 port, set it to existing port */
			lsin->sin_port = inp->sctp_lport;
		}

		lep = sctp_pcb_findep(addr_touse, 1, 0, vrf_id);
		if (lep != NULL) {
			/*
			 * We must decrement the refcount since we have the
			 * ep already and are binding. No remove going on
			 * here.
			 */
			SCTP_INP_DECR_REF(lep);
		}
		if (lep == inp) {
			/* already bound to it.. ok */
			return;
		} else if (lep == NULL) {
			/* No other endpoint owns it; add it to this one. */
			((struct sockaddr_in *)addr_touse)->sin_port = 0;
			*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
			    SCTP_ADD_IP_ADDRESS,
			    vrf_id, NULL);
		} else {
			/* Another endpoint is already bound to it. */
			*error = EADDRINUSE;
		}
		if (*error)
			return;
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
/*
 * sctp_bindx(DELETE) for one address.
 * assumes all arguments are valid/checked by caller.
 *
 * Mirrors sctp_bindx_add_address(): validates the family/length of "sa"
 * against the endpoint's mode, converts a v4-mapped v6 address to plain
 * v4, then removes the address from the endpoint.  Errors are reported
 * through *error.
 */
void
sctp_bindx_delete_address(struct sctp_inpcb *inp,
    struct sockaddr *sa, sctp_assoc_t assoc_id,
    uint32_t vrf_id, int *error)
{
	struct sockaddr *addr_touse;

#ifdef INET6
	struct sockaddr_in sin;	/* scratch for v4-mapped -> v4 conversion */

#endif

	/* see if we're bound all already! */
	if (inp->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
		*error = EINVAL;
		return;
	}
	addr_touse = sa;
#ifdef INET6
	if (sa->sa_family == AF_INET6) {
		struct sockaddr_in6 *sin6;

		/* sa_len must match a full sockaddr_in6 exactly. */
		if (sa->sa_len != sizeof(struct sockaddr_in6)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) == 0) {
			/* can only bind v6 on PF_INET6 sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		sin6 = (struct sockaddr_in6 *)addr_touse;
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
			    SCTP_IPV6_V6ONLY(inp)) {
				/* can't bind mapped-v4 on PF_INET sockets */
				SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
				*error = EINVAL;
				return;
			}
			/* Operate on the embedded v4 address from here on. */
			in6_sin6_2_sin(&sin, sin6);
			addr_touse = (struct sockaddr *)&sin;
		}
	}
#endif
#ifdef INET
	if (sa->sa_family == AF_INET) {
		if (sa->sa_len != sizeof(struct sockaddr_in)) {
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
		if ((inp->sctp_flags & SCTP_PCB_FLAGS_BOUND_V6) &&
		    SCTP_IPV6_V6ONLY(inp)) {
			/* can't bind v4 on PF_INET sockets */
			SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTPUTIL, EINVAL);
			*error = EINVAL;
			return;
		}
	}
#endif
	/*
	 * No lock required mgmt_ep_sa does its own locking. If the FIX:
	 * below is ever changed we may need to lock before calling
	 * association level binding.
	 */
	if (assoc_id == 0) {
		/* delete the address */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}
 */
	if (assoc_id == 0) {
		/* delete the address from the endpoint's bound-address list */
		*error = sctp_addr_mgmt_ep_sa(inp, addr_touse,
		    SCTP_DEL_IP_ADDRESS,
		    vrf_id, NULL);
	} else {
		/*
		 * FIX: decide whether we allow assoc based bindx
		 */
	}
}

/*
 * returns the valid local address count for an assoc, taking into account
 * all scoping rules
 */
int
sctp_local_addr_count(struct sctp_tcb *stcb)
{
	int loopback_scope;

#if defined(INET)
	int ipv4_local_scope, ipv4_addr_legal;

#endif
#if defined (INET6)
	int local_scope, site_scope, ipv6_addr_legal;

#endif
	struct sctp_vrf *vrf;
	struct sctp_ifn *sctp_ifn;
	struct sctp_ifa *sctp_ifa;
	int count = 0;

	/* Turn on all the appropriate scopes */
	loopback_scope = stcb->asoc.scope.loopback_scope;
#if defined(INET)
	ipv4_local_scope = stcb->asoc.scope.ipv4_local_scope;
	ipv4_addr_legal = stcb->asoc.scope.ipv4_addr_legal;
#endif
#if defined(INET6)
	local_scope = stcb->asoc.scope.local_scope;
	site_scope = stcb->asoc.scope.site_scope;
	ipv6_addr_legal = stcb->asoc.scope.ipv6_addr_legal;
#endif
	/* the address lists are walked under the global address read lock */
	SCTP_IPI_ADDR_RLOCK();
	vrf = sctp_find_vrf(stcb->asoc.vrf_id);
	if (vrf == NULL) {
		/* no vrf, no addresses */
		SCTP_IPI_ADDR_RUNLOCK();
		return (0);
	}
	if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_BOUNDALL) {
		/*
		 * bound all case: go through all ifns on the vrf
		 */
		LIST_FOREACH(sctp_ifn, &vrf->ifnlist, next_ifn) {
			if ((loopback_scope == 0) &&
			    SCTP_IFN_IS_IFT_LOOP(sctp_ifn)) {
				continue;
			}
			LIST_FOREACH(sctp_ifa, &sctp_ifn->ifalist, next_ifa) {
				/* addresses restricted for this assoc don't count */
				if (sctp_is_addr_restricted(stcb, sctp_ifa))
					continue;
				switch (sctp_ifa->address.sa.sa_family) {
#ifdef INET
				case AF_INET:
					if (ipv4_addr_legal) {
						struct sockaddr_in *sin;

						sin = (struct sockaddr_in *)&sctp_ifa->address.sa;
						if (sin->sin_addr.s_addr == 0) {
							/*
							 * skip unspecified
							 * addrs
							 */
							continue;
						}
						if ((ipv4_local_scope == 0) &&
						    (IN4_ISPRIVATE_ADDRESS(&sin->sin_addr))) {
							continue;
						}
						/* count this one */
						count++;
					} else {
						continue;
					}
					break;
#endif
#ifdef INET6
				case AF_INET6:
					if (ipv6_addr_legal) {
						struct sockaddr_in6 *sin6;

						sin6 = (struct sockaddr_in6 *)&sctp_ifa->address.sa;
						if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
							continue;
						}
						if (IN6_IS_ADDR_LINKLOCAL(&sin6->sin6_addr)) {
							if (local_scope == 0)
								continue;
							if (sin6->sin6_scope_id == 0) {
								/* try to recover the embedded scope id */
								if (sa6_recoverscope(sin6) != 0)
									/* bad link local address */
									continue;
							}
						}
						if ((site_scope == 0) &&
						    (IN6_IS_ADDR_SITELOCAL(&sin6->sin6_addr))) {
							continue;
						}
						/* count this one */
						count++;
					}
					break;
#endif
				default:
					/* TSNH */
					break;
				}
			}
		}
	} else {
		/*
		 * subset bound case
		 */
		struct sctp_laddr *laddr;

		/* only addresses explicitly bound to the endpoint are candidates */
		LIST_FOREACH(laddr, &stcb->sctp_ep->sctp_addr_list,
		    sctp_nxt_addr) {
			if (sctp_is_addr_restricted(stcb, laddr->ifa)) {
				continue;
			}
			/* count this one */
			count++;
		}
	}
	SCTP_IPI_ADDR_RUNLOCK();
	return (count);
}

#if defined(SCTP_LOCAL_TRACE_BUF)

/*
 * Append one entry to the local SCTP trace ring buffer.
 * A slot index is reserved with an atomic compare-and-swap so that
 * concurrent tracers claim distinct slots.
 */
void
sctp_log_trace(uint32_t subsys, const char *str SCTP_UNUSED, uint32_t a, uint32_t b, uint32_t c, uint32_t d, uint32_t e, uint32_t f)
{
	uint32_t saveindex, newindex;

	/* atomically claim the next slot; the shared index wraps back to 1 */
	do {
		saveindex = SCTP_BASE_SYSCTL(sctp_log).index;
		if (saveindex >= SCTP_MAX_LOGGING_SIZE) {
			newindex = 1;
		} else {
			newindex = saveindex + 1;
		}
	} while (atomic_cmpset_int(&SCTP_BASE_SYSCTL(sctp_log).index, saveindex, newindex) == 0);
	if
(saveindex >= SCTP_MAX_LOGGING_SIZE) {
		/* we claimed the wrap-around slot: store into entry 0 */
		saveindex = 0;
	}
	/*
	 * NOTE(review): the slot index is reserved atomically above, but
	 * the stores below are not atomic; a writer that laps the ring
	 * could interleave with a slow tracer. Presumably acceptable for
	 * a debug trace facility -- confirm.
	 */
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].timestamp = SCTP_GET_CYCLECOUNT;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].subsys = subsys;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[0] = a;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[1] = b;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[2] = c;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[3] = d;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[4] = e;
	SCTP_BASE_SYSCTL(sctp_log).entry[saveindex].params[5] = f;
}

#endif
/*
 * Input hook for UDP-encapsulated SCTP: invoked by the kernel UDP
 * tunneling socket for each received datagram. Strips the UDP header
 * and hands the packet to the regular SCTP input path.
 */
static void
sctp_recv_udp_tunneled_packet(struct mbuf *m, int off, struct inpcb *ignored)
{
	struct ip *iph;

#ifdef INET6
	struct ip6_hdr *ip6;

#endif
	struct mbuf *sp, *last;
	struct udphdr *uhdr;
	uint16_t port;

	if ((m->m_flags & M_PKTHDR) == 0) {
		/* Can't handle one that is not a pkt hdr */
		goto out;
	}
	/* Pull the src port (left in network byte order, no conversion) */
	iph = mtod(m, struct ip *);
	uhdr = (struct udphdr *)((caddr_t)iph + off);
	port = uhdr->uh_sport;
	/*
	 * Split out the mbuf chain. Leave the IP header in m, place the
	 * rest in the sp.
 */
	sp = m_split(m, off, M_NOWAIT);
	if (sp == NULL) {
		/* Gak, drop packet, we can't do a split */
		goto out;
	}
	if (sp->m_pkthdr.len < sizeof(struct udphdr) + sizeof(struct sctphdr)) {
		/* Gak, packet can't have an SCTP header in it - too small */
		m_freem(sp);
		goto out;
	}
	/* Now pull up the UDP header and SCTP header together */
	sp = m_pullup(sp, sizeof(struct udphdr) + sizeof(struct sctphdr));
	if (sp == NULL) {
		/* Gak pullup failed (m_pullup frees the chain on failure) */
		goto out;
	}
	/* Trim out the UDP header */
	m_adj(sp, sizeof(struct udphdr));

	/* Now reconstruct the mbuf chain: append sp behind the IP header in m */
	for (last = m; last->m_next; last = last->m_next);
	last->m_next = sp;
	m->m_pkthdr.len += sp->m_pkthdr.len;
	iph = mtod(m, struct ip *);
	/* fix up the IP payload length and dispatch by IP version */
	switch (iph->ip_v) {
#ifdef INET
	case IPVERSION:
		iph->ip_len = htons(ntohs(iph->ip_len) - sizeof(struct udphdr));
		sctp_input_with_port(m, off, port);
		break;
#endif
#ifdef INET6
	case IPV6_VERSION >> 4:
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - sizeof(struct udphdr));
		sctp6_input_with_port(&m, &off, port);
		break;
#endif
	default:
		goto out;
		break;
	}
	return;
out:
	m_freem(m);
}

/*
 * Close the SCTP-over-UDP tunneling sockets (if any were created).
 */
void
sctp_over_udp_stop(void)
{
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
 */
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* tear down the v4 tunneling socket */
		soclose(SCTP_BASE_INFO(udp4_tun_socket));
		SCTP_BASE_INFO(udp4_tun_socket) = NULL;
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* tear down the v6 tunneling socket */
		soclose(SCTP_BASE_INFO(udp6_tun_socket));
		SCTP_BASE_INFO(udp6_tun_socket) = NULL;
	}
#endif
}

/*
 * Create and bind the kernel UDP socket(s) used for SCTP-over-UDP
 * encapsulation. Returns 0 on success or an errno; on any failure
 * partially created sockets are torn down via sctp_over_udp_stop().
 */
int
sctp_over_udp_start(void)
{
	uint16_t port;
	int ret;

#ifdef INET
	struct sockaddr_in sin;

#endif
#ifdef INET6
	struct sockaddr_in6 sin6;

#endif
	/*
	 * This function assumes sysctl caller holds sctp_sysctl_info_lock()
	 * for writing!
	 */
	port = SCTP_BASE_SYSCTL(sctp_udp_tunneling_port);
	/*
	 * NOTE(review): ntohs(port) == 0 is equivalent to port == 0; the
	 * value is otherwise treated as host byte order (htons() at the
	 * bind below) -- confirm the sysctl stores it in host order.
	 */
	if (ntohs(port) == 0) {
		/* Must have a port set */
		return (EINVAL);
	}
#ifdef INET
	if (SCTP_BASE_INFO(udp4_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET6
	if (SCTP_BASE_INFO(udp6_tun_socket) != NULL) {
		/* Already running -- must stop first */
		return (EALREADY);
	}
#endif
#ifdef INET
	if ((ret = socreate(PF_INET, &SCTP_BASE_INFO(udp4_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp4_tun_socket),
	    sctp_recv_udp_tunneled_packet))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port.
 */
	memset(&sin, 0, sizeof(struct sockaddr_in));
	sin.sin_len = sizeof(struct sockaddr_in);
	sin.sin_family = AF_INET;
	sin.sin_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp4_tun_socket),
	    (struct sockaddr *)&sin, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
#ifdef INET6
	if ((ret = socreate(PF_INET6, &SCTP_BASE_INFO(udp6_tun_socket),
	    SOCK_DGRAM, IPPROTO_UDP,
	    curthread->td_ucred, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Call the special UDP hook. */
	if ((ret = udp_set_kernel_tunneling(SCTP_BASE_INFO(udp6_tun_socket),
	    sctp_recv_udp_tunneled_packet))) {
		sctp_over_udp_stop();
		return (ret);
	}
	/* Ok, we have a socket, bind it to the port. */
	memset(&sin6, 0, sizeof(struct sockaddr_in6));
	sin6.sin6_len = sizeof(struct sockaddr_in6);
	sin6.sin6_family = AF_INET6;
	sin6.sin6_port = htons(port);
	if ((ret = sobind(SCTP_BASE_INFO(udp6_tun_socket),
	    (struct sockaddr *)&sin6, curthread))) {
		sctp_over_udp_stop();
		return (ret);
	}
#endif
	/* both families (as configured) are up and bound */
	return (0);
}