sctp_input.c revision 237540
1/*- 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * a) Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * b) Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the distribution. 15 * 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 237540 2012-06-24 21:25:54Z tuexen $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_crc32.h>
#include <netinet/udp.h>
#include <sys/smp.h>



/*
 * Stop the COOKIE or INIT retransmission timer on every destination
 * address of the association.  Used in INIT/COOKIE collision handling
 * so that no stale retransmission timer survives a state change.
 * Caller must hold the TCB lock (asserted below).
 */
static void
sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
{
	struct sctp_nets *net;

	/*
	 * This now not only stops all cookie timers it also stops any INIT
	 * timers as well. This will make sure that the timers are stopped
	 * in all collision cases.
	 */
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
		}
	}
}

/*
 * INIT handler.
 *
 * Validates an incoming INIT chunk (chunk length, non-zero initiate
 * tag, minimum a_rwnd, non-zero stream counts, AUTH parameters) and
 * aborts on any violation; otherwise answers with an INIT-ACK, or with
 * a SHUTDOWN-ACK when the association is already in
 * SHUTDOWN-ACK-SENT.  On the abort paths *abort_no_unlock is set when
 * a tcb exists (NOTE(review): presumably so the caller skips its
 * normal TCB unlock — confirm against the caller).
 */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, int *abort_no_unlock,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_init *init;
	struct mbuf *op_err;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	    stcb);
	if (stcb == NULL) {
		/* No association yet: pin the endpoint while we answer. */
		SCTP_INP_RLOCK(inp);
	}
	/* validate length */
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* validate parameters */
	init = &cp->init;
	if (init->initiate_tag == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_inbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_outbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	    offset + ntohs(cp->ch.chunk_length))) {
		/* auth parameter(s) error... send abort */
		sctp_abort_association(inp, stcb, m, iphlen, sh, NULL,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/*
	 * We are only accepting if we have a socket with positive
	 * so_qlimit.
	 */
	if ((stcb == NULL) &&
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (inp->sctp_socket == NULL) ||
	    (inp->sctp_socket->so_qlimit == 0))) {
		/*
		 * FIX ME ?? What about TCP model and we have a
		 * match/restart case? Actually no fix is needed. the lookup
		 * will always find the existing assoc so stcb would not be
		 * NULL. It may be questionable to do this since we COULD
		 * just send back the INIT-ACK and hope that the app did
		 * accept()'s by the time the COOKIE was sent. But there is
		 * a price to pay for COOKIE generation and I don't want to
		 * pay it on the chance that the app will actually do some
		 * accepts(). The App just looses and should NOT be in this
		 * state :-)
		 */
		if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
			/* No listener: refuse with an ABORT unless
			 * blackholing is enabled. */
			sctp_send_abort(m, iphlen, sh, 0, NULL,
			    use_mflowid, mflowid,
			    vrf_id, port);
		}
		goto outnow;
	}
	if ((stcb != NULL) &&
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
		sctp_send_shutdown_ack(stcb, NULL);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
	} else {
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
		sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp,
		    use_mflowid, mflowid,
		    vrf_id, port,
		    ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
	}
outnow:
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
	}
}

/*
 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
 */

/*
 * Count streams that still have genuinely unsent data queued.  As a
 * side effect, fully-sent-but-lingering entries at the head of each
 * stream queue are freed (deferred cleanup).  Returns the number of
 * streams with real unsent data (the scan stops at the first one
 * found, so in practice 0 or 1).
 */
int
sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	int unsent_data = 0;
	unsigned int i;
	struct sctp_stream_queue_pending *sp;
	struct sctp_association *asoc;

	/*
	 * This function returns the number of streams that have true unsent
	 * data on them. Note that as it looks through it will clean up any
	 * places that have old data that has been sent but left at top of
	 * stream queue.
	 */
	asoc = &stcb->asoc;
	SCTP_TCB_SEND_LOCK(stcb);
	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		/* Check to see if some data queued */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			/* sa_ignore FREED_MEMORY */
			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
			if (sp == NULL) {
				continue;
			}
			if ((sp->msg_is_complete) &&
			    (sp->length == 0) &&
			    (sp->sender_all_done)) {
				/*
				 * We are doing differed cleanup. Last time
				 * through when we took all the data the
				 * sender_all_done was not set.
				 */
				if (sp->put_last_out == 0) {
					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
					    sp->sender_all_done,
					    sp->length,
					    sp->msg_is_complete,
					    sp->put_last_out);
				}
				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
				if (sp->net) {
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
				}
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
				sctp_free_a_strmoq(stcb, sp, so_locked);
			} else {
				/* Real unsent data found; one is enough. */
				unsent_data++;
				break;
			}
		}
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	return (unsent_data);
}

/*
 * Absorb the peer's parameters from an INIT or INIT-ACK chunk into the
 * association: peer vtag, peer rwnd, initial TSNs, and the negotiated
 * stream counts.  Outbound streams above the peer's advertised inbound
 * count are abandoned (their queued data is failed up to the ULP), and
 * the inbound stream array is (re)allocated.  Returns 0 on success,
 * -1 when the inbound stream array cannot be allocated.
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;

	if (!TAILQ_EMPTY(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}
		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp, *nsp;
		struct sctp_tmit_chunk *chk, *nchk;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* First purge already-chunked data on dropped streams. */
		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			if (chk->rec.data.stream_number >= newcnt) {
				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
				asoc->send_queue_cnt--;
				if (chk->data != NULL) {
					sctp_free_bufspace(stcb, asoc, chk, 1);
					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
					    0, chk, SCTP_SO_NOT_LOCKED);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
				}
				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
				/* sa_ignore FREED_MEMORY */
			}
		}
		if (asoc->strmout) {
			/* Then purge the per-stream pending queues. */
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					asoc->stream_queue_cnt--;
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, 0, sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					if (sp->net) {
						sctp_free_remote_addr(sp->net);
						sp->net = NULL;
					}
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
					/* sa_ignore FREED_MEMORY */
				}
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;

	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones */
		struct sctp_queued_to_read *ctl, *nctl;

		for (i = 0; i < asoc->streamincnt; i++) {
			TAILQ_FOREACH_SAFE(ctl, &asoc->strmin[i].inqueue, next, nctl) {
				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
				sctp_free_remote_addr(ctl->whoFrom);
				ctl->whoFrom = NULL;
				sctp_m_freem(ctl->data);
				ctl->data = NULL;
				sctp_free_a_readq(stcb, ctl);
			}
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* Inbound stream count is bounded by MAX_SCTP_STREAMS. */
	asoc->streamincnt = ntohs(init->num_outbound_streams);
	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
		asoc->streamincnt = MAX_SCTP_STREAMS;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].stream_no = i;
		asoc->strmin[i].last_sequence_delivered = 0xffff;
		/*
		 * U-stream ranges will be set when the cookie is unpacked.
		 * Or for the INIT sender they are un set (if pr-sctp not
		 * supported) when the INIT-ACK arrives.
		 */
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}

/*
 * INIT-ACK message processing/consumption returns value < 0 on error
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag;
	uint32_t initack_limit;
	int nat_friendly = 0;

	/* First verify that we have no illegal param's */
	abort_flag = 0;
	op_err = NULL;

	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	asoc = &stcb->asoc;
	asoc->peer_supports_nat = (uint8_t) nat_friendly;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb);
	if (retval < 0) {
		return (retval);
	}
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
	    NULL))) {
		/* Huh, we should abort */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, NULL,
		    use_mflowid, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->peer_supports_asconf == 0) {
		struct sctp_asconf_addr *param, *nparam;

		TAILQ_FOREACH_SAFE(param,
		    &asoc->asconf_queue, next, nparam) {
			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
			SCTP_FREE(param, SCTP_M_ASC_ADDR);
		}
	}
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	/* A valid INIT-ACK resets the error counters. */
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer, We do this first before queueing the
	 * cookie. We always cancel at the primary to assue that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);

	/* calculate the RTO */
	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy,
	    SCTP_RTT_FROM_NON_DATA);

	retval = sctp_send_cookie_echo(m, offset, stcb, net);
	if (retval < 0) {
		/*
		 * No cookie, we probably should send a op error. But in any
		 * case if there is no cookie in the INIT-ACK, we can
		 * abandon the peer, its broke.
		 */
		if (retval == -3) {
			/* We abort with an error of missing mandatory param */
			op_err =
			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
			if (op_err) {
				/*
				 * Expand beyond to include the mandatory
				 * param cookie
				 */
				struct sctp_inv_mandatory_param *mp;

				SCTP_BUF_LEN(op_err) =
				    sizeof(struct sctp_inv_mandatory_param);
				mp = mtod(op_err,
				    struct sctp_inv_mandatory_param *);
				/* Subtract the reserved param */
				mp->length =
				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
				mp->num_param = htonl(1);
				mp->param = htons(SCTP_STATE_COOKIE);
				mp->resv = 0;
			}
			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
			    sh, op_err,
			    use_mflowid, mflowid,
			    vrf_id, net->port);
			*abort_no_unlock = 1;
		}
		return (retval);
	}
	return (0);
}

/*
 * HEARTBEAT-ACK handler.  Reconstructs the address echoed in the
 * heartbeat info, looks up the matching destination (r_net), and on a
 * nonce match confirms an UNCONFIRMED address (possibly promoting it
 * to primary).  Also clears error counts, updates the RTO from the
 * echoed timestamp, restores REACHABLE/PF state, and kicks the
 * mobility "primary deleted" handling when a primary change was
 * requested.
 */
static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sockaddr_storage store;
	struct sctp_nets *r_net, *f_net;
	struct timeval tv;
	int req_prim = 0;
	uint16_t old_error_counter;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
		/* Invalid length */
		return;
	}
	memset(&store, 0, sizeof(store));
	switch (cp->heartbeat.hb_info.addr_family) {
#ifdef INET
	case AF_INET:
		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
			sin = (struct sockaddr_in *)&store;
			sin->sin_family = cp->heartbeat.hb_info.addr_family;
			sin->sin_len = cp->heartbeat.hb_info.addr_len;
			sin->sin_port = stcb->rport;
			memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
			    sizeof(sin->sin_addr));
		} else {
			return;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
			sin6 = (struct sockaddr_in6 *)&store;
			sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
			sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
			sin6->sin6_port = stcb->rport;
			memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
			    sizeof(sin6->sin6_addr));
		} else {
			return;
		}
		break;
#endif
	default:
		/* Unknown address family in the echoed info: drop. */
		return;
	}
	r_net = sctp_findnet(stcb, (struct sockaddr *)&store);
	if (r_net == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
		return;
	}
	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
		/*
		 * If the its a HB and it's random value is correct when can
		 * confirm the destination.
		 */
		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
			stcb->asoc.primary_destination = r_net;
			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
			f_net = TAILQ_FIRST(&stcb->asoc.nets);
			if (f_net != r_net) {
				/*
				 * first one on the list is NOT the primary
				 * sctp_cmpaddr() is much more efficent if
				 * the primary is the first on the list,
				 * make it so.
				 */
				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
			}
			req_prim = 1;
		}
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
		/* Restart the HB timer now that the address is confirmed. */
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
	}
	old_error_counter = r_net->error_count;
	r_net->error_count = 0;
	r_net->hb_responded = 1;
	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
	/* Now lets do a RTO with this */
	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy,
	    SCTP_RTT_FROM_NON_DATA);
	if (!(r_net->dest_state & SCTP_ADDR_REACHABLE)) {
		r_net->dest_state |= SCTP_ADDR_REACHABLE;
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
		    0, (void *)r_net, SCTP_SO_NOT_LOCKED);
	}
	if (r_net->dest_state & SCTP_ADDR_PF) {
		/*
		 * Leaving potentially-failed state; NOTE(review): the CC
		 * exit-pf hook is called with 'net' (the chunk's source),
		 * not 'r_net' (the address leaving PF) — verify this is
		 * intentional upstream.
		 */
		r_net->dest_state &= ~SCTP_ADDR_PF;
		stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
	}
	if (old_error_counter > 0) {
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
	}
	if (r_net == stcb->asoc.primary_destination) {
		if (stcb->asoc.alternate) {
			/* release the alternate, primary is good */
			sctp_free_remote_addr(stcb->asoc.alternate);
			stcb->asoc.alternate = NULL;
		}
	}
	/* Mobility adaptation */
	if (req_prim) {
		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_BASE) ||
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_FASTHANDOFF)) &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_PRIM_DELETED)) {

			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL,
			    SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_FASTHANDOFF)) {
				sctp_assoc_immediate_retrans(stcb,
				    stcb->asoc.primary_destination);
			}
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_BASE)) {
				sctp_move_chunks_from_net(stcb,
				    stcb->asoc.deleted_primary);
			}
			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
			    stcb->asoc.deleted_primary);
		}
	}
}

/*
 * Handle a NAT "colliding state" error cause carried in an ABORT.
 * While still in COOKIE-WAIT or COOKIE-ECHOED, pick a fresh verification
 * tag, rehash the tcb under it and retransmit the INIT instead of
 * tearing the association down.
 */
static int
sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
{
	/*
	 * return 0 means we want you to proceed with the abort non-zero
	 * means no abort processing
	 */
	struct sctpasochead *head;

	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
		/* generate a new vtag and send init */
		LIST_REMOVE(stcb, sctp_asocs);
		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * treat like a case where the cookie expired i.e.: - dump
		 * current cookie. - generate a new vtag. - resend init.
		 */
		/* generate a new vtag and send init */
		LIST_REMOVE(stcb, sctp_asocs);
		stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
		stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, &stcb->asoc);
		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	return (0);
}

/*
 * Handle a NAT "missing state" error cause carried in an ABORT.
 * If the peer supports AUTH, send an ASCONF-based NAT state update
 * instead of aborting; without AUTH we cannot, so the ABORT proceeds.
 */
static int
sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*
	 * return 0 means we want you to proceed with the abort non-zero
	 * means no abort processing
	 */
	if (stcb->asoc.peer_supports_auth == 0) {
		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
		return (0);
	}
	sctp_asconf_send_nat_state_update(stcb, net);
	return (1);
}


/*
 * ABORT handler.  Checks for the two special NAT error causes that may
 * rescue the association; otherwise notifies the ULP, updates stats
 * and frees the tcb.
 */
static void
sctp_handle_abort(struct sctp_abort_chunk *abort,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	uint16_t len;
	uint16_t error;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return;

	len = ntohs(abort->ch.chunk_length);
	if (len > sizeof(struct sctp_chunkhdr)) {
		/*
		 * Need to check the cause codes for our two magic nat
		 * aborts which don't kill the assoc necessarily.
		 */
		struct sctp_missing_nat_state *natc;

		natc = (struct sctp_missing_nat_state *)(abort + 1);
		error = ntohs(natc->cause);
		if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    abort->ch.chunk_flags);
			if (sctp_handle_nat_colliding_state(stcb)) {
				/* Association rescued; do not tear down. */
				return;
			}
		} else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    abort->ch.chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				/* Association rescued; do not tear down. */
				return;
			}
		}
	} else {
		error = 0;
	}
	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Socket lock must be taken before the tcb lock; temporarily drop
	 * the tcb lock (holding a refcount) to respect that order.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}

/*
 * Start PMTU-raise and heartbeat timers on every destination, and
 * probe unconfirmed destinations with a heartbeat (bounded by the
 * sctp_hb_maxburst sysctl).  Called once the association is up
 * (output context is SCTP_OUTPUT_FROM_COOKIE_ACK).
 */
static void
sctp_start_net_timers(struct sctp_tcb *stcb)
{
	uint32_t cnt_hb_sent;
	struct sctp_nets *net;

	cnt_hb_sent = 0;
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		/*
		 * For each network start: 1) A pmtu timer. 2) A HB timer 3)
		 * If the dest in unconfirmed send a hb as well if under
		 * max_hb_burst have been sent.
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
		    (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
			cnt_hb_sent++;
		}
	}
	if (cnt_hb_sent) {
		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_COOKIE_ACK,
		    SCTP_SO_NOT_LOCKED);
	}
}


/*
 * SHUTDOWN handler.  Validates size, processes the cumulative ack it
 * carries, terminates any partial-delivery in progress, moves the
 * association toward SHUTDOWN-RECEIVED, and answers with SHUTDOWN-ACK
 * once no data remains queued or in flight.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		/* SHUTDOWN before the association is up: ignore. */
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	} else {
		sctp_update_acked(stcb, cp, abort_flag);
		if (*abort_flag) {
			return;
		}
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		/* Terminate the in-progress partial delivery. */
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/* Respect socket-before-tcb lock order; hold a ref while
		 * the tcb lock is dropped. */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		sctp_send_shutdown_ack(stcb, net);
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		sctp_stop_timers_for_shutdown(stcb);
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
		    stcb, net);
	}
}

/*
 * SHUTDOWN-ACK handler.  Only meaningful in SHUTDOWN-SENT or
 * SHUTDOWN-ACK-SENT; any other state gets OOTB-style handling or is
 * ignored.  Otherwise: finish any partial delivery, report leftover
 * outbound data as failed, send SHUTDOWN-COMPLETE, notify the ULP and
 * free the tcb.
 */
static void
sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(stcb->sctp_ep);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
		sctp_send_shutdown_complete(stcb, net, 1);
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* unexpected SHUTDOWN-ACK... so ignore... */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
997 */ 998 SCTP_INP_READ_LOCK(stcb->sctp_ep); 999 asoc->control_pdapi->end_added = 1; 1000 asoc->control_pdapi->pdapi_aborted = 1; 1001 asoc->control_pdapi = NULL; 1002 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 1003#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1004 atomic_add_int(&stcb->asoc.refcnt, 1); 1005 SCTP_TCB_UNLOCK(stcb); 1006 SCTP_SOCKET_LOCK(so, 1); 1007 SCTP_TCB_LOCK(stcb); 1008 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1009 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1010 /* assoc was freed while we were unlocked */ 1011 SCTP_SOCKET_UNLOCK(so, 1); 1012 return; 1013 } 1014#endif 1015 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1016#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1017 SCTP_SOCKET_UNLOCK(so, 1); 1018#endif 1019 } 1020 /* are the queues empty? */ 1021 if (!TAILQ_EMPTY(&asoc->send_queue) || 1022 !TAILQ_EMPTY(&asoc->sent_queue) || 1023 !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) { 1024 sctp_report_all_outbound(stcb, 0, 0, SCTP_SO_NOT_LOCKED); 1025 } 1026 /* stop the timer */ 1027 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9); 1028 /* send SHUTDOWN-COMPLETE */ 1029 sctp_send_shutdown_complete(stcb, net, 0); 1030 /* notify upper layer protocol */ 1031 if (stcb->sctp_socket) { 1032 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1033 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 1034 stcb->sctp_socket->so_snd.sb_cc = 0; 1035 } 1036 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 1037 } 1038 SCTP_STAT_INCR_COUNTER32(sctps_shutdown); 1039 /* free the TCB but first save off the ep */ 1040#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1041 atomic_add_int(&stcb->asoc.refcnt, 1); 1042 SCTP_TCB_UNLOCK(stcb); 1043 SCTP_SOCKET_LOCK(so, 1); 1044 SCTP_TCB_LOCK(stcb); 1045 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1046#endif 1047 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 1048 
SCTP_FROM_SCTP_INPUT + SCTP_LOC_10); 1049#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1050 SCTP_SOCKET_UNLOCK(so, 1); 1051#endif 1052} 1053 1054/* 1055 * Skip past the param header and then we will find the chunk that caused the 1056 * problem. There are two possiblities ASCONF or FWD-TSN other than that and 1057 * our peer must be broken. 1058 */ 1059static void 1060sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr, 1061 struct sctp_nets *net) 1062{ 1063 struct sctp_chunkhdr *chk; 1064 1065 chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr)); 1066 switch (chk->chunk_type) { 1067 case SCTP_ASCONF_ACK: 1068 case SCTP_ASCONF: 1069 sctp_asconf_cleanup(stcb, net); 1070 break; 1071 case SCTP_FORWARD_CUM_TSN: 1072 stcb->asoc.peer_supports_prsctp = 0; 1073 break; 1074 default: 1075 SCTPDBG(SCTP_DEBUG_INPUT2, 1076 "Peer does not support chunk type %d(%x)??\n", 1077 chk->chunk_type, (uint32_t) chk->chunk_type); 1078 break; 1079 } 1080} 1081 1082/* 1083 * Skip past the param header and then we will find the param that caused the 1084 * problem. There are a number of param's in a ASCONF OR the prsctp param 1085 * these will turn of specific features. 1086 */ 1087static void 1088sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr) 1089{ 1090 struct sctp_paramhdr *pbad; 1091 1092 pbad = phdr + 1; 1093 switch (ntohs(pbad->param_type)) { 1094 /* pr-sctp draft */ 1095 case SCTP_PRSCTP_SUPPORTED: 1096 stcb->asoc.peer_supports_prsctp = 0; 1097 break; 1098 case SCTP_SUPPORTED_CHUNK_EXT: 1099 break; 1100 /* draft-ietf-tsvwg-addip-sctp */ 1101 case SCTP_HAS_NAT_SUPPORT: 1102 stcb->asoc.peer_supports_nat = 0; 1103 break; 1104 case SCTP_ADD_IP_ADDRESS: 1105 case SCTP_DEL_IP_ADDRESS: 1106 case SCTP_SET_PRIM_ADDR: 1107 stcb->asoc.peer_supports_asconf = 0; 1108 break; 1109 case SCTP_SUCCESS_REPORT: 1110 case SCTP_ERROR_CAUSE_IND: 1111 SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? 
or error cause?\n"); 1112 SCTPDBG(SCTP_DEBUG_INPUT2, 1113 "Turning off ASCONF to this strange peer\n"); 1114 stcb->asoc.peer_supports_asconf = 0; 1115 break; 1116 default: 1117 SCTPDBG(SCTP_DEBUG_INPUT2, 1118 "Peer does not support param type %d(%x)??\n", 1119 pbad->param_type, (uint32_t) pbad->param_type); 1120 break; 1121 } 1122} 1123 1124static int 1125sctp_handle_error(struct sctp_chunkhdr *ch, 1126 struct sctp_tcb *stcb, struct sctp_nets *net) 1127{ 1128 int chklen; 1129 struct sctp_paramhdr *phdr; 1130 uint16_t error, error_type; 1131 uint16_t error_len; 1132 struct sctp_association *asoc; 1133 int adjust; 1134 1135#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1136 struct socket *so; 1137 1138#endif 1139 1140 /* parse through all of the errors and process */ 1141 asoc = &stcb->asoc; 1142 phdr = (struct sctp_paramhdr *)((caddr_t)ch + 1143 sizeof(struct sctp_chunkhdr)); 1144 chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr); 1145 error = 0; 1146 while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) { 1147 /* Process an Error Cause */ 1148 error_type = ntohs(phdr->param_type); 1149 error_len = ntohs(phdr->param_length); 1150 if ((error_len > chklen) || (error_len == 0)) { 1151 /* invalid param length for this param */ 1152 SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n", 1153 chklen, error_len); 1154 return (0); 1155 } 1156 if (error == 0) { 1157 /* report the first error cause */ 1158 error = error_type; 1159 } 1160 switch (error_type) { 1161 case SCTP_CAUSE_INVALID_STREAM: 1162 case SCTP_CAUSE_MISSING_PARAM: 1163 case SCTP_CAUSE_INVALID_PARAM: 1164 case SCTP_CAUSE_NO_USER_DATA: 1165 SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? 
We have a bug :/ (or do they?)\n", 1166 error_type); 1167 break; 1168 case SCTP_CAUSE_NAT_COLLIDING_STATE: 1169 SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n", 1170 ch->chunk_flags); 1171 if (sctp_handle_nat_colliding_state(stcb)) { 1172 return (0); 1173 } 1174 break; 1175 case SCTP_CAUSE_NAT_MISSING_STATE: 1176 SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n", 1177 ch->chunk_flags); 1178 if (sctp_handle_nat_missing_state(stcb, net)) { 1179 return (0); 1180 } 1181 break; 1182 case SCTP_CAUSE_STALE_COOKIE: 1183 /* 1184 * We only act if we have echoed a cookie and are 1185 * waiting. 1186 */ 1187 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 1188 int *p; 1189 1190 p = (int *)((caddr_t)phdr + sizeof(*phdr)); 1191 /* Save the time doubled */ 1192 asoc->cookie_preserve_req = ntohl(*p) << 1; 1193 asoc->stale_cookie_count++; 1194 if (asoc->stale_cookie_count > 1195 asoc->max_init_times) { 1196 sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED); 1197 /* now free the asoc */ 1198#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1199 so = SCTP_INP_SO(stcb->sctp_ep); 1200 atomic_add_int(&stcb->asoc.refcnt, 1); 1201 SCTP_TCB_UNLOCK(stcb); 1202 SCTP_SOCKET_LOCK(so, 1); 1203 SCTP_TCB_LOCK(stcb); 1204 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1205#endif 1206 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 1207 SCTP_FROM_SCTP_INPUT + SCTP_LOC_11); 1208#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1209 SCTP_SOCKET_UNLOCK(so, 1); 1210#endif 1211 return (-1); 1212 } 1213 /* blast back to INIT state */ 1214 sctp_toss_old_cookies(stcb, &stcb->asoc); 1215 asoc->state &= ~SCTP_STATE_COOKIE_ECHOED; 1216 asoc->state |= SCTP_STATE_COOKIE_WAIT; 1217 sctp_stop_all_cookie_timers(stcb); 1218 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1219 } 1220 break; 1221 case SCTP_CAUSE_UNRESOLVABLE_ADDR: 1222 /* 1223 * Nothing we can do here, we don't do hostname 1224 * addresses so if the 
			 * peer does not like my IPv6 (or IPv4 for that
			 * matter) it does not matter. If they don't support
			 * that type of address, they can NOT possibly get
			 * that packet type... i.e. with no IPv6 you can't
			 * recieve a IPv6 packet. so we can safely ignore
			 * this one. If we ever added support for HOSTNAME
			 * Addresses, then we would need to do something
			 * here.
			 */
			break;
		case SCTP_CAUSE_UNRECOG_CHUNK:
			sctp_process_unrecog_chunk(stcb, phdr, net);
			break;
		case SCTP_CAUSE_UNRECOG_PARAM:
			sctp_process_unrecog_param(stcb, phdr);
			break;
		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
			/*
			 * We ignore this since the timer will drive out a
			 * new cookie anyway and there timer will drive us
			 * to send a SHUTDOWN_COMPLETE. We can't send one
			 * here since we don't have their tag.
			 */
			break;
		case SCTP_CAUSE_DELETING_LAST_ADDR:
		case SCTP_CAUSE_RESOURCE_SHORTAGE:
		case SCTP_CAUSE_DELETING_SRC_ADDR:
			/*
			 * We should NOT get these here, but in a
			 * ASCONF-ACK.
			 */
			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
			    error_type);
			break;
		case SCTP_CAUSE_OUT_OF_RESC:
			/*
			 * And what, pray tell do we do with the fact that
			 * the peer is out of resources? Not really sure we
			 * could do anything but abort. I suspect this
			 * should have came WITH an abort instead of in a
			 * OP-ERROR.
			 */
			break;
		default:
			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
			    error_type);
			break;
		}
		/*
		 * NOTE(review): SCTP_SIZE32() pads error_len up to a
		 * multiple of 4.  The length check above uses the unpadded
		 * error_len, so for a trailing, unpadded cause the padded
		 * adjust can exceed chklen, driving chklen negative; the
		 * (size_t) cast in the loop condition would then keep the
		 * loop running past the chunk -- confirm against later
		 * revisions whether a guard is needed here.
		 */
		adjust = SCTP_SIZE32(error_len);
		chklen -= adjust;
		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
	}
	sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, error, ch, SCTP_SO_NOT_LOCKED);
	return (0);
}

/*
 * INIT-ACK chunk handler.  Validates the chunk length and the mandatory
 * parameters (non-zero initiate tag, minimum a_rwnd, non-zero stream
 * counts); any violation aborts the association and sets *abort_no_unlock.
 * In COOKIE-WAIT the parameters are processed and we move to COOKIE-ECHOED;
 * in any other state the chunk is discarded.  Returns 0 on success, -1 on
 * error/discard.
 */
static int
sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id)
{
	struct sctp_init_ack *init_ack;
	struct mbuf *op_err;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_init_ack: handling INIT-ACK\n");

	if (stcb == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_init_ack: TCB is null\n");
		return (-1);
	}
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
		/* Invalid length */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err,
		    use_mflowid, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	init_ack = &cp->init;
	/* validate parameters */
	if (init_ack->initiate_tag == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err,
		    use_mflowid, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err,
		    use_mflowid, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_inbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err,
		    use_mflowid, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_outbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err,
		    use_mflowid, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* process according to association state... */
	switch (stcb->asoc.state & SCTP_STATE_MASK) {
	case SCTP_STATE_COOKIE_WAIT:
		/* this is the expected state for this chunk */
		/* process the INIT-ACK parameters */
		if (stcb->asoc.primary_destination->dest_state &
		    SCTP_ADDR_UNCONFIRMED) {
			/*
			 * The primary is where we sent the INIT, we can
			 * always consider it confirmed when the INIT-ACK is
			 * returned. Do this before we load addresses
			 * though.
			 */
			stcb->asoc.primary_destination->dest_state &=
			    ~SCTP_ADDR_UNCONFIRMED;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
		}
		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
		    net, abort_no_unlock,
		    use_mflowid, mflowid,
		    vrf_id) < 0) {
			/* error in parsing parameters */
			return (-1);
		}
		/* update our state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);

		/* reset the RTO calc */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		/*
		 * collapse the init timer back in case of a exponential
		 * backoff
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
		    stcb, net);
		/*
		 * the send at the end of the inbound data processing will
		 * cause the cookie to be sent
		 */
		break;
	case SCTP_STATE_SHUTDOWN_SENT:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_COOKIE_ECHOED:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_OPEN:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_EMPTY:
	case SCTP_STATE_INUSE:
	default:
		/* incorrect state... discard */
		return (-1);
		break;
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
	return (0);
}

static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port);


/*
 * handle a state cookie for an existing association m: input packet mbuf
 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
 * "split" mbuf and the cookie signature does not exist offset: offset into
 * mbuf to the cookie-echo chunk
 *
 * Implements the collision-case table of RFC 4960 Section 5.2.4 (cases A
 * through D), recording which path was taken in asoc->cookie_how[] for
 * debugging.
 */
static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_association *asoc;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sctp_nets *net;
	struct mbuf *op_err;
	struct sctp_paramhdr *ph;
	int init_offset, initack_offset, i;
	int retval;
	int spec_flag = 0;
	uint32_t how_indx;

	net = *netp;
	/* I know that the TCB is non-NULL from the caller */
	asoc = &stcb->asoc;
	/* record this pass in the first free cookie_how debug slot */
	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
		if (asoc->cookie_how[how_indx] == 0)
			break;
	}
	if (how_indx < sizeof(asoc->cookie_how)) {
		asoc->cookie_how[how_indx] = 1;
	}
	if
(SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 1467 /* SHUTDOWN came in after sending INIT-ACK */ 1468 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 1469 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1470 0, M_DONTWAIT, 1, MT_DATA); 1471 if (op_err == NULL) { 1472 /* FOOBAR */ 1473 return (NULL); 1474 } 1475 /* Set the len */ 1476 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr); 1477 ph = mtod(op_err, struct sctp_paramhdr *); 1478 ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN); 1479 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 1480 sctp_send_operr_to(m, sh, cookie->peers_vtag, op_err, 1481 use_mflowid, mflowid, 1482 vrf_id, net->port); 1483 if (how_indx < sizeof(asoc->cookie_how)) 1484 asoc->cookie_how[how_indx] = 2; 1485 return (NULL); 1486 } 1487 /* 1488 * find and validate the INIT chunk in the cookie (peer's info) the 1489 * INIT should start after the cookie-echo header struct (chunk 1490 * header, state cookie header struct) 1491 */ 1492 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk); 1493 1494 init_cp = (struct sctp_init_chunk *) 1495 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1496 (uint8_t *) & init_buf); 1497 if (init_cp == NULL) { 1498 /* could not pull a INIT chunk in cookie */ 1499 return (NULL); 1500 } 1501 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1502 return (NULL); 1503 } 1504 /* 1505 * find and validate the INIT-ACK chunk in the cookie (my info) the 1506 * INIT-ACK follows the INIT chunk 1507 */ 1508 initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length)); 1509 initack_cp = (struct sctp_init_ack_chunk *) 1510 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1511 (uint8_t *) & initack_buf); 1512 if (initack_cp == NULL) { 1513 /* could not pull INIT-ACK chunk in cookie */ 1514 return (NULL); 1515 } 1516 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1517 return (NULL); 1518 } 1519 if 
((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1520 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) { 1521 /* 1522 * case D in Section 5.2.4 Table 2: MMAA process accordingly 1523 * to get into the OPEN state 1524 */ 1525 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1526 /*- 1527 * Opps, this means that we somehow generated two vtag's 1528 * the same. I.e. we did: 1529 * Us Peer 1530 * <---INIT(tag=a)------ 1531 * ----INIT-ACK(tag=t)--> 1532 * ----INIT(tag=t)------> *1 1533 * <---INIT-ACK(tag=a)--- 1534 * <----CE(tag=t)------------- *2 1535 * 1536 * At point *1 we should be generating a different 1537 * tag t'. Which means we would throw away the CE and send 1538 * ours instead. Basically this is case C (throw away side). 1539 */ 1540 if (how_indx < sizeof(asoc->cookie_how)) 1541 asoc->cookie_how[how_indx] = 17; 1542 return (NULL); 1543 1544 } 1545 switch SCTP_GET_STATE 1546 (asoc) { 1547 case SCTP_STATE_COOKIE_WAIT: 1548 case SCTP_STATE_COOKIE_ECHOED: 1549 /* 1550 * INIT was sent but got a COOKIE_ECHO with the 1551 * correct tags... just accept it...but we must 1552 * process the init so that we can make sure we have 1553 * the right seq no's. 1554 */ 1555 /* First we must process the INIT !! 
*/ 1556 retval = sctp_process_init(init_cp, stcb); 1557 if (retval < 0) { 1558 if (how_indx < sizeof(asoc->cookie_how)) 1559 asoc->cookie_how[how_indx] = 3; 1560 return (NULL); 1561 } 1562 /* we have already processed the INIT so no problem */ 1563 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, 1564 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12); 1565 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13); 1566 /* update current state */ 1567 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1568 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1569 else 1570 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1571 1572 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1573 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1574 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1575 stcb->sctp_ep, stcb, asoc->primary_destination); 1576 } 1577 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1578 sctp_stop_all_cookie_timers(stcb); 1579 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1580 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1581 (inp->sctp_socket->so_qlimit == 0) 1582 ) { 1583#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1584 struct socket *so; 1585 1586#endif 1587 /* 1588 * Here is where collision would go if we 1589 * did a connect() and instead got a 1590 * init/init-ack/cookie done before the 1591 * init-ack came back.. 
1592 */ 1593 stcb->sctp_ep->sctp_flags |= 1594 SCTP_PCB_FLAGS_CONNECTED; 1595#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1596 so = SCTP_INP_SO(stcb->sctp_ep); 1597 atomic_add_int(&stcb->asoc.refcnt, 1); 1598 SCTP_TCB_UNLOCK(stcb); 1599 SCTP_SOCKET_LOCK(so, 1); 1600 SCTP_TCB_LOCK(stcb); 1601 atomic_add_int(&stcb->asoc.refcnt, -1); 1602 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1603 SCTP_SOCKET_UNLOCK(so, 1); 1604 return (NULL); 1605 } 1606#endif 1607 soisconnected(stcb->sctp_socket); 1608#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1609 SCTP_SOCKET_UNLOCK(so, 1); 1610#endif 1611 } 1612 /* notify upper layer */ 1613 *notification = SCTP_NOTIFY_ASSOC_UP; 1614 /* 1615 * since we did not send a HB make sure we don't 1616 * double things 1617 */ 1618 net->hb_responded = 1; 1619 net->RTO = sctp_calculate_rto(stcb, asoc, net, 1620 &cookie->time_entered, 1621 sctp_align_unsafe_makecopy, 1622 SCTP_RTT_FROM_NON_DATA); 1623 1624 if (stcb->asoc.sctp_autoclose_ticks && 1625 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) { 1626 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 1627 inp, stcb, NULL); 1628 } 1629 break; 1630 default: 1631 /* 1632 * we're in the OPEN state (or beyond), so peer must 1633 * have simply lost the COOKIE-ACK 1634 */ 1635 break; 1636 } /* end switch */ 1637 sctp_stop_all_cookie_timers(stcb); 1638 /* 1639 * We ignore the return code here.. not sure if we should 1640 * somehow abort.. but we do have an existing asoc. This 1641 * really should not fail. 
1642 */ 1643 if (sctp_load_addresses_from_init(stcb, m, 1644 init_offset + sizeof(struct sctp_init_chunk), 1645 initack_offset, sh, init_src)) { 1646 if (how_indx < sizeof(asoc->cookie_how)) 1647 asoc->cookie_how[how_indx] = 4; 1648 return (NULL); 1649 } 1650 /* respond with a COOKIE-ACK */ 1651 sctp_toss_old_cookies(stcb, asoc); 1652 sctp_send_cookie_ack(stcb); 1653 if (how_indx < sizeof(asoc->cookie_how)) 1654 asoc->cookie_how[how_indx] = 5; 1655 return (stcb); 1656 } 1657 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1658 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag && 1659 cookie->tie_tag_my_vtag == 0 && 1660 cookie->tie_tag_peer_vtag == 0) { 1661 /* 1662 * case C in Section 5.2.4 Table 2: XMOO silently discard 1663 */ 1664 if (how_indx < sizeof(asoc->cookie_how)) 1665 asoc->cookie_how[how_indx] = 6; 1666 return (NULL); 1667 } 1668 /* 1669 * If nat support, and the below and stcb is established, send back 1670 * a ABORT(colliding state) if we are established. 1671 */ 1672 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) && 1673 (asoc->peer_supports_nat) && 1674 ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1675 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) || 1676 (asoc->peer_vtag == 0)))) { 1677 /* 1678 * Special case - Peer's support nat. We may have two init's 1679 * that we gave out the same tag on since one was not 1680 * established.. i.e. we get INIT from host-1 behind the nat 1681 * and we respond tag-a, we get a INIT from host-2 behind 1682 * the nat and we get tag-a again. Then we bring up host-1 1683 * (or 2's) assoc, Then comes the cookie from hsot-2 (or 1). 1684 * Now we have colliding state. We must send an abort here 1685 * with colliding state indication. 
1686 */ 1687 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1688 0, M_DONTWAIT, 1, MT_DATA); 1689 if (op_err == NULL) { 1690 /* FOOBAR */ 1691 return (NULL); 1692 } 1693 /* pre-reserve some space */ 1694#ifdef INET6 1695 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 1696#else 1697 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 1698#endif 1699 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 1700 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1701 /* Set the len */ 1702 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr); 1703 ph = mtod(op_err, struct sctp_paramhdr *); 1704 ph->param_type = htons(SCTP_CAUSE_NAT_COLLIDING_STATE); 1705 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 1706 sctp_send_abort(m, iphlen, sh, 0, op_err, 1707 use_mflowid, mflowid, 1708 vrf_id, port); 1709 return (NULL); 1710 } 1711 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1712 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) || 1713 (asoc->peer_vtag == 0))) { 1714 /* 1715 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info 1716 * should be ok, re-accept peer info 1717 */ 1718 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1719 /* 1720 * Extension of case C. If we hit this, then the 1721 * random number generator returned the same vtag 1722 * when we first sent our INIT-ACK and when we later 1723 * sent our INIT. The side with the seq numbers that 1724 * are different will be the one that normnally 1725 * would have hit case C. This in effect "extends" 1726 * our vtags in this collision case to be 64 bits. 1727 * The same collision could occur aka you get both 1728 * vtag and seq number the same twice in a row.. but 1729 * is much less likely. If it did happen then we 1730 * would proceed through and bring up the assoc.. we 1731 * may end up with the wrong stream setup however.. 1732 * which would be bad.. but there is no way to 1733 * tell.. 
until we send on a stream that does not 1734 * exist :-) 1735 */ 1736 if (how_indx < sizeof(asoc->cookie_how)) 1737 asoc->cookie_how[how_indx] = 7; 1738 1739 return (NULL); 1740 } 1741 if (how_indx < sizeof(asoc->cookie_how)) 1742 asoc->cookie_how[how_indx] = 8; 1743 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14); 1744 sctp_stop_all_cookie_timers(stcb); 1745 /* 1746 * since we did not send a HB make sure we don't double 1747 * things 1748 */ 1749 net->hb_responded = 1; 1750 if (stcb->asoc.sctp_autoclose_ticks && 1751 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1752 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1753 NULL); 1754 } 1755 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1756 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1757 1758 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) { 1759 /* 1760 * Ok the peer probably discarded our data (if we 1761 * echoed a cookie+data). So anything on the 1762 * sent_queue should be marked for retransmit, we 1763 * may not get something to kick us so it COULD 1764 * still take a timeout to move these.. but it can't 1765 * hurt to mark them. 
1766 */ 1767 struct sctp_tmit_chunk *chk; 1768 1769 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1770 if (chk->sent < SCTP_DATAGRAM_RESEND) { 1771 chk->sent = SCTP_DATAGRAM_RESEND; 1772 sctp_flight_size_decrease(chk); 1773 sctp_total_flight_decrease(stcb, chk); 1774 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1775 spec_flag++; 1776 } 1777 } 1778 1779 } 1780 /* process the INIT info (peer's info) */ 1781 retval = sctp_process_init(init_cp, stcb); 1782 if (retval < 0) { 1783 if (how_indx < sizeof(asoc->cookie_how)) 1784 asoc->cookie_how[how_indx] = 9; 1785 return (NULL); 1786 } 1787 if (sctp_load_addresses_from_init(stcb, m, 1788 init_offset + sizeof(struct sctp_init_chunk), 1789 initack_offset, sh, init_src)) { 1790 if (how_indx < sizeof(asoc->cookie_how)) 1791 asoc->cookie_how[how_indx] = 10; 1792 return (NULL); 1793 } 1794 if ((asoc->state & SCTP_STATE_COOKIE_WAIT) || 1795 (asoc->state & SCTP_STATE_COOKIE_ECHOED)) { 1796 *notification = SCTP_NOTIFY_ASSOC_UP; 1797 1798 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1799 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1800 (inp->sctp_socket->so_qlimit == 0)) { 1801#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1802 struct socket *so; 1803 1804#endif 1805 stcb->sctp_ep->sctp_flags |= 1806 SCTP_PCB_FLAGS_CONNECTED; 1807#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1808 so = SCTP_INP_SO(stcb->sctp_ep); 1809 atomic_add_int(&stcb->asoc.refcnt, 1); 1810 SCTP_TCB_UNLOCK(stcb); 1811 SCTP_SOCKET_LOCK(so, 1); 1812 SCTP_TCB_LOCK(stcb); 1813 atomic_add_int(&stcb->asoc.refcnt, -1); 1814 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1815 SCTP_SOCKET_UNLOCK(so, 1); 1816 return (NULL); 1817 } 1818#endif 1819 soisconnected(stcb->sctp_socket); 1820#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1821 SCTP_SOCKET_UNLOCK(so, 1); 1822#endif 1823 } 1824 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1825 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1826 
else 1827 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1828 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1829 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1830 SCTP_STAT_INCR_COUNTER32(sctps_restartestab); 1831 } else { 1832 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1833 } 1834 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1835 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1836 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1837 stcb->sctp_ep, stcb, asoc->primary_destination); 1838 } 1839 sctp_stop_all_cookie_timers(stcb); 1840 sctp_toss_old_cookies(stcb, asoc); 1841 sctp_send_cookie_ack(stcb); 1842 if (spec_flag) { 1843 /* 1844 * only if we have retrans set do we do this. What 1845 * this call does is get only the COOKIE-ACK out and 1846 * then when we return the normal call to 1847 * sctp_chunk_output will get the retrans out behind 1848 * this. 1849 */ 1850 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED); 1851 } 1852 if (how_indx < sizeof(asoc->cookie_how)) 1853 asoc->cookie_how[how_indx] = 11; 1854 1855 return (stcb); 1856 } 1857 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1858 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) && 1859 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce && 1860 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce && 1861 cookie->tie_tag_peer_vtag != 0) { 1862 struct sctpasochead *head; 1863 1864 if (asoc->peer_supports_nat) { 1865 /* 1866 * This is a gross gross hack. just call the 1867 * cookie_new code since we are allowing a duplicate 1868 * association. I hope this works... 
1869 */ 1870 return (sctp_process_cookie_new(m, iphlen, offset, sh, cookie, cookie_len, 1871 inp, netp, init_src, notification, 1872 auth_skipped, auth_offset, auth_len, 1873 use_mflowid, mflowid, 1874 vrf_id, port)); 1875 } 1876 /* 1877 * case A in Section 5.2.4 Table 2: XXMM (peer restarted) 1878 */ 1879 /* temp code */ 1880 if (how_indx < sizeof(asoc->cookie_how)) 1881 asoc->cookie_how[how_indx] = 12; 1882 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15); 1883 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1884 1885 /* notify upper layer */ 1886 *notification = SCTP_NOTIFY_ASSOC_RESTART; 1887 atomic_add_int(&stcb->asoc.refcnt, 1); 1888 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) && 1889 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 1890 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) { 1891 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1892 } 1893 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1894 SCTP_STAT_INCR_GAUGE32(sctps_restartestab); 1895 } else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1896 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab); 1897 } 1898 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1899 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1900 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1901 stcb->sctp_ep, stcb, asoc->primary_destination); 1902 1903 } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) { 1904 /* move to OPEN state, if not in SHUTDOWN_SENT */ 1905 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1906 } 1907 asoc->pre_open_streams = 1908 ntohs(initack_cp->init.num_outbound_streams); 1909 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1910 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; 1911 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; 1912 1913 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1914 1915 asoc->str_reset_seq_in = 
asoc->init_seq_number; 1916 1917 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1918 if (asoc->mapping_array) { 1919 memset(asoc->mapping_array, 0, 1920 asoc->mapping_array_size); 1921 } 1922 if (asoc->nr_mapping_array) { 1923 memset(asoc->nr_mapping_array, 0, 1924 asoc->mapping_array_size); 1925 } 1926 SCTP_TCB_UNLOCK(stcb); 1927 SCTP_INP_INFO_WLOCK(); 1928 SCTP_INP_WLOCK(stcb->sctp_ep); 1929 SCTP_TCB_LOCK(stcb); 1930 atomic_add_int(&stcb->asoc.refcnt, -1); 1931 /* send up all the data */ 1932 SCTP_TCB_SEND_LOCK(stcb); 1933 1934 sctp_report_all_outbound(stcb, 0, 1, SCTP_SO_NOT_LOCKED); 1935 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1936 stcb->asoc.strmout[i].stream_no = i; 1937 stcb->asoc.strmout[i].next_sequence_sent = 0; 1938 stcb->asoc.strmout[i].last_msg_incomplete = 0; 1939 } 1940 /* process the INIT-ACK info (my info) */ 1941 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1942 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1943 1944 /* pull from vtag hash */ 1945 LIST_REMOVE(stcb, sctp_asocs); 1946 /* re-insert to new vtag position */ 1947 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, 1948 SCTP_BASE_INFO(hashasocmark))]; 1949 /* 1950 * put it in the bucket in the vtag hash of assoc's for the 1951 * system 1952 */ 1953 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 1954 1955 /* process the INIT info (peer's info) */ 1956 SCTP_TCB_SEND_UNLOCK(stcb); 1957 SCTP_INP_WUNLOCK(stcb->sctp_ep); 1958 SCTP_INP_INFO_WUNLOCK(); 1959 1960 retval = sctp_process_init(init_cp, stcb); 1961 if (retval < 0) { 1962 if (how_indx < sizeof(asoc->cookie_how)) 1963 asoc->cookie_how[how_indx] = 13; 1964 1965 return (NULL); 1966 } 1967 /* 1968 * since we did not send a HB make sure we don't double 1969 * things 1970 */ 1971 net->hb_responded = 1; 1972 1973 if (sctp_load_addresses_from_init(stcb, m, 1974 init_offset + sizeof(struct sctp_init_chunk), 1975 initack_offset, sh, init_src)) { 1976 if (how_indx < sizeof(asoc->cookie_how)) 1977 
				asoc->cookie_how[how_indx] = 14;

			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 15;

		return (stcb);
	}
	if (how_indx < sizeof(asoc->cookie_how))
		asoc->cookie_how[how_indx] = 16;
	/* all other cases... */
	return (NULL);
}


/*
 * Handle a state cookie for a NEW association.
 *
 * m:           input packet mbuf chain -- assumes a pullup on
 *              IP/SCTP/COOKIE-ECHO chunk.  Note: this is a "split" mbuf
 *              and the cookie signature does not exist.
 * offset:      offset into mbuf to the cookie-echo chunk.
 * cookie_len:  length of the cookie chunk.
 * init_src:    address the INIT (and hence this COOKIE-ECHO) came from.
 * notification: out-parameter, set to SCTP_NOTIFY_ASSOC_UP on success so
 *              the caller can deliver the ULP notification.
 *
 * Returns the newly allocated TCB (locked) on success, or NULL on any
 * failure; on failure the association, if one was allocated, has been
 * freed and an ABORT sent where appropriate.
 */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sockaddr_storage sa_store;
	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
	struct sctp_association *asoc;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;
	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(inp);
#endif

	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value.  This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and popluate
	 */

	/*
	 * Here we do a trick, we set in NULL for the proc/thread argument.
	 * We do this since in effect we only use the p argument when the
	 * socket is unbound and we must do an implicit bind. Since we are
	 * getting a cookie, we cannot be unbound.
	 */
	stcb = sctp_aloc_assoc(inp, init_src, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id,
	    (struct thread *)NULL
	    );
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);

		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		return (NULL);
	}
	/* get the correct sctp_nets */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->ipv4_local_scope = cookie->ipv4_scope;
	asoc->site_scope = cookie->site_scope;
	asoc->local_scope = cookie->local_scope;
	asoc->loopback_scope = cookie->loopback_scope;

	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
		struct mbuf *op_err;

		/*
		 * Houston we have a problem. The EP changed while the
		 * cookie was in flight. Only recourse is to abort the
		 * association.
		 */
		/*
		 * Bump the refcnt so the stcb survives the unlock/lock
		 * dance needed to take the socket lock before freeing.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* process the INIT-ACK info (my info) */
	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/* process the INIT info (peer's info) */
	if (netp)
		retval = sctp_process_init(init_cp, stcb);
	else
		retval = 0;
	if (retval < 0) {
		/*
		 * NOTE(review): this path reuses SCTP_LOC_16, the same
		 * location code as the scope-mismatch abort above -- a
		 * distinct code would make the debug trail unambiguous.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* load all addresses */
	if (sctp_load_addresses_from_init(stcb, m,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
	    init_src)) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		auth = (struct sctp_auth_chunk *)
		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
			/* auth HMAC failed, dump the assoc and packet */
			SCTPDBG(SCTP_DEBUG_AUTH1,
			    "COOKIE-ECHO: AUTH failed\n");
			atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
#endif
			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			return (NULL);
		} else {
			/* remaining chunks checked... good to go */
			stcb->asoc.authenticated = 1;
		}
	}
	/* update current state */
	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
		    stcb->sctp_ep, stcb, asoc->primary_destination);
	}
	sctp_stop_all_cookie_timers(stcb);
	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
	SCTP_STAT_INCR_GAUGE32(sctps_currestab);

	/*
	 * if we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to get added to the peer (eg. addresses
	 * changed while cookie echo in flight). This needs to be done
	 * after we go to the OPEN state to do the correct asconf
	 * processing. else, make sure we have the correct addresses in our
	 * lists
	 */

	/* warning, we re-use sin, sin6, sa_store here! */
	/* pull in local_address (our "from" address) */
	switch (cookie->laddr_type) {
#ifdef INET
	case SCTP_IPV4_ADDRESS:
		/* source addr is IPv4 */
		sin = (struct sockaddr_in *)initack_src;
		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(struct sockaddr_in);
		sin->sin_addr.s_addr = cookie->laddress[0];
		break;
#endif
#ifdef INET6
	case SCTP_IPV6_ADDRESS:
		/* source addr is IPv6 */
		sin6 = (struct sockaddr_in6 *)initack_src;
		memset(sin6, 0, sizeof(*sin6));
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(struct sockaddr_in6);
		sin6->sin6_scope_id = cookie->scope_id;
		memcpy(&sin6->sin6_addr, cookie->laddress,
		    sizeof(sin6->sin6_addr));
		break;
#endif
	default:
		/* unknown local address type in the cookie: tear down */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}

	/* set up to notify upper layer */
	*notification = SCTP_NOTIFY_ASSOC_UP;
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (inp->sctp_socket->so_qlimit == 0)) {
		/*
		 * This is an endpoint that called connect() how it got a
		 * cookie that is NEW is a bit of a mystery. It must be that
		 * the INIT was sent, but before it got there.. a complete
		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
		 * should have went to the other code.. not here.. oh well..
		 * a bit of protection is worth having..
		 */
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return (NULL);
		}
#endif
		soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_socket->so_qlimit)) {
		/*
		 * We don't want to do anything with this one. Since it is
		 * the listening guy. The timer will get started for
		 * accepted connections in the caller.
		 */
		;
	}
	/* since we did not send a HB make sure we don't double things */
	if ((netp) && (*netp))
		(*netp)->hb_responded = 1;

	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
	}
	/* calculate the RTT */
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
	if ((netp) && (*netp)) {
		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
		    &cookie->time_entered, sctp_align_unsafe_makecopy,
		    SCTP_RTT_FROM_NON_DATA);
	}
	/* respond with a COOKIE-ACK */
	sctp_send_cookie_ack(stcb);

	/*
	 * check the address lists for any ASCONFs that need to be sent
	 * AFTER the cookie-ack is sent
	 */
	sctp_check_address_list(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
	    initack_src, cookie->local_scope, cookie->site_scope,
	    cookie->ipv4_scope, cookie->loopback_scope);


	return (stcb);
}

/*
 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e
 * we NEED to make sure we are not already using the vtag. If so we
 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG No middle box bit!
2364 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag, 2365 SCTP_BASE_INFO(hashasocmark))]; 2366 LIST_FOREACH(stcb, head, sctp_asocs) { 2367 if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) { 2368 -- SEND ABORT - TRY AGAIN -- 2369 } 2370 } 2371*/ 2372 2373/* 2374 * handles a COOKIE-ECHO message stcb: modified to either a new or left as 2375 * existing (non-NULL) TCB 2376 */ 2377static struct mbuf * 2378sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 2379 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 2380 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 2381 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2382 struct sctp_tcb **locked_tcb, 2383 uint8_t use_mflowid, uint32_t mflowid, 2384 uint32_t vrf_id, uint16_t port) 2385{ 2386 struct sctp_state_cookie *cookie; 2387 struct sctp_tcb *l_stcb = *stcb; 2388 struct sctp_inpcb *l_inp; 2389 struct sockaddr *to; 2390 struct sctp_pcb *ep; 2391 struct mbuf *m_sig; 2392 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 2393 uint8_t *sig; 2394 uint8_t cookie_ok = 0; 2395 unsigned int size_of_pkt, sig_offset, cookie_offset; 2396 unsigned int cookie_len; 2397 struct timeval now; 2398 struct timeval time_expires; 2399 struct sockaddr_storage dest_store; 2400 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store; 2401 struct ip *iph; 2402 int notification = 0; 2403 struct sctp_nets *netl; 2404 int had_a_existing_tcb = 0; 2405 int send_int_conf = 0; 2406 2407#ifdef INET 2408 struct sockaddr_in sin; 2409 2410#endif 2411#ifdef INET6 2412 struct sockaddr_in6 sin6; 2413 2414#endif 2415 2416 SCTPDBG(SCTP_DEBUG_INPUT2, 2417 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 2418 2419 if (inp_p == NULL) { 2420 return (NULL); 2421 } 2422 /* First get the destination address setup too. 
*/ 2423 iph = mtod(m, struct ip *); 2424 switch (iph->ip_v) { 2425#ifdef INET 2426 case IPVERSION: 2427 { 2428 /* its IPv4 */ 2429 struct sockaddr_in *lsin; 2430 2431 lsin = (struct sockaddr_in *)(localep_sa); 2432 memset(lsin, 0, sizeof(*lsin)); 2433 lsin->sin_family = AF_INET; 2434 lsin->sin_len = sizeof(*lsin); 2435 lsin->sin_port = sh->dest_port; 2436 lsin->sin_addr.s_addr = iph->ip_dst.s_addr; 2437 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph); 2438 break; 2439 } 2440#endif 2441#ifdef INET6 2442 case IPV6_VERSION >> 4: 2443 { 2444 /* its IPv6 */ 2445 struct ip6_hdr *ip6; 2446 struct sockaddr_in6 *lsin6; 2447 2448 lsin6 = (struct sockaddr_in6 *)(localep_sa); 2449 memset(lsin6, 0, sizeof(*lsin6)); 2450 lsin6->sin6_family = AF_INET6; 2451 lsin6->sin6_len = sizeof(struct sockaddr_in6); 2452 ip6 = mtod(m, struct ip6_hdr *); 2453 lsin6->sin6_port = sh->dest_port; 2454 lsin6->sin6_addr = ip6->ip6_dst; 2455 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen; 2456 break; 2457 } 2458#endif 2459 default: 2460 return (NULL); 2461 } 2462 2463 cookie = &cp->cookie; 2464 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 2465 cookie_len = ntohs(cp->ch.chunk_length); 2466 2467 if ((cookie->peerport != sh->src_port) && 2468 (cookie->myport != sh->dest_port) && 2469 (cookie->my_vtag != sh->v_tag)) { 2470 /* 2471 * invalid ports or bad tag. Note that we always leave the 2472 * v_tag in the header in network order and when we stored 2473 * it in the my_vtag slot we also left it in network order. 2474 * This maintains the match even though it may be in the 2475 * opposite byte order of the machine :-> 2476 */ 2477 return (NULL); 2478 } 2479 if (cookie_len > size_of_pkt || 2480 cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 2481 sizeof(struct sctp_init_chunk) + 2482 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 2483 /* cookie too long! 
or too small */ 2484 return (NULL); 2485 } 2486 /* 2487 * split off the signature into its own mbuf (since it should not be 2488 * calculated in the sctp_hmac_m() call). 2489 */ 2490 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 2491 if (sig_offset > size_of_pkt) { 2492 /* packet not correct size! */ 2493 /* XXX this may already be accounted for earlier... */ 2494 return (NULL); 2495 } 2496 m_sig = m_split(m, sig_offset, M_DONTWAIT); 2497 if (m_sig == NULL) { 2498 /* out of memory or ?? */ 2499 return (NULL); 2500 } 2501#ifdef SCTP_MBUF_LOGGING 2502 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2503 struct mbuf *mat; 2504 2505 for (mat = m_sig; mat; mat = SCTP_BUF_NEXT(mat)) { 2506 if (SCTP_BUF_IS_EXTENDED(mat)) { 2507 sctp_log_mb(mat, SCTP_MBUF_SPLIT); 2508 } 2509 } 2510 } 2511#endif 2512 2513 /* 2514 * compute the signature/digest for the cookie 2515 */ 2516 ep = &(*inp_p)->sctp_ep; 2517 l_inp = *inp_p; 2518 if (l_stcb) { 2519 SCTP_TCB_UNLOCK(l_stcb); 2520 } 2521 SCTP_INP_RLOCK(l_inp); 2522 if (l_stcb) { 2523 SCTP_TCB_LOCK(l_stcb); 2524 } 2525 /* which cookie is it? 
*/ 2526 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 2527 (ep->current_secret_number != ep->last_secret_number)) { 2528 /* it's the old cookie */ 2529 (void)sctp_hmac_m(SCTP_HMAC, 2530 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2531 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2532 } else { 2533 /* it's the current cookie */ 2534 (void)sctp_hmac_m(SCTP_HMAC, 2535 (uint8_t *) ep->secret_key[(int)ep->current_secret_number], 2536 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2537 } 2538 /* get the signature */ 2539 SCTP_INP_RUNLOCK(l_inp); 2540 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 2541 if (sig == NULL) { 2542 /* couldn't find signature */ 2543 sctp_m_freem(m_sig); 2544 return (NULL); 2545 } 2546 /* compare the received digest with the computed digest */ 2547 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 2548 /* try the old cookie? */ 2549 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 2550 (ep->current_secret_number != ep->last_secret_number)) { 2551 /* compute digest with old */ 2552 (void)sctp_hmac_m(SCTP_HMAC, 2553 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2554 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2555 /* compare */ 2556 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 2557 cookie_ok = 1; 2558 } 2559 } else { 2560 cookie_ok = 1; 2561 } 2562 2563 /* 2564 * Now before we continue we must reconstruct our mbuf so that 2565 * normal processing of any other chunks will work. 
2566 */ 2567 { 2568 struct mbuf *m_at; 2569 2570 m_at = m; 2571 while (SCTP_BUF_NEXT(m_at) != NULL) { 2572 m_at = SCTP_BUF_NEXT(m_at); 2573 } 2574 SCTP_BUF_NEXT(m_at) = m_sig; 2575 } 2576 2577 if (cookie_ok == 0) { 2578 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 2579 SCTPDBG(SCTP_DEBUG_INPUT2, 2580 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 2581 (uint32_t) offset, cookie_offset, sig_offset); 2582 return (NULL); 2583 } 2584 /* 2585 * check the cookie timestamps to be sure it's not stale 2586 */ 2587 (void)SCTP_GETTIME_TIMEVAL(&now); 2588 /* Expire time is in Ticks, so we convert to seconds */ 2589 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); 2590 time_expires.tv_usec = cookie->time_entered.tv_usec; 2591 /* 2592 * TODO sctp_constants.h needs alternative time macros when _KERNEL 2593 * is undefined. 2594 */ 2595 if (timevalcmp(&now, &time_expires, >)) { 2596 /* cookie is stale! */ 2597 struct mbuf *op_err; 2598 struct sctp_stale_cookie_msg *scm; 2599 uint32_t tim; 2600 2601 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg), 2602 0, M_DONTWAIT, 1, MT_DATA); 2603 if (op_err == NULL) { 2604 /* FOOBAR */ 2605 return (NULL); 2606 } 2607 /* Set the len */ 2608 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg); 2609 scm = mtod(op_err, struct sctp_stale_cookie_msg *); 2610 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE); 2611 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) + 2612 (sizeof(uint32_t)))); 2613 /* seconds to usec */ 2614 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 2615 /* add in usec */ 2616 if (tim == 0) 2617 tim = now.tv_usec - cookie->time_entered.tv_usec; 2618 scm->time_usec = htonl(tim); 2619 sctp_send_operr_to(m, sh, cookie->peers_vtag, op_err, 2620 use_mflowid, mflowid, 2621 vrf_id, port); 2622 return (NULL); 2623 } 2624 /* 2625 * Now we must see with the lookup address if we have an existing 2626 * asoc. 
This will only happen if we were in the COOKIE-WAIT state 2627 * and a INIT collided with us and somewhere the peer sent the 2628 * cookie on another address besides the single address our assoc 2629 * had for him. In this case we will have one of the tie-tags set at 2630 * least AND the address field in the cookie can be used to look it 2631 * up. 2632 */ 2633 to = NULL; 2634 switch (cookie->addr_type) { 2635#ifdef INET6 2636 case SCTP_IPV6_ADDRESS: 2637 memset(&sin6, 0, sizeof(sin6)); 2638 sin6.sin6_family = AF_INET6; 2639 sin6.sin6_len = sizeof(sin6); 2640 sin6.sin6_port = sh->src_port; 2641 sin6.sin6_scope_id = cookie->scope_id; 2642 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 2643 sizeof(sin6.sin6_addr.s6_addr)); 2644 to = (struct sockaddr *)&sin6; 2645 break; 2646#endif 2647#ifdef INET 2648 case SCTP_IPV4_ADDRESS: 2649 memset(&sin, 0, sizeof(sin)); 2650 sin.sin_family = AF_INET; 2651 sin.sin_len = sizeof(sin); 2652 sin.sin_port = sh->src_port; 2653 sin.sin_addr.s_addr = cookie->address[0]; 2654 to = (struct sockaddr *)&sin; 2655 break; 2656#endif 2657 default: 2658 /* This should not happen */ 2659 return (NULL); 2660 } 2661 if ((*stcb == NULL) && to) { 2662 /* Yep, lets check */ 2663 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL); 2664 if (*stcb == NULL) { 2665 /* 2666 * We should have only got back the same inp. If we 2667 * got back a different ep we have a problem. The 2668 * original findep got back l_inp and now 2669 */ 2670 if (l_inp != *inp_p) { 2671 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2672 } 2673 } else { 2674 if (*locked_tcb == NULL) { 2675 /* 2676 * In this case we found the assoc only 2677 * after we locked the create lock. This 2678 * means we are in a colliding case and we 2679 * must make sure that we unlock the tcb if 2680 * its one of the cases where we throw away 2681 * the incoming packets. 
2682 */ 2683 *locked_tcb = *stcb; 2684 2685 /* 2686 * We must also increment the inp ref count 2687 * since the ref_count flags was set when we 2688 * did not find the TCB, now we found it 2689 * which reduces the refcount.. we must 2690 * raise it back out to balance it all :-) 2691 */ 2692 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2693 if ((*stcb)->sctp_ep != l_inp) { 2694 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n", 2695 (*stcb)->sctp_ep, l_inp); 2696 } 2697 } 2698 } 2699 } 2700 if (to == NULL) { 2701 return (NULL); 2702 } 2703 cookie_len -= SCTP_SIGNATURE_SIZE; 2704 if (*stcb == NULL) { 2705 /* this is the "normal" case... get a new TCB */ 2706 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie, 2707 cookie_len, *inp_p, netp, to, ¬ification, 2708 auth_skipped, auth_offset, auth_len, 2709 use_mflowid, mflowid, 2710 vrf_id, port); 2711 } else { 2712 /* this is abnormal... cookie-echo on existing TCB */ 2713 had_a_existing_tcb = 1; 2714 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh, 2715 cookie, cookie_len, *inp_p, *stcb, netp, to, 2716 ¬ification, auth_skipped, auth_offset, auth_len, 2717 use_mflowid, mflowid, 2718 vrf_id, port); 2719 } 2720 2721 if (*stcb == NULL) { 2722 /* still no TCB... must be bad cookie-echo */ 2723 return (NULL); 2724 } 2725 if ((*netp != NULL) && (use_mflowid != 0)) { 2726 (*netp)->flowid = mflowid; 2727#ifdef INVARIANTS 2728 (*netp)->flowidset = 1; 2729#endif 2730 } 2731 /* 2732 * Ok, we built an association so confirm the address we sent the 2733 * INIT-ACK to. 2734 */ 2735 netl = sctp_findnet(*stcb, to); 2736 /* 2737 * This code should in theory NOT run but 2738 */ 2739 if (netl == NULL) { 2740 /* TSNH! Huh, why do I need to add this address here? 
*/ 2741 if (sctp_add_remote_addr(*stcb, to, NULL, SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) { 2742 return (NULL); 2743 } 2744 netl = sctp_findnet(*stcb, to); 2745 } 2746 if (netl) { 2747 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2748 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2749 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2750 netl); 2751 send_int_conf = 1; 2752 } 2753 } 2754 sctp_start_net_timers(*stcb); 2755 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2756 if (!had_a_existing_tcb || 2757 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2758 /* 2759 * If we have a NEW cookie or the connect never 2760 * reached the connected state during collision we 2761 * must do the TCP accept thing. 2762 */ 2763 struct socket *so, *oso; 2764 struct sctp_inpcb *inp; 2765 2766 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2767 /* 2768 * For a restart we will keep the same 2769 * socket, no need to do anything. I THINK!! 2770 */ 2771 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2772 if (send_int_conf) { 2773 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2774 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2775 } 2776 return (m); 2777 } 2778 oso = (*inp_p)->sctp_socket; 2779 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2780 SCTP_TCB_UNLOCK((*stcb)); 2781 CURVNET_SET(oso->so_vnet); 2782 so = sonewconn(oso, 0 2783 ); 2784 CURVNET_RESTORE(); 2785 SCTP_TCB_LOCK((*stcb)); 2786 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2787 2788 if (so == NULL) { 2789 struct mbuf *op_err; 2790 2791#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2792 struct socket *pcb_so; 2793 2794#endif 2795 /* Too many sockets */ 2796 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2797 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2798 sctp_abort_association(*inp_p, NULL, m, iphlen, 2799 sh, op_err, 2800 use_mflowid, mflowid, 2801 vrf_id, port); 2802#if defined (__APPLE__) || 
defined(SCTP_SO_LOCK_TESTING) 2803 pcb_so = SCTP_INP_SO(*inp_p); 2804 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2805 SCTP_TCB_UNLOCK((*stcb)); 2806 SCTP_SOCKET_LOCK(pcb_so, 1); 2807 SCTP_TCB_LOCK((*stcb)); 2808 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2809#endif 2810 (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); 2811#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2812 SCTP_SOCKET_UNLOCK(pcb_so, 1); 2813#endif 2814 return (NULL); 2815 } 2816 inp = (struct sctp_inpcb *)so->so_pcb; 2817 SCTP_INP_INCR_REF(inp); 2818 /* 2819 * We add the unbound flag here so that if we get an 2820 * soabort() before we get the move_pcb done, we 2821 * will properly cleanup. 2822 */ 2823 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2824 SCTP_PCB_FLAGS_CONNECTED | 2825 SCTP_PCB_FLAGS_IN_TCPPOOL | 2826 SCTP_PCB_FLAGS_UNBOUND | 2827 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 2828 SCTP_PCB_FLAGS_DONT_WAKE); 2829 inp->sctp_features = (*inp_p)->sctp_features; 2830 inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features; 2831 inp->sctp_socket = so; 2832 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 2833 inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off; 2834 inp->sctp_ecn_enable = (*inp_p)->sctp_ecn_enable; 2835 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 2836 inp->sctp_context = (*inp_p)->sctp_context; 2837 inp->local_strreset_support = (*inp_p)->local_strreset_support; 2838 inp->inp_starting_point_for_iterator = NULL; 2839 /* 2840 * copy in the authentication parameters from the 2841 * original endpoint 2842 */ 2843 if (inp->sctp_ep.local_hmacs) 2844 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2845 inp->sctp_ep.local_hmacs = 2846 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 2847 if (inp->sctp_ep.local_auth_chunks) 2848 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2849 inp->sctp_ep.local_auth_chunks = 2850 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 2851 2852 /* 2853 * 
Now we must move it from one hash table to 2854 * another and get the tcb in the right place. 2855 */ 2856 2857 /* 2858 * This is where the one-2-one socket is put into 2859 * the accept state waiting for the accept! 2860 */ 2861 if (*stcb) { 2862 (*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE; 2863 } 2864 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 2865 2866 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2867 SCTP_TCB_UNLOCK((*stcb)); 2868 2869 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, 2870 0); 2871 SCTP_TCB_LOCK((*stcb)); 2872 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2873 2874 2875 /* 2876 * now we must check to see if we were aborted while 2877 * the move was going on and the lock/unlock 2878 * happened. 2879 */ 2880 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 2881 /* 2882 * yep it was, we leave the assoc attached 2883 * to the socket since the sctp_inpcb_free() 2884 * call will send an abort for us. 2885 */ 2886 SCTP_INP_DECR_REF(inp); 2887 return (NULL); 2888 } 2889 SCTP_INP_DECR_REF(inp); 2890 /* Switch over to the new guy */ 2891 *inp_p = inp; 2892 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2893 if (send_int_conf) { 2894 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2895 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2896 } 2897 /* 2898 * Pull it from the incomplete queue and wake the 2899 * guy 2900 */ 2901#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2902 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2903 SCTP_TCB_UNLOCK((*stcb)); 2904 SCTP_SOCKET_LOCK(so, 1); 2905#endif 2906 soisconnected(so); 2907#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2908 SCTP_TCB_LOCK((*stcb)); 2909 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2910 SCTP_SOCKET_UNLOCK(so, 1); 2911#endif 2912 return (m); 2913 } 2914 } 2915 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) { 2916 if (notification) { 2917 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2918 } 2919 if (send_int_conf) { 2920 
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
			    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
		}
	}
	return (m);
}

/*
 * Handle a received COOKIE-ACK: stop any outstanding COOKIE/INIT timers
 * and, if the association is in COOKIE-ECHOED, move it to OPEN, update the
 * RTO, notify the ULP, and start the heartbeat/autoclose timers and any
 * pending ASCONFs.
 */
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;

	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		sctp_start_net_timers(stcb);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		if (asoc->overall_error_count == 0) {
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy,
			    SCTP_RTT_FROM_NON_DATA);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * Lock ordering: take a refcount, drop the TCB
			 * lock, grab the socket lock, then re-take the TCB
			 * lock before calling soisconnected().
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
			if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
				soisconnected(stcb->sctp_socket);
			}
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/*
			 * We don't need to do the asconf thing, nor hb or
			 * autoclose if the socket is closed.
			 */
			goto closed_socket;
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);


		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
closed_socket:
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
	}
}

/*
 * Handle a received ECN-Echo (ECNE) chunk: accept both the RFC form and the
 * old short format, locate the destination net the echoed TSN was sent to,
 * invoke the pluggable CC module's ecn_echo handler at most once per RTT
 * window, and always reply with a CWR.
 */
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	struct sctp_ecne_chunk bkup;
	uint8_t override_bit;
	uint32_t tsn, window_data_tsn;
	int len;
	unsigned int pkt_cnt;

	len = ntohs(cp->ch.chunk_length);
	if ((len != sizeof(struct sctp_ecne_chunk)) &&
	    (len != sizeof(struct old_sctp_ecne_chunk))) {
		return;
	}
	if (len == sizeof(struct old_sctp_ecne_chunk)) {
		/* Its the old format */
		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
		bkup.num_pkts_since_cwr = htonl(1);
		cp = &bkup;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	pkt_cnt = ntohl(cp->num_pkts_since_cwr);
	lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead);
	if (lchk == NULL) {
		window_data_tsn = stcb->asoc.sending_seq - 1;
	} else {
		window_data_tsn = lchk->rec.data.TSN_seq;
	}

	/* Find where it was sent to if possible.
	 */
	net = NULL;
	TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) {
		if (lchk->rec.data.TSN_seq == tsn) {
			net = lchk->whoTo;
			net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send;
			break;
		}
		if (SCTP_TSN_GT(lchk->rec.data.TSN_seq, tsn)) {
			/* sent_queue is TSN ordered; we have passed it */
			break;
		}
	}
	if (net == NULL) {
		/*
		 * What to do. A previous send of a CWR was possibly lost.
		 * See how old it is, we may have it marked on the actual
		 * net.
		 */
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if (tsn == net->last_cwr_tsn) {
				/* Found him, send it off */
				break;
			}
		}
		if (net == NULL) {
			/*
			 * If we reach here, we need to send a special CWR
			 * that says hey, we did this a long time ago and
			 * you lost the response.
			 */
			net = TAILQ_FIRST(&stcb->asoc.nets);
			if (net == NULL) {
				/* TSNH */
				return;
			}
			override_bit = SCTP_CWR_REDUCE_OVERRIDE;
		} else {
			override_bit = 0;
		}
	} else {
		override_bit = 0;
	}
	if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) &&
	    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
		/*
		 * JRS - Use the congestion control given in the pluggable
		 * CC module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt);
		/*
		 * We reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the window_data_tsn
		 */
		net->cwr_window_tsn = window_data_tsn;
		net->ecn_ce_pkt_cnt += pkt_cnt;
		net->lost_cnt = pkt_cnt;
		net->last_cwr_tsn = tsn;
	} else {
		override_bit |= SCTP_CWR_IN_SAME_WINDOW;
		if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) &&
		    ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) {
			/*
			 * Another loss in the same window update how many
			 * marks/packets lost we have had.
			 */
			int cnt = 1;

			if (pkt_cnt > net->lost_cnt) {
				/* Should be the case */
				cnt = (pkt_cnt - net->lost_cnt);
				net->ecn_ce_pkt_cnt += cnt;
			}
			net->lost_cnt = pkt_cnt;
			net->last_cwr_tsn = tsn;
			/*
			 * Most CC functions will ignore this call, since we
			 * are in-window yet of the initial CE the peer saw.
			 */
			stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt);
		}
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer. Note we set the override when we
	 * could not find the TSN on the chunk or the destination network.
	 */
	sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit);
}

static void
sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Here we get a CWR from the peer. We must look in the outqueue and
	 * make sure that we have a covered ECNE in the control chunk part.
	 * If so remove it.
	 */
	struct sctp_tmit_chunk *chk;
	struct sctp_ecne_chunk *ecne;
	int override;
	uint32_t cwr_tsn;

	cwr_tsn = ntohl(cp->tsn);

	override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
			continue;
		}
		if ((override == 0) && (chk->whoTo != net)) {
			/* Must be from the right src unless override is set */
			continue;
		}
		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
		if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
			/* this covers this ECNE, we can remove it */
			stcb->asoc.ecn_echo_cnt_onq--;
			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
			    sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			stcb->asoc.ctrl_queue_cnt--;
			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
			if (override == 0) {
				/* only the matching net's ECNE is removed */
				break;
			}
		}
	}
}

/*
 * Handle a received SHUTDOWN-COMPLETE: only valid in SHUTDOWN-ACK-SENT.
 * Notifies the ULP, flushes any (unexpectedly) remaining outbound data,
 * stops the SHUTDOWN-ACK timer and frees the association.
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* are the queues empty? they should be */
		if (!TAILQ_EMPTY(&asoc->send_queue) ||
		    !TAILQ_EMPTY(&asoc->sent_queue) ||
		    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
			sctp_report_all_outbound(stcb, 0, 0, SCTP_SO_NOT_LOCKED);
		}
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* socket lock must be held around sctp_free_assoc() on these ports */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}

/*
 * Process one chunk descriptor from a PACKET-DROPPED report: re-mark the
 * described chunk for retransmission (DATA), or re-send the matching
 * control chunk directly. Returns 0 on success, -1 if the reflected DATA
 * bytes do not match what we actually sent.
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, tsn)) {
					/* not found */
					tp1 = NULL;
					break;
				}
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->rec.data.TSN_seq == tsn) {
						/* found it */
						break;
					}
				}
			}
			if (tp1 == NULL) {
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				if (((flg & SCTP_BADCRC) == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					return (0);
				}
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				/*
				 * Compare the reflected payload bytes with
				 * what we have queued; a mismatch means the
				 * report cannot be trusted.
				 */
				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
				    sizeof(struct sctp_data_chunk));
				{
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					    iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					if (tp1->whoTo->rto_needed == 0) {
						tp1->whoTo->rto_needed = 1;
					}
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo);

				/* fix counts and things */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) stcb,
					    tp1->rec.data.TSN_seq);
				}
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_decrease(tp1);
					sctp_total_flight_decrease(stcb, tp1);
				}
				tp1->sent = SCTP_DATAGRAM_RESEND;
			} {
				/* audit code */
				unsigned int audit;

				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
				    sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
					    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			struct sctp_tmit_chunk *asconf;

			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
	case SCTP_NR_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/*
			 * Only retransmit if we KNOW we wont destroy the
			 * tcb
			 */
			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			struct sctp_tmit_chunk *cookie;

			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case
	    SCTP_ABORT_ASSOCIATION:
	case SCTP_OPERATION_ERROR:
	case SCTP_SHUTDOWN_COMPLETE:
	case SCTP_ECN_ECHO:
	case SCTP_ECN_CWR:
	default:
		break;
	}
	return (0);
}

/*
 * Reset the incoming stream sequence state for the listed streams (or all
 * streams when number_entries is 0) and notify the ULP.
 */
void
sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
{
	int i;
	uint16_t temp;

	/*
	 * We set things to 0xffff since this is the last delivered sequence
	 * and we will be sending in 0 after the reset.
	 */

	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			temp = ntohs(list[i]);
			if (temp >= stcb->asoc.streamincnt) {
				/* ignore out-of-range stream ids */
				continue;
			}
			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
		}
	} else {
		list = NULL;
		for (i = 0; i < stcb->asoc.streamincnt; i++) {
			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
		}
	}
	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
}

/*
 * Reset the outgoing stream sequence counters for the listed streams (or
 * all streams when number_entries is 0) and notify the ULP.
 */
static void
sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
{
	int i;

	if (number_entries == 0) {
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].next_sequence_sent = 0;
		}
	} else if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			uint16_t temp;

			temp = ntohs(list[i]);
			if (temp >= stcb->asoc.streamoutcnt) {
				/* no such stream */
				continue;
			}
			stcb->asoc.strmout[temp].next_sequence_sent = 0;
		}
	}
	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED);
}


/*
 * Look up the outstanding stream-reset request with sequence 'seq' inside
 * the cached str_reset chunk (a chunk may carry at most two requests).
 * Optionally hands back the chunk pointer via *bchk. Returns NULL when no
 * matching request is outstanding.
 */
struct sctp_stream_reset_out_request *
sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
{
	struct sctp_association *asoc;
	struct sctp_stream_reset_out_req *req;
	struct sctp_stream_reset_out_request *r;
	struct sctp_tmit_chunk *chk;
	int len, clen;

	asoc = &stcb->asoc;
	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	if (stcb->asoc.str_reset == NULL) {
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	chk = stcb->asoc.str_reset;
	if (chk->data == NULL) {
		return (NULL);
	}
	if (bchk) {
		/* he wants a copy of the chk pointer */
		*bchk = chk;
	}
	clen = chk->send_size;
	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
	r = &req->sr_req;
	if (ntohl(r->request_seq) == seq) {
		/* found it */
		return (r);
	}
	len = SCTP_SIZE32(ntohs(r->ph.param_length));
	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
		/* move to the next one, there can only be a max of two */
		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
		if (ntohl(r->request_seq) == seq) {
			return (r);
		}
	}
	/* that seq is not here */
	return (NULL);
}

/*
 * Remove the cached stream-reset chunk from the control queue, stop the
 * STRRESET timer and free the chunk.
 */
static void
sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;

	if (stcb->asoc.str_reset == NULL) {
		return;
	}
	asoc = &stcb->asoc;

	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26);
	TAILQ_REMOVE(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	if (chk->data) {
		sctp_m_freem(chk->data);
		chk->data = NULL;
	}
	asoc->ctrl_queue_cnt--;
	sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	/* sa_ignore NO_NULL_CHK */
	stcb->asoc.str_reset = NULL;
}


/*
 * Process a stream-reset response from the peer for the request we have
 * outstanding (sequence 'seq'). Applies the requested in/out stream or TSN
 * reset on SCTP_STREAM_RESET_RESULT_PERFORMED and notifies the ULP of the
 * outcome. Returns 1 only when a FWD-TSN processed along the way aborted
 * the association, else 0.
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparm_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_out_request *srparam;
	int number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
		return (0);
	}
	if (seq == stcb->asoc.str_reset_seq_out) {
		srparam = sctp_find_stream_reset(stcb, seq, &chk);
		if (srparam) {
			stcb->asoc.str_reset_seq_out++;
			type = ntohs(srparam->ph.param_type);
			lparm_len = ntohs(srparam->ph.param_length);
			if (type == SCTP_STR_RESET_OUT_REQUEST) {
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
				asoc->stream_reset_out_is_outstanding = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					/* do it */
					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
				/* Answered my request */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb,
					    number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb,
					    number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED);
				}
			} else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) {
				/* Ok we now may have more streams */
				int num_stream;

				num_stream = stcb->asoc.strm_pending_add_size;
				if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) {
					/* TSNH */
					num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt;
				}
				stcb->asoc.strm_pending_add_size = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					/* Put the new streams into effect */
					stcb->asoc.streamoutcnt += num_stream;
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_DENIED);
				} else {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_FAILED);
				}
			} else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) {
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_DENIED);
				} else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) {
					sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt,
					    SCTP_STREAM_CHANGE_FAILED);
				}
			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
				/**
				 * a) Adopt the new in tsn.
				 * b) reset the map
				 * c) Adopt the new out-tsn
				 */
				struct sctp_stream_reset_response_tsn *resp;
				struct sctp_forward_tsn_chunk fwdtsn;
				int abort_flag = 0;

				if (respin == NULL) {
					/* huh ? */
					return (0);
				}
				if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) {
					resp = (struct sctp_stream_reset_response_tsn *)respin;
					asoc->stream_reset_outstanding--;
					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
					if (abort_flag) {
						return (1);
					}
					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
						sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
					}
					stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
					memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);

					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;

					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0);
				} else if (action == SCTP_STREAM_RESET_RESULT_DENIED) {
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
					    SCTP_ASSOC_RESET_DENIED);
				} else {
					sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1),
					    SCTP_ASSOC_RESET_FAILED);
				}
			}
			/* get rid of the request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
sctp_clean_up_stream_reset(stcb); 3753 } 3754 } 3755 } 3756 return (0); 3757} 3758 3759static void 3760sctp_handle_str_reset_request_in(struct sctp_tcb *stcb, 3761 struct sctp_tmit_chunk *chk, 3762 struct sctp_stream_reset_in_request *req, int trunc) 3763{ 3764 uint32_t seq; 3765 int len, i; 3766 int number_entries; 3767 uint16_t temp; 3768 3769 /* 3770 * peer wants me to send a str-reset to him for my outgoing seq's if 3771 * seq_in is right. 3772 */ 3773 struct sctp_association *asoc = &stcb->asoc; 3774 3775 seq = ntohl(req->request_seq); 3776 if (asoc->str_reset_seq_in == seq) { 3777 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3778 if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) { 3779 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 3780 } else if (trunc) { 3781 /* Can't do it, since they exceeded our buffer size */ 3782 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 3783 } else if (stcb->asoc.stream_reset_out_is_outstanding == 0) { 3784 len = ntohs(req->ph.param_length); 3785 number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t)); 3786 for (i = 0; i < number_entries; i++) { 3787 temp = ntohs(req->list_of_streams[i]); 3788 req->list_of_streams[i] = temp; 3789 } 3790 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED; 3791 sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams, 3792 asoc->str_reset_seq_out, 3793 seq, (asoc->sending_seq - 1)); 3794 asoc->stream_reset_out_is_outstanding = 1; 3795 asoc->str_reset = chk; 3796 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); 3797 stcb->asoc.stream_reset_outstanding++; 3798 } else { 3799 /* Can't do it, since we have sent one out */ 3800 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS; 3801 } 3802 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3803 asoc->str_reset_seq_in++; 3804 } else if (asoc->str_reset_seq_in - 1 == seq) { 
3805 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3806 } else if (asoc->str_reset_seq_in - 2 == seq) { 3807 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3808 } else { 3809 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO); 3810 } 3811} 3812 3813static int 3814sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb, 3815 struct sctp_tmit_chunk *chk, 3816 struct sctp_stream_reset_tsn_request *req) 3817{ 3818 /* reset all in and out and update the tsn */ 3819 /* 3820 * A) reset my str-seq's on in and out. B) Select a receive next, 3821 * and set cum-ack to it. Also process this selected number as a 3822 * fwd-tsn as well. C) set in the response my next sending seq. 3823 */ 3824 struct sctp_forward_tsn_chunk fwdtsn; 3825 struct sctp_association *asoc = &stcb->asoc; 3826 int abort_flag = 0; 3827 uint32_t seq; 3828 3829 seq = ntohl(req->request_seq); 3830 if (asoc->str_reset_seq_in == seq) { 3831 asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0]; 3832 if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) { 3833 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 3834 } else { 3835 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3836 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3837 fwdtsn.ch.chunk_flags = 0; 3838 fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1); 3839 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3840 if (abort_flag) { 3841 return (1); 3842 } 3843 asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA; 3844 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 3845 sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 3846 } 3847 asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 3848 asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1; 3849 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 3850 
			asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
			memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size);
			atomic_add_int(&asoc->sending_seq, 1);
			/* save off historical data for retrans */
			asoc->last_sending_seq[1] = asoc->last_sending_seq[0];
			asoc->last_sending_seq[0] = asoc->sending_seq;
			asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0];
			asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn;
			/* 0 entries / NULL list == reset all streams. */
			sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
			sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0);
		}
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* Retransmission: replay cached result and TSN data. */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
	return (0);
}

/*
 * Handle a peer's Outgoing SSN Reset Request parameter: the peer is
 * resetting (some of) ITS outgoing streams, i.e. OUR incoming streams.
 * If the reset point (send_reset_at_tsn) has already been reached we
 * reset immediately; otherwise the request is copied and queued on
 * asoc->resetHead until the TSNs up to that point have arrived.
 */
static void
sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_out_request *req, int trunc)
{
	uint32_t seq, tsn;
	int number_entries, len;
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);

	/* now if its not a duplicate we process it */
	if (asoc->str_reset_seq_in == seq) {
		len = ntohs(req->ph.param_length);
		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
		/*
		 * the sender is resetting, handle the list issue.. we must
		 * a) verify if we can do the reset, if so no problem b) If
		 * we can't do the reset we must copy the request. c) queue
		 * it, and setup the data in processor to trigger it off
		 * when needed and dequeue all the queued data.
		 */
		tsn = ntohl(req->send_reset_at_tsn);

		/* move the reset action back one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* Request was too large for our parameter buffer. */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
			/* we can do it now */
			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		} else {
			/*
			 * we must queue it up and thus wait for the TSN's
			 * to arrive that are at or before tsn
			 */
			struct sctp_stream_reset_list *liste;
			int siz;

			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
			    siz, SCTP_M_STRESET);
			if (liste == NULL) {
				/* gak out of memory */
				asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
				sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
				return;
			}
			liste->tsn = tsn;
			liste->number_entries = number_entries;
			memcpy(&liste->req, req,
			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
}

/*
 * Handle an Add Outgoing Streams request from the peer (parameter type
 * SCTP_STR_RESET_ADD_OUT_STREAMS -- see the dispatch in
 * sctp_handle_stream_reset): the peer wants to grow ITS outgoing stream
 * count, so we must grow OUR incoming stream array.
 */
static void
sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams. If its within our
	 * max-streams we will allow it.
	 */
	uint32_t num_stream, i;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_queued_to_read *ctl, *nctl;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total?
	 */
	if (asoc->str_reset_seq_in == seq) {
		/* num_stream becomes the proposed new incoming total. */
		num_stream += stcb->asoc.streamincnt;
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if ((num_stream > stcb->asoc.max_inbound_streams) ||
		    (num_stream > 0xffff)) {
			/* We must reject it they ask for too many */
	denied:
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/* Ok, we can do that :-) */
			struct sctp_stream_in *oldstrm;

			/* save off the old */
			oldstrm = stcb->asoc.strmin;
			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
			    (num_stream * sizeof(struct sctp_stream_in)),
			    SCTP_M_STRMI);
			if (stcb->asoc.strmin == NULL) {
				/* Allocation failed: keep the old array and deny. */
				stcb->asoc.strmin = oldstrm;
				goto denied;
			}
			/* copy off the old data */
			for (i = 0; i < stcb->asoc.streamincnt; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				stcb->asoc.strmin[i].stream_no = i;
				stcb->asoc.strmin[i].last_sequence_delivered = oldstrm[i].last_sequence_delivered;
				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
				/* now anything on those queues? */
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next, nctl) {
					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next);
				}
			}
			/* Init the new streams */
			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				stcb->asoc.strmin[i].stream_no = i;
				stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
				stcb->asoc.strmin[i].delivery_started = 0;
			}
			SCTP_FREE(oldstrm, SCTP_M_STRMI);
			/* update the size */
			stcb->asoc.streamincnt = num_stream;
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);

	}
}

/*
 * Handle an Add Incoming Streams request from the peer (parameter type
 * SCTP_STR_RESET_ADD_IN_STREAMS -- see the dispatch in
 * sctp_handle_stream_reset): the peer wants more streams towards itself,
 * so we respond by requesting that many additional OUTGOING streams of
 * our own via sctp_send_str_reset_req().
 */
static void
sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams. If its within our
	 * max-streams we will allow it.
	 */
	uint16_t num_stream;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (stcb->asoc.stream_reset_outstanding) {
			/* We must reject it we have something pending */
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
		} else {
			/* Ok, we can do that :-) */
			int mychk;

			mychk = stcb->asoc.streamoutcnt;
			mychk += num_stream;
			if (mychk < 0x10000) {
				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
				if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 0, 1, num_stream, 0, 1)) {
					stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
				}
			} else {
				/* Would exceed the 16-bit stream id space. */
				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
			}
		}
		sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
}

/*
 * Walk every parameter of a received STREAM-RESET chunk, dispatch each
 * request to its handler, and collect all replies into a single response
 * chunk that is queued on the association's control queue.  Returns 1 if
 * the association was torn down while processing (caller must stop using
 * stcb), 0 otherwise.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
static int
sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
    struct sctp_stream_reset_out_req *sr_req)
{
	int chk_length, param_len, ptype;
	struct sctp_paramhdr pstore;	/* scratch for peeking at a paramhdr */
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];	/* holds one full (maybe truncated) param */
	uint32_t seq = 0;
	int num_req = 0;
	int trunc = 0;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	chk_length = ntohs(sr_req->ch.chunk_length);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
		/*
		 * strres_nochunk is also the common bail-out target for
		 * later failure paths (goto from below); it frees any data
		 * mbuf plus the chunk itself.
		 */
strres_nochunk:
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = NULL;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset += sizeof(struct sctp_chunkhdr);
	/* The smallest valid reset parameter is a TSN request. */
	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
		if (ph == NULL)
			break;
		param_len = ntohs(ph->param_length);
		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
			/* bad param */
			break;
		}
		/* Pull the whole parameter (capped at our buffer size). */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
		    (uint8_t *) & cstore);
		ptype = ntohs(ph->param_type);
		num_param++;
		if (param_len > (int)sizeof(cstore)) {
			/* Didn't fit: handlers see a truncated copy. */
			trunc = 1;
		} else {
			trunc = 0;
		}
		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry.. */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
			struct sctp_stream_reset_add_strm *str_add;

			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;
			req_in = (struct sctp_stream_reset_in_request *)ph;
			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				/* Association aborted during processing. */
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			break;
		}
		offset += SCTP_SIZE32(param_len);
		chk_length -= SCTP_SIZE32(param_len);
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}

/*
 * Handle a router or endpoints report of a packet loss, there are two ways
 * to handle this, either we get the whole packet and must dissect it
 * ourselves (possibly with truncation and or corruption) or it is a summary
 * from a middle box that did the dissecting for us.
4251 */ 4252static void 4253sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp, 4254 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit) 4255{ 4256 uint32_t bottle_bw, on_queue; 4257 uint16_t trunc_len; 4258 unsigned int chlen; 4259 unsigned int at; 4260 struct sctp_chunk_desc desc; 4261 struct sctp_chunkhdr *ch; 4262 4263 chlen = ntohs(cp->ch.chunk_length); 4264 chlen -= sizeof(struct sctp_pktdrop_chunk); 4265 /* XXX possible chlen underflow */ 4266 if (chlen == 0) { 4267 ch = NULL; 4268 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) 4269 SCTP_STAT_INCR(sctps_pdrpbwrpt); 4270 } else { 4271 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr)); 4272 chlen -= sizeof(struct sctphdr); 4273 /* XXX possible chlen underflow */ 4274 memset(&desc, 0, sizeof(desc)); 4275 } 4276 trunc_len = (uint16_t) ntohs(cp->trunc_len); 4277 if (trunc_len > limit) { 4278 trunc_len = limit; 4279 } 4280 /* now the chunks themselves */ 4281 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) { 4282 desc.chunk_type = ch->chunk_type; 4283 /* get amount we need to move */ 4284 at = ntohs(ch->chunk_length); 4285 if (at < sizeof(struct sctp_chunkhdr)) { 4286 /* corrupt chunk, maybe at the end? */ 4287 SCTP_STAT_INCR(sctps_pdrpcrupt); 4288 break; 4289 } 4290 if (trunc_len == 0) { 4291 /* we are supposed to have all of it */ 4292 if (at > chlen) { 4293 /* corrupt skip it */ 4294 SCTP_STAT_INCR(sctps_pdrpcrupt); 4295 break; 4296 } 4297 } else { 4298 /* is there enough of it left ? */ 4299 if (desc.chunk_type == SCTP_DATA) { 4300 if (chlen < (sizeof(struct sctp_data_chunk) + 4301 sizeof(desc.data_bytes))) { 4302 break; 4303 } 4304 } else { 4305 if (chlen < sizeof(struct sctp_chunkhdr)) { 4306 break; 4307 } 4308 } 4309 } 4310 if (desc.chunk_type == SCTP_DATA) { 4311 /* can we get out the tsn? 
*/ 4312 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 4313 SCTP_STAT_INCR(sctps_pdrpmbda); 4314 4315 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) { 4316 /* yep */ 4317 struct sctp_data_chunk *dcp; 4318 uint8_t *ddp; 4319 unsigned int iii; 4320 4321 dcp = (struct sctp_data_chunk *)ch; 4322 ddp = (uint8_t *) (dcp + 1); 4323 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) { 4324 desc.data_bytes[iii] = ddp[iii]; 4325 } 4326 desc.tsn_ifany = dcp->dp.tsn; 4327 } else { 4328 /* nope we are done. */ 4329 SCTP_STAT_INCR(sctps_pdrpnedat); 4330 break; 4331 } 4332 } else { 4333 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 4334 SCTP_STAT_INCR(sctps_pdrpmbct); 4335 } 4336 4337 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) { 4338 SCTP_STAT_INCR(sctps_pdrppdbrk); 4339 break; 4340 } 4341 if (SCTP_SIZE32(at) > chlen) { 4342 break; 4343 } 4344 chlen -= SCTP_SIZE32(at); 4345 if (chlen < sizeof(struct sctp_chunkhdr)) { 4346 /* done, none left */ 4347 break; 4348 } 4349 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at)); 4350 } 4351 /* Now update any rwnd --- possibly */ 4352 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) { 4353 /* From a peer, we get a rwnd report */ 4354 uint32_t a_rwnd; 4355 4356 SCTP_STAT_INCR(sctps_pdrpfehos); 4357 4358 bottle_bw = ntohl(cp->bottle_bw); 4359 on_queue = ntohl(cp->current_onq); 4360 if (bottle_bw && on_queue) { 4361 /* a rwnd report is in here */ 4362 if (bottle_bw > on_queue) 4363 a_rwnd = bottle_bw - on_queue; 4364 else 4365 a_rwnd = 0; 4366 4367 if (a_rwnd == 0) 4368 stcb->asoc.peers_rwnd = 0; 4369 else { 4370 if (a_rwnd > stcb->asoc.total_flight) { 4371 stcb->asoc.peers_rwnd = 4372 a_rwnd - stcb->asoc.total_flight; 4373 } else { 4374 stcb->asoc.peers_rwnd = 0; 4375 } 4376 if (stcb->asoc.peers_rwnd < 4377 stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4378 /* SWS sender side engages */ 4379 stcb->asoc.peers_rwnd = 0; 4380 } 4381 } 4382 } 4383 } else { 4384 SCTP_STAT_INCR(sctps_pdrpfmbox); 
4385 } 4386 4387 /* now middle boxes in sat networks get a cwnd bump */ 4388 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) && 4389 (stcb->asoc.sat_t3_loss_recovery == 0) && 4390 (stcb->asoc.sat_network)) { 4391 /* 4392 * This is debateable but for sat networks it makes sense 4393 * Note if a T3 timer has went off, we will prohibit any 4394 * changes to cwnd until we exit the t3 loss recovery. 4395 */ 4396 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb, 4397 net, cp, &bottle_bw, &on_queue); 4398 } 4399} 4400 4401/* 4402 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to 4403 * still contain IP/SCTP header - stcb: is the tcb found for this packet - 4404 * offset: offset into the mbuf chain to first chunkhdr - length: is the 4405 * length of the complete packet outputs: - length: modified to remaining 4406 * length after control processing - netp: modified to new sctp_nets after 4407 * cookie-echo processing - return NULL to discard the packet (ie. no asoc, 4408 * bad packet,...) otherwise return the tcb for this packet 4409 */ 4410#ifdef __GNUC__ 4411__attribute__((noinline)) 4412#endif 4413 static struct sctp_tcb * 4414 sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, 4415 struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp, 4416 struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen, 4417 uint8_t use_mflowid, uint32_t mflowid, 4418 uint32_t vrf_id, uint16_t port) 4419{ 4420 struct sctp_association *asoc; 4421 uint32_t vtag_in; 4422 int num_chunks = 0; /* number of control chunks processed */ 4423 uint32_t chk_length; 4424 int ret; 4425 int abort_no_unlock = 0; 4426 int ecne_seen = 0; 4427 4428 /* 4429 * How big should this be, and should it be alloc'd? Lets try the 4430 * d-mtu-ceiling for now (2k) and that should hopefully work ... 4431 * until we get into jumbo grams and such.. 
4432 */ 4433 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE]; 4434 struct sctp_tcb *locked_tcb = stcb; 4435 int got_auth = 0; 4436 uint32_t auth_offset = 0, auth_len = 0; 4437 int auth_skipped = 0; 4438 int asconf_cnt = 0; 4439 4440#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4441 struct socket *so; 4442 4443#endif 4444 4445 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n", 4446 iphlen, *offset, length, stcb); 4447 4448 /* validate chunk header length... */ 4449 if (ntohs(ch->chunk_length) < sizeof(*ch)) { 4450 SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n", 4451 ntohs(ch->chunk_length)); 4452 if (locked_tcb) { 4453 SCTP_TCB_UNLOCK(locked_tcb); 4454 } 4455 return (NULL); 4456 } 4457 /* 4458 * validate the verification tag 4459 */ 4460 vtag_in = ntohl(sh->v_tag); 4461 4462 if (locked_tcb) { 4463 SCTP_TCB_LOCK_ASSERT(locked_tcb); 4464 } 4465 if (ch->chunk_type == SCTP_INITIATION) { 4466 SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n", 4467 ntohs(ch->chunk_length), vtag_in); 4468 if (vtag_in != 0) { 4469 /* protocol error- silently discard... */ 4470 SCTP_STAT_INCR(sctps_badvtag); 4471 if (locked_tcb) { 4472 SCTP_TCB_UNLOCK(locked_tcb); 4473 } 4474 return (NULL); 4475 } 4476 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) { 4477 /* 4478 * If there is no stcb, skip the AUTH chunk and process 4479 * later after a stcb is found (to validate the lookup was 4480 * valid. 
4481 */ 4482 if ((ch->chunk_type == SCTP_AUTHENTICATION) && 4483 (stcb == NULL) && 4484 !SCTP_BASE_SYSCTL(sctp_auth_disable)) { 4485 /* save this chunk for later processing */ 4486 auth_skipped = 1; 4487 auth_offset = *offset; 4488 auth_len = ntohs(ch->chunk_length); 4489 4490 /* (temporarily) move past this chunk */ 4491 *offset += SCTP_SIZE32(auth_len); 4492 if (*offset >= length) { 4493 /* no more data left in the mbuf chain */ 4494 *offset = length; 4495 if (locked_tcb) { 4496 SCTP_TCB_UNLOCK(locked_tcb); 4497 } 4498 return (NULL); 4499 } 4500 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4501 sizeof(struct sctp_chunkhdr), chunk_buf); 4502 } 4503 if (ch == NULL) { 4504 /* Help */ 4505 *offset = length; 4506 if (locked_tcb) { 4507 SCTP_TCB_UNLOCK(locked_tcb); 4508 } 4509 return (NULL); 4510 } 4511 if (ch->chunk_type == SCTP_COOKIE_ECHO) { 4512 goto process_control_chunks; 4513 } 4514 /* 4515 * first check if it's an ASCONF with an unknown src addr we 4516 * need to look inside to find the association 4517 */ 4518 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) { 4519 struct sctp_chunkhdr *asconf_ch = ch; 4520 uint32_t asconf_offset = 0, asconf_len = 0; 4521 4522 /* inp's refcount may be reduced */ 4523 SCTP_INP_INCR_REF(inp); 4524 4525 asconf_offset = *offset; 4526 do { 4527 asconf_len = ntohs(asconf_ch->chunk_length); 4528 if (asconf_len < sizeof(struct sctp_asconf_paramhdr)) 4529 break; 4530 stcb = sctp_findassociation_ep_asconf(m, 4531 *offset, sh, &inp, netp, vrf_id); 4532 if (stcb != NULL) 4533 break; 4534 asconf_offset += SCTP_SIZE32(asconf_len); 4535 asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset, 4536 sizeof(struct sctp_chunkhdr), chunk_buf); 4537 } while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF); 4538 if (stcb == NULL) { 4539 /* 4540 * reduce inp's refcount if not reduced in 4541 * sctp_findassociation_ep_asconf(). 
4542 */ 4543 SCTP_INP_DECR_REF(inp); 4544 } else { 4545 locked_tcb = stcb; 4546 } 4547 4548 /* now go back and verify any auth chunk to be sure */ 4549 if (auth_skipped && (stcb != NULL)) { 4550 struct sctp_auth_chunk *auth; 4551 4552 auth = (struct sctp_auth_chunk *) 4553 sctp_m_getptr(m, auth_offset, 4554 auth_len, chunk_buf); 4555 got_auth = 1; 4556 auth_skipped = 0; 4557 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, 4558 auth_offset)) { 4559 /* auth HMAC failed so dump it */ 4560 *offset = length; 4561 if (locked_tcb) { 4562 SCTP_TCB_UNLOCK(locked_tcb); 4563 } 4564 return (NULL); 4565 } else { 4566 /* remaining chunks are HMAC checked */ 4567 stcb->asoc.authenticated = 1; 4568 } 4569 } 4570 } 4571 if (stcb == NULL) { 4572 /* no association, so it's out of the blue... */ 4573 sctp_handle_ootb(m, iphlen, *offset, sh, inp, 4574 use_mflowid, mflowid, 4575 vrf_id, port); 4576 *offset = length; 4577 if (locked_tcb) { 4578 SCTP_TCB_UNLOCK(locked_tcb); 4579 } 4580 return (NULL); 4581 } 4582 asoc = &stcb->asoc; 4583 /* ABORT and SHUTDOWN can use either v_tag... */ 4584 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) || 4585 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) || 4586 (ch->chunk_type == SCTP_PACKET_DROPPED)) { 4587 if ((vtag_in == asoc->my_vtag) || 4588 ((ch->chunk_flags & SCTP_HAD_NO_TCB) && 4589 (vtag_in == asoc->peer_vtag))) { 4590 /* this is valid */ 4591 } else { 4592 /* drop this packet... 
*/ 4593 SCTP_STAT_INCR(sctps_badvtag); 4594 if (locked_tcb) { 4595 SCTP_TCB_UNLOCK(locked_tcb); 4596 } 4597 return (NULL); 4598 } 4599 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 4600 if (vtag_in != asoc->my_vtag) { 4601 /* 4602 * this could be a stale SHUTDOWN-ACK or the 4603 * peer never got the SHUTDOWN-COMPLETE and 4604 * is still hung; we have started a new asoc 4605 * but it won't complete until the shutdown 4606 * is completed 4607 */ 4608 if (locked_tcb) { 4609 SCTP_TCB_UNLOCK(locked_tcb); 4610 } 4611 sctp_handle_ootb(m, iphlen, *offset, sh, inp, 4612 use_mflowid, mflowid, 4613 vrf_id, port); 4614 return (NULL); 4615 } 4616 } else { 4617 /* for all other chunks, vtag must match */ 4618 if (vtag_in != asoc->my_vtag) { 4619 /* invalid vtag... */ 4620 SCTPDBG(SCTP_DEBUG_INPUT3, 4621 "invalid vtag: %xh, expect %xh\n", 4622 vtag_in, asoc->my_vtag); 4623 SCTP_STAT_INCR(sctps_badvtag); 4624 if (locked_tcb) { 4625 SCTP_TCB_UNLOCK(locked_tcb); 4626 } 4627 *offset = length; 4628 return (NULL); 4629 } 4630 } 4631 } /* end if !SCTP_COOKIE_ECHO */ 4632 /* 4633 * process all control chunks... 4634 */ 4635 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) || 4636 (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) || 4637 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) && 4638 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) { 4639 /* implied cookie-ack.. 
we must have lost the ack */ 4640 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4641 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4642 stcb->asoc.overall_error_count, 4643 0, 4644 SCTP_FROM_SCTP_INPUT, 4645 __LINE__); 4646 } 4647 stcb->asoc.overall_error_count = 0; 4648 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, 4649 *netp); 4650 } 4651process_control_chunks: 4652 while (IS_SCTP_CONTROL(ch)) { 4653 /* validate chunk length */ 4654 chk_length = ntohs(ch->chunk_length); 4655 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n", 4656 ch->chunk_type, chk_length); 4657 SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length); 4658 if (chk_length < sizeof(*ch) || 4659 (*offset + (int)chk_length) > length) { 4660 *offset = length; 4661 if (locked_tcb) { 4662 SCTP_TCB_UNLOCK(locked_tcb); 4663 } 4664 return (NULL); 4665 } 4666 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks); 4667 /* 4668 * INIT-ACK only gets the init ack "header" portion only 4669 * because we don't have to process the peer's COOKIE. All 4670 * others get a complete chunk. 4671 */ 4672 if ((ch->chunk_type == SCTP_INITIATION_ACK) || 4673 (ch->chunk_type == SCTP_INITIATION)) { 4674 /* get an init-ack chunk */ 4675 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4676 sizeof(struct sctp_init_ack_chunk), chunk_buf); 4677 if (ch == NULL) { 4678 *offset = length; 4679 if (locked_tcb) { 4680 SCTP_TCB_UNLOCK(locked_tcb); 4681 } 4682 return (NULL); 4683 } 4684 } else { 4685 /* For cookies and all other chunks. */ 4686 if (chk_length > sizeof(chunk_buf)) { 4687 /* 4688 * use just the size of the chunk buffer so 4689 * the front part of our chunks fit in 4690 * contiguous space up to the chunk buffer 4691 * size (508 bytes). For chunks that need to 4692 * get more than that they must use the 4693 * sctp_m_getptr() function or other means 4694 * (e.g. know how to parse mbuf chains). 4695 * Cookies do this already. 
4696 */ 4697 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4698 (sizeof(chunk_buf) - 4), 4699 chunk_buf); 4700 if (ch == NULL) { 4701 *offset = length; 4702 if (locked_tcb) { 4703 SCTP_TCB_UNLOCK(locked_tcb); 4704 } 4705 return (NULL); 4706 } 4707 } else { 4708 /* We can fit it all */ 4709 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4710 chk_length, chunk_buf); 4711 if (ch == NULL) { 4712 SCTP_PRINTF("sctp_process_control: Can't get the all data....\n"); 4713 *offset = length; 4714 if (locked_tcb) { 4715 SCTP_TCB_UNLOCK(locked_tcb); 4716 } 4717 return (NULL); 4718 } 4719 } 4720 } 4721 num_chunks++; 4722 /* Save off the last place we got a control from */ 4723 if (stcb != NULL) { 4724 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) { 4725 /* 4726 * allow last_control to be NULL if 4727 * ASCONF... ASCONF processing will find the 4728 * right net later 4729 */ 4730 if ((netp != NULL) && (*netp != NULL)) 4731 stcb->asoc.last_control_chunk_from = *netp; 4732 } 4733 } 4734#ifdef SCTP_AUDITING_ENABLED 4735 sctp_audit_log(0xB0, ch->chunk_type); 4736#endif 4737 4738 /* check to see if this chunk required auth, but isn't */ 4739 if ((stcb != NULL) && 4740 !SCTP_BASE_SYSCTL(sctp_auth_disable) && 4741 sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) && 4742 !stcb->asoc.authenticated) { 4743 /* "silently" ignore */ 4744 SCTP_STAT_INCR(sctps_recvauthmissing); 4745 goto next_chunk; 4746 } 4747 switch (ch->chunk_type) { 4748 case SCTP_INITIATION: 4749 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n"); 4750 /* The INIT chunk must be the only chunk. */ 4751 if ((num_chunks > 1) || 4752 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 4753 sctp_abort_association(inp, stcb, m, 4754 iphlen, sh, NULL, 4755 use_mflowid, mflowid, 4756 vrf_id, port); 4757 *offset = length; 4758 return (NULL); 4759 } 4760 /* Honor our resource limit. 
*/ 4761 if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) { 4762 struct mbuf *op_err; 4763 4764 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 4765 sctp_abort_association(inp, stcb, m, 4766 iphlen, sh, op_err, 4767 use_mflowid, mflowid, 4768 vrf_id, port); 4769 *offset = length; 4770 return (NULL); 4771 } 4772 sctp_handle_init(m, iphlen, *offset, sh, 4773 (struct sctp_init_chunk *)ch, inp, 4774 stcb, &abort_no_unlock, 4775 use_mflowid, mflowid, 4776 vrf_id, port); 4777 *offset = length; 4778 if ((!abort_no_unlock) && (locked_tcb)) { 4779 SCTP_TCB_UNLOCK(locked_tcb); 4780 } 4781 return (NULL); 4782 break; 4783 case SCTP_PAD_CHUNK: 4784 break; 4785 case SCTP_INITIATION_ACK: 4786 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n"); 4787 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4788 /* We are not interested anymore */ 4789 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4790 ; 4791 } else { 4792 if (locked_tcb != stcb) { 4793 /* Very unlikely */ 4794 SCTP_TCB_UNLOCK(locked_tcb); 4795 } 4796 *offset = length; 4797 if (stcb) { 4798#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4799 so = SCTP_INP_SO(inp); 4800 atomic_add_int(&stcb->asoc.refcnt, 1); 4801 SCTP_TCB_UNLOCK(stcb); 4802 SCTP_SOCKET_LOCK(so, 1); 4803 SCTP_TCB_LOCK(stcb); 4804 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4805#endif 4806 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27); 4807#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4808 SCTP_SOCKET_UNLOCK(so, 1); 4809#endif 4810 } 4811 return (NULL); 4812 } 4813 } 4814 /* The INIT-ACK chunk must be the only chunk. 
*/ 4815 if ((num_chunks > 1) || 4816 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 4817 *offset = length; 4818 if (locked_tcb) { 4819 SCTP_TCB_UNLOCK(locked_tcb); 4820 } 4821 return (NULL); 4822 } 4823 if ((netp) && (*netp)) { 4824 ret = sctp_handle_init_ack(m, iphlen, *offset, sh, 4825 (struct sctp_init_ack_chunk *)ch, 4826 stcb, *netp, 4827 &abort_no_unlock, 4828 use_mflowid, mflowid, 4829 vrf_id); 4830 } else { 4831 ret = -1; 4832 } 4833 *offset = length; 4834 if (abort_no_unlock) { 4835 return (NULL); 4836 } 4837 /* 4838 * Special case, I must call the output routine to 4839 * get the cookie echoed 4840 */ 4841 if ((stcb != NULL) && (ret == 0)) { 4842 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 4843 } 4844 if (locked_tcb) { 4845 SCTP_TCB_UNLOCK(locked_tcb); 4846 } 4847 return (NULL); 4848 break; 4849 case SCTP_SELECTIVE_ACK: 4850 { 4851 struct sctp_sack_chunk *sack; 4852 int abort_now = 0; 4853 uint32_t a_rwnd, cum_ack; 4854 uint16_t num_seg, num_dup; 4855 uint8_t flags; 4856 int offset_seg, offset_dup; 4857 4858 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n"); 4859 SCTP_STAT_INCR(sctps_recvsacks); 4860 if (stcb == NULL) { 4861 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n"); 4862 break; 4863 } 4864 if (chk_length < sizeof(struct sctp_sack_chunk)) { 4865 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n"); 4866 break; 4867 } 4868 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 4869 /*- 4870 * If we have sent a shutdown-ack, we will pay no 4871 * attention to a sack sent in to us since 4872 * we don't care anymore. 
4873 */ 4874 break; 4875 } 4876 sack = (struct sctp_sack_chunk *)ch; 4877 flags = ch->chunk_flags; 4878 cum_ack = ntohl(sack->sack.cum_tsn_ack); 4879 num_seg = ntohs(sack->sack.num_gap_ack_blks); 4880 num_dup = ntohs(sack->sack.num_dup_tsns); 4881 a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd); 4882 if (sizeof(struct sctp_sack_chunk) + 4883 num_seg * sizeof(struct sctp_gap_ack_block) + 4884 num_dup * sizeof(uint32_t) != chk_length) { 4885 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n"); 4886 break; 4887 } 4888 offset_seg = *offset + sizeof(struct sctp_sack_chunk); 4889 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block); 4890 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n", 4891 cum_ack, num_seg, a_rwnd); 4892 stcb->asoc.seen_a_sack_this_pkt = 1; 4893 if ((stcb->asoc.pr_sctp_cnt == 0) && 4894 (num_seg == 0) && 4895 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) && 4896 (stcb->asoc.saw_sack_with_frags == 0) && 4897 (stcb->asoc.saw_sack_with_nr_frags == 0) && 4898 (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) 4899 ) { 4900 /* 4901 * We have a SIMPLE sack having no 4902 * prior segments and data on sent 4903 * queue to be acked.. Use the 4904 * faster path sack processing. We 4905 * also allow window update sacks 4906 * with no missing segments to go 4907 * this way too. 
4908 */ 4909 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, &abort_now, ecne_seen); 4910 } else { 4911 if (netp && *netp) 4912 sctp_handle_sack(m, offset_seg, offset_dup, stcb, 4913 num_seg, 0, num_dup, &abort_now, flags, 4914 cum_ack, a_rwnd, ecne_seen); 4915 } 4916 if (abort_now) { 4917 /* ABORT signal from sack processing */ 4918 *offset = length; 4919 return (NULL); 4920 } 4921 if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 4922 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 4923 (stcb->asoc.stream_queue_cnt == 0)) { 4924 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 4925 } 4926 } 4927 break; 4928 /* 4929 * EY - nr_sack: If the received chunk is an 4930 * nr_sack chunk 4931 */ 4932 case SCTP_NR_SELECTIVE_ACK: 4933 { 4934 struct sctp_nr_sack_chunk *nr_sack; 4935 int abort_now = 0; 4936 uint32_t a_rwnd, cum_ack; 4937 uint16_t num_seg, num_nr_seg, num_dup; 4938 uint8_t flags; 4939 int offset_seg, offset_dup; 4940 4941 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n"); 4942 SCTP_STAT_INCR(sctps_recvsacks); 4943 if (stcb == NULL) { 4944 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n"); 4945 break; 4946 } 4947 if ((stcb->asoc.sctp_nr_sack_on_off == 0) || 4948 (stcb->asoc.peer_supports_nr_sack == 0)) { 4949 goto unknown_chunk; 4950 } 4951 if (chk_length < sizeof(struct sctp_nr_sack_chunk)) { 4952 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n"); 4953 break; 4954 } 4955 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 4956 /*- 4957 * If we have sent a shutdown-ack, we will pay no 4958 * attention to a sack sent in to us since 4959 * we don't care anymore. 
4960 */ 4961 break; 4962 } 4963 nr_sack = (struct sctp_nr_sack_chunk *)ch; 4964 flags = ch->chunk_flags; 4965 cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack); 4966 num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks); 4967 num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks); 4968 num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns); 4969 a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd); 4970 if (sizeof(struct sctp_nr_sack_chunk) + 4971 (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) + 4972 num_dup * sizeof(uint32_t) != chk_length) { 4973 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n"); 4974 break; 4975 } 4976 offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk); 4977 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block); 4978 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n", 4979 cum_ack, num_seg, a_rwnd); 4980 stcb->asoc.seen_a_sack_this_pkt = 1; 4981 if ((stcb->asoc.pr_sctp_cnt == 0) && 4982 (num_seg == 0) && (num_nr_seg == 0) && 4983 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) && 4984 (stcb->asoc.saw_sack_with_frags == 0) && 4985 (stcb->asoc.saw_sack_with_nr_frags == 0) && 4986 (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 4987 /* 4988 * We have a SIMPLE sack having no 4989 * prior segments and data on sent 4990 * queue to be acked. Use the faster 4991 * path sack processing. We also 4992 * allow window update sacks with no 4993 * missing segments to go this way 4994 * too. 
4995 */ 4996 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 4997 &abort_now, ecne_seen); 4998 } else { 4999 if (netp && *netp) 5000 sctp_handle_sack(m, offset_seg, offset_dup, stcb, 5001 num_seg, num_nr_seg, num_dup, &abort_now, flags, 5002 cum_ack, a_rwnd, ecne_seen); 5003 } 5004 if (abort_now) { 5005 /* ABORT signal from sack processing */ 5006 *offset = length; 5007 return (NULL); 5008 } 5009 if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 5010 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 5011 (stcb->asoc.stream_queue_cnt == 0)) { 5012 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 5013 } 5014 } 5015 break; 5016 5017 case SCTP_HEARTBEAT_REQUEST: 5018 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n"); 5019 if ((stcb) && netp && *netp) { 5020 SCTP_STAT_INCR(sctps_recvheartbeat); 5021 sctp_send_heartbeat_ack(stcb, m, *offset, 5022 chk_length, *netp); 5023 5024 /* He's alive so give him credit */ 5025 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5026 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5027 stcb->asoc.overall_error_count, 5028 0, 5029 SCTP_FROM_SCTP_INPUT, 5030 __LINE__); 5031 } 5032 stcb->asoc.overall_error_count = 0; 5033 } 5034 break; 5035 case SCTP_HEARTBEAT_ACK: 5036 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n"); 5037 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) { 5038 /* Its not ours */ 5039 *offset = length; 5040 if (locked_tcb) { 5041 SCTP_TCB_UNLOCK(locked_tcb); 5042 } 5043 return (NULL); 5044 } 5045 /* He's alive so give him credit */ 5046 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5047 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5048 stcb->asoc.overall_error_count, 5049 0, 5050 SCTP_FROM_SCTP_INPUT, 5051 __LINE__); 5052 } 5053 stcb->asoc.overall_error_count = 0; 5054 SCTP_STAT_INCR(sctps_recvheartbeatack); 5055 if (netp && *netp) 5056 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch, 5057 stcb, *netp); 5058 break; 5059 case SCTP_ABORT_ASSOCIATION: 
5060 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n", 5061 stcb); 5062 if ((stcb) && netp && *netp) 5063 sctp_handle_abort((struct sctp_abort_chunk *)ch, 5064 stcb, *netp); 5065 *offset = length; 5066 return (NULL); 5067 break; 5068 case SCTP_SHUTDOWN: 5069 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n", 5070 stcb); 5071 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) { 5072 *offset = length; 5073 if (locked_tcb) { 5074 SCTP_TCB_UNLOCK(locked_tcb); 5075 } 5076 return (NULL); 5077 } 5078 if (netp && *netp) { 5079 int abort_flag = 0; 5080 5081 sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch, 5082 stcb, *netp, &abort_flag); 5083 if (abort_flag) { 5084 *offset = length; 5085 return (NULL); 5086 } 5087 } 5088 break; 5089 case SCTP_SHUTDOWN_ACK: 5090 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb); 5091 if ((stcb) && (netp) && (*netp)) 5092 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp); 5093 *offset = length; 5094 return (NULL); 5095 break; 5096 5097 case SCTP_OPERATION_ERROR: 5098 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n"); 5099 if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) { 5100 *offset = length; 5101 return (NULL); 5102 } 5103 break; 5104 case SCTP_COOKIE_ECHO: 5105 SCTPDBG(SCTP_DEBUG_INPUT3, 5106 "SCTP_COOKIE-ECHO, stcb %p\n", stcb); 5107 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 5108 ; 5109 } else { 5110 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5111 /* We are not interested anymore */ 5112 abend: 5113 if (stcb) { 5114 SCTP_TCB_UNLOCK(stcb); 5115 } 5116 *offset = length; 5117 return (NULL); 5118 } 5119 } 5120 /* 5121 * First are we accepting? We do this again here 5122 * since it is possible that a previous endpoint WAS 5123 * listening responded to a INIT-ACK and then 5124 * closed. We opened and bound.. and are now no 5125 * longer listening. 
5126 */ 5127 5128 if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) { 5129 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 5130 (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) { 5131 struct mbuf *op_err; 5132 5133 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 5134 sctp_abort_association(inp, stcb, m, 5135 iphlen, sh, op_err, 5136 use_mflowid, mflowid, 5137 vrf_id, port); 5138 } 5139 *offset = length; 5140 return (NULL); 5141 } else { 5142 struct mbuf *ret_buf; 5143 struct sctp_inpcb *linp; 5144 5145 if (stcb) { 5146 linp = NULL; 5147 } else { 5148 linp = inp; 5149 } 5150 5151 if (linp) { 5152 SCTP_ASOC_CREATE_LOCK(linp); 5153 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5154 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5155 SCTP_ASOC_CREATE_UNLOCK(linp); 5156 goto abend; 5157 } 5158 } 5159 if (netp) { 5160 ret_buf = 5161 sctp_handle_cookie_echo(m, iphlen, 5162 *offset, sh, 5163 (struct sctp_cookie_echo_chunk *)ch, 5164 &inp, &stcb, netp, 5165 auth_skipped, 5166 auth_offset, 5167 auth_len, 5168 &locked_tcb, 5169 use_mflowid, 5170 mflowid, 5171 vrf_id, 5172 port); 5173 } else { 5174 ret_buf = NULL; 5175 } 5176 if (linp) { 5177 SCTP_ASOC_CREATE_UNLOCK(linp); 5178 } 5179 if (ret_buf == NULL) { 5180 if (locked_tcb) { 5181 SCTP_TCB_UNLOCK(locked_tcb); 5182 } 5183 SCTPDBG(SCTP_DEBUG_INPUT3, 5184 "GAK, null buffer\n"); 5185 *offset = length; 5186 return (NULL); 5187 } 5188 /* if AUTH skipped, see if it verified... 
*/ 5189 if (auth_skipped) { 5190 got_auth = 1; 5191 auth_skipped = 0; 5192 } 5193 if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) { 5194 /* 5195 * Restart the timer if we have 5196 * pending data 5197 */ 5198 struct sctp_tmit_chunk *chk; 5199 5200 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 5201 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 5202 } 5203 } 5204 break; 5205 case SCTP_COOKIE_ACK: 5206 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb); 5207 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) { 5208 if (locked_tcb) { 5209 SCTP_TCB_UNLOCK(locked_tcb); 5210 } 5211 return (NULL); 5212 } 5213 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5214 /* We are not interested anymore */ 5215 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 5216 ; 5217 } else if (stcb) { 5218#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5219 so = SCTP_INP_SO(inp); 5220 atomic_add_int(&stcb->asoc.refcnt, 1); 5221 SCTP_TCB_UNLOCK(stcb); 5222 SCTP_SOCKET_LOCK(so, 1); 5223 SCTP_TCB_LOCK(stcb); 5224 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5225#endif 5226 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27); 5227#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5228 SCTP_SOCKET_UNLOCK(so, 1); 5229#endif 5230 *offset = length; 5231 return (NULL); 5232 } 5233 } 5234 /* He's alive so give him credit */ 5235 if ((stcb) && netp && *netp) { 5236 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5237 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5238 stcb->asoc.overall_error_count, 5239 0, 5240 SCTP_FROM_SCTP_INPUT, 5241 __LINE__); 5242 } 5243 stcb->asoc.overall_error_count = 0; 5244 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp); 5245 } 5246 break; 5247 case SCTP_ECN_ECHO: 5248 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n"); 5249 /* He's alive so give him credit */ 5250 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) { 
5251 /* Its not ours */ 5252 if (locked_tcb) { 5253 SCTP_TCB_UNLOCK(locked_tcb); 5254 } 5255 *offset = length; 5256 return (NULL); 5257 } 5258 if (stcb) { 5259 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5260 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5261 stcb->asoc.overall_error_count, 5262 0, 5263 SCTP_FROM_SCTP_INPUT, 5264 __LINE__); 5265 } 5266 stcb->asoc.overall_error_count = 0; 5267 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, 5268 stcb); 5269 ecne_seen = 1; 5270 } 5271 break; 5272 case SCTP_ECN_CWR: 5273 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n"); 5274 /* He's alive so give him credit */ 5275 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) { 5276 /* Its not ours */ 5277 if (locked_tcb) { 5278 SCTP_TCB_UNLOCK(locked_tcb); 5279 } 5280 *offset = length; 5281 return (NULL); 5282 } 5283 if (stcb) { 5284 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5285 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5286 stcb->asoc.overall_error_count, 5287 0, 5288 SCTP_FROM_SCTP_INPUT, 5289 __LINE__); 5290 } 5291 stcb->asoc.overall_error_count = 0; 5292 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp); 5293 } 5294 break; 5295 case SCTP_SHUTDOWN_COMPLETE: 5296 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb); 5297 /* must be first and only chunk */ 5298 if ((num_chunks > 1) || 5299 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 5300 *offset = length; 5301 if (locked_tcb) { 5302 SCTP_TCB_UNLOCK(locked_tcb); 5303 } 5304 return (NULL); 5305 } 5306 if ((stcb) && netp && *netp) { 5307 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch, 5308 stcb, *netp); 5309 } 5310 *offset = length; 5311 return (NULL); 5312 break; 5313 case SCTP_ASCONF: 5314 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n"); 5315 /* He's alive so give him credit */ 5316 if (stcb) { 5317 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5318 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5319 
stcb->asoc.overall_error_count, 5320 0, 5321 SCTP_FROM_SCTP_INPUT, 5322 __LINE__); 5323 } 5324 stcb->asoc.overall_error_count = 0; 5325 sctp_handle_asconf(m, *offset, 5326 (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0); 5327 asconf_cnt++; 5328 } 5329 break; 5330 case SCTP_ASCONF_ACK: 5331 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n"); 5332 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) { 5333 /* Its not ours */ 5334 if (locked_tcb) { 5335 SCTP_TCB_UNLOCK(locked_tcb); 5336 } 5337 *offset = length; 5338 return (NULL); 5339 } 5340 if ((stcb) && netp && *netp) { 5341 /* He's alive so give him credit */ 5342 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5343 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5344 stcb->asoc.overall_error_count, 5345 0, 5346 SCTP_FROM_SCTP_INPUT, 5347 __LINE__); 5348 } 5349 stcb->asoc.overall_error_count = 0; 5350 sctp_handle_asconf_ack(m, *offset, 5351 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock); 5352 if (abort_no_unlock) 5353 return (NULL); 5354 } 5355 break; 5356 case SCTP_FORWARD_CUM_TSN: 5357 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n"); 5358 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) { 5359 /* Its not ours */ 5360 if (locked_tcb) { 5361 SCTP_TCB_UNLOCK(locked_tcb); 5362 } 5363 *offset = length; 5364 return (NULL); 5365 } 5366 /* He's alive so give him credit */ 5367 if (stcb) { 5368 int abort_flag = 0; 5369 5370 stcb->asoc.overall_error_count = 0; 5371 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5372 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5373 stcb->asoc.overall_error_count, 5374 0, 5375 SCTP_FROM_SCTP_INPUT, 5376 __LINE__); 5377 } 5378 *fwd_tsn_seen = 1; 5379 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5380 /* We are not interested anymore */ 5381#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5382 so = SCTP_INP_SO(inp); 5383 atomic_add_int(&stcb->asoc.refcnt, 1); 5384 SCTP_TCB_UNLOCK(stcb); 5385 SCTP_SOCKET_LOCK(so, 1); 5386 
SCTP_TCB_LOCK(stcb); 5387 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5388#endif 5389 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29); 5390#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5391 SCTP_SOCKET_UNLOCK(so, 1); 5392#endif 5393 *offset = length; 5394 return (NULL); 5395 } 5396 sctp_handle_forward_tsn(stcb, 5397 (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset); 5398 if (abort_flag) { 5399 *offset = length; 5400 return (NULL); 5401 } else { 5402 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5403 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5404 stcb->asoc.overall_error_count, 5405 0, 5406 SCTP_FROM_SCTP_INPUT, 5407 __LINE__); 5408 } 5409 stcb->asoc.overall_error_count = 0; 5410 } 5411 5412 } 5413 break; 5414 case SCTP_STREAM_RESET: 5415 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n"); 5416 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) { 5417 /* Its not ours */ 5418 if (locked_tcb) { 5419 SCTP_TCB_UNLOCK(locked_tcb); 5420 } 5421 *offset = length; 5422 return (NULL); 5423 } 5424 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5425 /* We are not interested anymore */ 5426#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5427 so = SCTP_INP_SO(inp); 5428 atomic_add_int(&stcb->asoc.refcnt, 1); 5429 SCTP_TCB_UNLOCK(stcb); 5430 SCTP_SOCKET_LOCK(so, 1); 5431 SCTP_TCB_LOCK(stcb); 5432 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5433#endif 5434 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30); 5435#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5436 SCTP_SOCKET_UNLOCK(so, 1); 5437#endif 5438 *offset = length; 5439 return (NULL); 5440 } 5441 if (stcb->asoc.peer_supports_strreset == 0) { 5442 /* 5443 * hmm, peer should have announced this, but 5444 * we will turn it on since he is sending us 5445 * a stream reset. 
5446 */ 5447 stcb->asoc.peer_supports_strreset = 1; 5448 } 5449 if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) { 5450 /* stop processing */ 5451 *offset = length; 5452 return (NULL); 5453 } 5454 break; 5455 case SCTP_PACKET_DROPPED: 5456 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n"); 5457 /* re-get it all please */ 5458 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) { 5459 /* Its not ours */ 5460 if (locked_tcb) { 5461 SCTP_TCB_UNLOCK(locked_tcb); 5462 } 5463 *offset = length; 5464 return (NULL); 5465 } 5466 if (ch && (stcb) && netp && (*netp)) { 5467 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch, 5468 stcb, *netp, 5469 min(chk_length, (sizeof(chunk_buf) - 4))); 5470 5471 } 5472 break; 5473 5474 case SCTP_AUTHENTICATION: 5475 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n"); 5476 if (SCTP_BASE_SYSCTL(sctp_auth_disable)) 5477 goto unknown_chunk; 5478 5479 if (stcb == NULL) { 5480 /* save the first AUTH for later processing */ 5481 if (auth_skipped == 0) { 5482 auth_offset = *offset; 5483 auth_len = chk_length; 5484 auth_skipped = 1; 5485 } 5486 /* skip this chunk (temporarily) */ 5487 goto next_chunk; 5488 } 5489 if ((chk_length < (sizeof(struct sctp_auth_chunk))) || 5490 (chk_length > (sizeof(struct sctp_auth_chunk) + 5491 SCTP_AUTH_DIGEST_LEN_MAX))) { 5492 /* Its not ours */ 5493 if (locked_tcb) { 5494 SCTP_TCB_UNLOCK(locked_tcb); 5495 } 5496 *offset = length; 5497 return (NULL); 5498 } 5499 if (got_auth == 1) { 5500 /* skip this chunk... it's already auth'd */ 5501 goto next_chunk; 5502 } 5503 got_auth = 1; 5504 if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, 5505 m, *offset)) { 5506 /* auth HMAC failed so dump the packet */ 5507 *offset = length; 5508 return (stcb); 5509 } else { 5510 /* remaining chunks are HMAC checked */ 5511 stcb->asoc.authenticated = 1; 5512 } 5513 break; 5514 5515 default: 5516 unknown_chunk: 5517 /* it's an unknown chunk! 
*/ 5518 if ((ch->chunk_type & 0x40) && (stcb != NULL)) { 5519 struct mbuf *mm; 5520 struct sctp_paramhdr *phd; 5521 5522 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 5523 0, M_DONTWAIT, 1, MT_DATA); 5524 if (mm) { 5525 phd = mtod(mm, struct sctp_paramhdr *); 5526 /* 5527 * We cheat and use param type since 5528 * we did not bother to define a 5529 * error cause struct. They are the 5530 * same basic format with different 5531 * names. 5532 */ 5533 phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK); 5534 phd->param_length = htons(chk_length + sizeof(*phd)); 5535 SCTP_BUF_LEN(mm) = sizeof(*phd); 5536 SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, chk_length, M_DONTWAIT); 5537 if (SCTP_BUF_NEXT(mm)) { 5538 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(mm), SCTP_SIZE32(chk_length) - chk_length, NULL)) { 5539 sctp_m_freem(mm); 5540 } else { 5541#ifdef SCTP_MBUF_LOGGING 5542 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 5543 struct mbuf *mat; 5544 5545 for (mat = SCTP_BUF_NEXT(mm); mat; mat = SCTP_BUF_NEXT(mat)) { 5546 if (SCTP_BUF_IS_EXTENDED(mat)) { 5547 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 5548 } 5549 } 5550 } 5551#endif 5552 sctp_queue_op_err(stcb, mm); 5553 } 5554 } else { 5555 sctp_m_freem(mm); 5556 } 5557 } 5558 } 5559 if ((ch->chunk_type & 0x80) == 0) { 5560 /* discard this packet */ 5561 *offset = length; 5562 return (stcb); 5563 } /* else skip this bad chunk and continue... 
 */
				break;
			}	/* switch (ch->chunk_type) */


next_chunk:
		/* get the next chunk */
		*offset += SCTP_SIZE32(chk_length);
		if (*offset >= length) {
			/* no more data left in the mbuf chain */
			break;
		}
		/* Pull up the next chunk header; bail out if it is truncated. */
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_chunkhdr), chunk_buf);
		if (ch == NULL) {
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			*offset = length;
			return (NULL);
		}
	}			/* while */

	/*
	 * If any ASCONF chunks were processed in this packet, answer them
	 * all with a single ASCONF-ACK now that the whole packet is parsed.
	 */
	if (asconf_cnt > 0 && stcb != NULL) {
		sctp_send_asconf_ack(stcb);
	}
	return (stcb);
}


#ifdef INVARIANTS
#ifdef __GNUC__
__attribute__((noinline))
#endif
	void
	sctp_validate_no_locks(struct sctp_inpcb *inp)
{
	/*
	 * Debug-only (INVARIANTS) sanity check: panic if the current thread
	 * still owns any TCB lock, the endpoint's create lock, or the
	 * endpoint lock when input processing is about to return.  All of
	 * these must have been released on every exit path.
	 */
	struct sctp_tcb *lstcb;

	LIST_FOREACH(lstcb, &inp->sctp_asoc_list, sctp_tcblist) {
		if (mtx_owned(&lstcb->tcb_mtx)) {
			panic("Own lock on stcb at return from input");
		}
	}
	if (mtx_owned(&inp->inp_create_mtx)) {
		panic("Own create lock on inp");
	}
	if (mtx_owned(&inp->inp_mtx)) {
		panic("Own inp lock on inp");
	}
}

#endif

/*
 * common input chunk processing (v4 and v6)
 *
 * Dispatches a received SCTP packet: control chunks are handed to
 * sctp_process_control(), remaining DATA chunks to sctp_process_data(),
 * then any pending output (SACK/ECNE/queued chunks) is triggered.
 * Expects to be entered with the TCB lock held when stcb != NULL; the
 * lock is released on every exit path.
 */
void
sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint8_t ecn_bits,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	/*
	 * Control chunk processing
	 */
	uint32_t high_tsn;
	int fwd_tsn_seen = 0, data_processed = 0;
	struct mbuf *m = *mm;
	int un_sent;
	int cnt_ctrl_ready = 0;

	SCTP_STAT_INCR(sctps_recvdatagrams);
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 1);
	sctp_auditing(0, inp, stcb, net);
#endif

	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n",
	    m, iphlen, offset, length, stcb);
	if (stcb) {
		/* always clear this before beginning a packet */
		stcb->asoc.authenticated = 0;
		stcb->asoc.seen_a_sack_this_pkt = 0;
		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
		    stcb, stcb->asoc.state);

		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/*-
			 * If we hit here, we had a ref count
			 * up when the assoc was aborted and the
			 * timer is clearing out the assoc, we should
			 * NOT respond to any packet.. its OOTB.
			 */
			SCTP_TCB_UNLOCK(stcb);
			sctp_handle_ootb(m, iphlen, offset, sh, inp,
			    use_mflowid, mflowid,
			    vrf_id, port);
			goto out_now;
		}
	}
	if (IS_SCTP_CONTROL(ch)) {
		/* process the control portion of the SCTP packet */
		/* sa_ignore NO_NULL_CHK */
		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
		    inp, stcb, &net, &fwd_tsn_seen,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb) {
			/*
			 * This covers us if the cookie-echo was there and
			 * it changes our INP.
			 */
			inp = stcb->sctp_ep;
			if ((net) && (port)) {
				/*
				 * Peer is reached over UDP encapsulation:
				 * remember the UDP port and shrink the path
				 * MTU once for the UDP header overhead.
				 */
				if (net->port == 0) {
					sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr));
				}
				net->port = port;
			}
		}
	} else {
		/*
		 * no control chunks, so pre-process DATA chunks (these
		 * checks are taken care of by control processing)
		 */

		/*
		 * if DATA only packet, and auth is required, then punt...
		 * can't have authenticated without any AUTH (control)
		 * chunks
		 */
		if ((stcb != NULL) &&
		    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
		    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
		}
		if (stcb == NULL) {
			/* out of the blue DATA chunk */
			sctp_handle_ootb(m, iphlen, offset, sh, inp,
			    use_mflowid, mflowid,
			    vrf_id, port);
			goto out_now;
		}
		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
			/* v_tag mismatch! */
			SCTP_STAT_INCR(sctps_badvtag);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
		}
	}

	if (stcb == NULL) {
		/*
		 * no valid TCB for this packet, or we found it's a bad
		 * packet while processing control, or we're done with this
		 * packet (done or skip rest of data), so we drop it...
		 */
		goto out_now;
	}
	/*
	 * DATA chunk processing
	 */
	/* plow through the data chunks while length > offset */

	/*
	 * Rest should be DATA only. Check authentication state if AUTH for
	 * DATA is required.
	 */
	if ((length > offset) &&
	    (stcb != NULL) &&
	    !SCTP_BASE_SYSCTL(sctp_auth_disable) &&
	    sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) &&
	    !stcb->asoc.authenticated) {
		/* "silently" ignore */
		SCTP_STAT_INCR(sctps_recvauthmissing);
		SCTPDBG(SCTP_DEBUG_AUTH1,
		    "Data chunk requires AUTH, skipped\n");
		/* still fall through to trigger_send so queued output goes out */
		goto trigger_send;
	}
	if (length > offset) {
		int retval;

		/*
		 * First check to make sure our state is correct. We would
		 * not get here unless we really did have a tag, so we don't
		 * abort if this happens, just dump the chunk silently.
		 */
		switch (SCTP_GET_STATE(&stcb->asoc)) {
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * we consider data with valid tags in this state
			 * shows us the cookie-ack was lost. Imply it was
			 * there.
			 */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				    stcb->asoc.overall_error_count,
				    0,
				    SCTP_FROM_SCTP_INPUT,
				    __LINE__);
			}
			stcb->asoc.overall_error_count = 0;
			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
			break;
		case SCTP_STATE_COOKIE_WAIT:
			/*
			 * We consider OOTB any data sent during asoc setup.
			 */
			sctp_handle_ootb(m, iphlen, offset, sh, inp,
			    use_mflowid, mflowid,
			    vrf_id, port);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_EMPTY:	/* should not happen */
		case SCTP_STATE_INUSE:	/* should not happen */
		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
		default:
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_OPEN:
		case SCTP_STATE_SHUTDOWN_SENT:
			break;
		}
		/* plow through the data chunks while length > offset */
		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
		    inp, stcb, net, &high_tsn,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (retval == 2) {
			/*
			 * The association aborted, NO UNLOCK needed since
			 * the association is destroyed.
			 */
			goto out_now;
		}
		data_processed = 1;
		/*
		 * Anything important needs to have been m_copy'ed in
		 * process_data
		 */
	}
	/* take care of ecn */
	if ((data_processed == 1) &&
	    (stcb->asoc.ecn_allowed == 1) &&
	    ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) {
		/* Yep, we need to add a ECNE */
		sctp_send_ecn_echo(stcb, net, high_tsn);
	}
	if ((data_processed == 0) && (fwd_tsn_seen)) {
		/*
		 * FORWARD-TSN arrived without any DATA: decide whether the
		 * resulting cumulative TSN advance left a gap, and schedule
		 * a SACK accordingly.
		 */
		int was_a_gap;
		uint32_t highest_tsn;

		if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) {
			highest_tsn = stcb->asoc.highest_tsn_inside_nr_map;
		} else {
			highest_tsn = stcb->asoc.highest_tsn_inside_map;
		}
		was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
		stcb->asoc.send_sack = 1;
		sctp_sack_check(stcb, was_a_gap);
	} else if (fwd_tsn_seen) {
		stcb->asoc.send_sack = 1;
	}
	/* trigger send of any chunks in queue... */
trigger_send:
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 2);
	sctp_auditing(1, inp, stcb, net);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
	    stcb->asoc.peers_rwnd,
	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
	    stcb->asoc.total_flight);
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		/* ECN-echo chunks are not counted as "ready" control chunks */
		cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq;
	}
	/*
	 * Kick the output path if control chunks are pending, or if unsent
	 * data exists and either the peer has window or a zero-window probe
	 * is allowed (nothing in flight).
	 */
	if (cnt_ctrl_ready ||
	    ((un_sent) &&
	    (stcb->asoc.peers_rwnd > 0 ||
	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 3);
	sctp_auditing(2, inp,
stcb, net); 5864#endif 5865 SCTP_TCB_UNLOCK(stcb); 5866out_now: 5867#ifdef INVARIANTS 5868 sctp_validate_no_locks(inp); 5869#endif 5870 return; 5871} 5872 5873#if 0 5874static void 5875sctp_print_mbuf_chain(struct mbuf *m) 5876{ 5877 for (; m; m = SCTP_BUF_NEXT(m)) { 5878 SCTP_PRINTF("%p: m_len = %ld\n", m, SCTP_BUF_LEN(m)); 5879 if (SCTP_BUF_IS_EXTENDED(m)) 5880 SCTP_PRINTF("%p: extend_size = %d\n", m, SCTP_BUF_EXTEND_SIZE(m)); 5881 } 5882} 5883 5884#endif 5885 5886#ifdef INET 5887void 5888sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port) 5889{ 5890 struct mbuf *m; 5891 int iphlen; 5892 uint32_t vrf_id = 0; 5893 uint8_t ecn_bits; 5894 struct ip *ip; 5895 struct sctphdr *sh; 5896 struct sctp_inpcb *inp = NULL; 5897 struct sctp_nets *net; 5898 struct sctp_tcb *stcb = NULL; 5899 struct sctp_chunkhdr *ch; 5900 int refcount_up = 0; 5901 int length, mlen, offset; 5902 uint32_t mflowid; 5903 uint8_t use_mflowid; 5904 5905#if !defined(SCTP_WITH_NO_CSUM) 5906 uint32_t check, calc_check; 5907 5908#endif 5909 5910 if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) { 5911 SCTP_RELEASE_PKT(i_pak); 5912 return; 5913 } 5914 mlen = SCTP_HEADER_LEN(i_pak); 5915 iphlen = off; 5916 m = SCTP_HEADER_TO_CHAIN(i_pak); 5917 5918 net = NULL; 5919 SCTP_STAT_INCR(sctps_recvpackets); 5920 SCTP_STAT_INCR_COUNTER64(sctps_inpackets); 5921 5922 5923#ifdef SCTP_MBUF_LOGGING 5924 /* Log in any input mbufs */ 5925 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 5926 struct mbuf *mat; 5927 5928 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) { 5929 if (SCTP_BUF_IS_EXTENDED(mat)) { 5930 sctp_log_mb(mat, SCTP_MBUF_INPUT); 5931 } 5932 } 5933 } 5934#endif 5935#ifdef SCTP_PACKET_LOGGING 5936 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) 5937 sctp_packet_log(m); 5938#endif 5939 if (m->m_flags & M_FLOWID) { 5940 mflowid = m->m_pkthdr.flowid; 5941 use_mflowid = 1; 5942 } else { 5943 mflowid = 0; 5944 use_mflowid = 0; 5945 } 5946 /* 5947 * Must take out the 
iphlen, since mlen expects this (only effect lb 5948 * case) 5949 */ 5950 mlen -= iphlen; 5951 5952 /* 5953 * Get IP, SCTP, and first chunk header together in first mbuf. 5954 */ 5955 ip = mtod(m, struct ip *); 5956 offset = iphlen + sizeof(*sh) + sizeof(*ch); 5957 if (SCTP_BUF_LEN(m) < offset) { 5958 if ((m = m_pullup(m, offset)) == 0) { 5959 SCTP_STAT_INCR(sctps_hdrops); 5960 return; 5961 } 5962 ip = mtod(m, struct ip *); 5963 } 5964 /* validate mbuf chain length with IP payload length */ 5965 if (mlen < (SCTP_GET_IPV4_LENGTH(ip) - iphlen)) { 5966 SCTP_STAT_INCR(sctps_hdrops); 5967 goto bad; 5968 } 5969 sh = (struct sctphdr *)((caddr_t)ip + iphlen); 5970 ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh)); 5971 SCTPDBG(SCTP_DEBUG_INPUT1, 5972 "sctp_input() length:%d iphlen:%d\n", mlen, iphlen); 5973 5974 /* SCTP does not allow broadcasts or multicasts */ 5975 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { 5976 goto bad; 5977 } 5978 if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) { 5979 /* 5980 * We only look at broadcast if its a front state, All 5981 * others we will not have a tcb for anyway. 
5982 */ 5983 goto bad; 5984 } 5985 /* validate SCTP checksum */ 5986 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 5987 "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n", 5988 m->m_pkthdr.len, 5989 if_name(m->m_pkthdr.rcvif), 5990 m->m_pkthdr.csum_flags); 5991#if defined(SCTP_WITH_NO_CSUM) 5992 SCTP_STAT_INCR(sctps_recvnocrc); 5993#else 5994 if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) { 5995 SCTP_STAT_INCR(sctps_recvhwcrc); 5996 goto sctp_skip_csum_4; 5997 } 5998 check = sh->checksum; /* save incoming checksum */ 5999 sh->checksum = 0; /* prepare for calc */ 6000 calc_check = sctp_calculate_cksum(m, iphlen); 6001 sh->checksum = check; 6002 SCTP_STAT_INCR(sctps_recvswcrc); 6003 if (calc_check != check) { 6004 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n", 6005 calc_check, check, m, mlen, iphlen); 6006 6007 stcb = sctp_findassociation_addr(m, 6008 offset - sizeof(*ch), 6009 sh, ch, &inp, &net, 6010 vrf_id); 6011 if ((net) && (port)) { 6012 if (net->port == 0) { 6013 sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr)); 6014 } 6015 net->port = port; 6016 } 6017 if ((net != NULL) && (use_mflowid != 0)) { 6018 net->flowid = mflowid; 6019#ifdef INVARIANTS 6020 net->flowidset = 1; 6021#endif 6022 } 6023 if ((inp) && (stcb)) { 6024 sctp_send_packet_dropped(stcb, net, m, iphlen, 1); 6025 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED); 6026 } else if ((inp != NULL) && (stcb == NULL)) { 6027 refcount_up = 1; 6028 } 6029 SCTP_STAT_INCR(sctps_badsum); 6030 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors); 6031 goto bad; 6032 } 6033sctp_skip_csum_4: 6034#endif 6035 /* destination port of 0 is illegal, based on RFC2960. */ 6036 if (sh->dest_port == 0) { 6037 SCTP_STAT_INCR(sctps_hdrops); 6038 goto bad; 6039 } 6040 /* 6041 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants 6042 * IP/SCTP/first chunk header... 
6043 */ 6044 stcb = sctp_findassociation_addr(m, offset - sizeof(*ch), 6045 sh, ch, &inp, &net, vrf_id); 6046 if ((net) && (port)) { 6047 if (net->port == 0) { 6048 sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr)); 6049 } 6050 net->port = port; 6051 } 6052 if ((net != NULL) && (use_mflowid != 0)) { 6053 net->flowid = mflowid; 6054#ifdef INVARIANTS 6055 net->flowidset = 1; 6056#endif 6057 } 6058 /* inp's ref-count increased && stcb locked */ 6059 if (inp == NULL) { 6060 struct sctp_init_chunk *init_chk, chunk_buf; 6061 6062 SCTP_STAT_INCR(sctps_noport); 6063 if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) 6064 goto bad; 6065 SCTPDBG(SCTP_DEBUG_INPUT1, 6066 "Sending a ABORT from packet entry!\n"); 6067 if (ch->chunk_type == SCTP_INITIATION) { 6068 /* 6069 * we do a trick here to get the INIT tag, dig in 6070 * and get the tag from the INIT and put it in the 6071 * common header. 6072 */ 6073 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 6074 iphlen + sizeof(*sh), sizeof(*init_chk), 6075 (uint8_t *) & chunk_buf); 6076 if (init_chk != NULL) 6077 sh->v_tag = init_chk->init.initiate_tag; 6078 } 6079 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 6080 sctp_send_shutdown_complete2(m, sh, 6081 use_mflowid, mflowid, 6082 vrf_id, port); 6083 goto bad; 6084 } 6085 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) { 6086 goto bad; 6087 } 6088 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) { 6089 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 6090 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 6091 (ch->chunk_type != SCTP_INIT))) { 6092 sctp_send_abort(m, iphlen, sh, 0, NULL, 6093 use_mflowid, mflowid, 6094 vrf_id, port); 6095 } 6096 } 6097 goto bad; 6098 } else if (stcb == NULL) { 6099 refcount_up = 1; 6100 } 6101#ifdef IPSEC 6102 /* 6103 * I very much doubt any of the IPSEC stuff will work but I have no 6104 * idea, so I will leave it in place. 
6105 */ 6106 if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) { 6107 MODULE_GLOBAL(ipsec4stat).in_polvio++; 6108 SCTP_STAT_INCR(sctps_hdrops); 6109 goto bad; 6110 } 6111#endif /* IPSEC */ 6112 6113 /* 6114 * common chunk processing 6115 */ 6116 length = ip->ip_len + iphlen; 6117 offset -= sizeof(struct sctp_chunkhdr); 6118 6119 ecn_bits = ip->ip_tos; 6120 6121 /* sa_ignore NO_NULL_CHK */ 6122 sctp_common_input_processing(&m, iphlen, offset, length, sh, ch, 6123 inp, stcb, net, ecn_bits, 6124 use_mflowid, mflowid, 6125 vrf_id, port); 6126 /* inp's ref-count reduced && stcb unlocked */ 6127 if (m) { 6128 sctp_m_freem(m); 6129 } 6130 if ((inp) && (refcount_up)) { 6131 /* reduce ref-count */ 6132 SCTP_INP_DECR_REF(inp); 6133 } 6134 return; 6135bad: 6136 if (stcb) { 6137 SCTP_TCB_UNLOCK(stcb); 6138 } 6139 if ((inp) && (refcount_up)) { 6140 /* reduce ref-count */ 6141 SCTP_INP_DECR_REF(inp); 6142 } 6143 if (m) { 6144 sctp_m_freem(m); 6145 } 6146 return; 6147} 6148 6149#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP) 6150extern int *sctp_cpuarry; 6151 6152#endif 6153 6154void 6155sctp_input(struct mbuf *m, int off) 6156{ 6157#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP) 6158 struct ip *ip; 6159 struct sctphdr *sh; 6160 int offset; 6161 int cpu_to_use; 6162 uint32_t flowid, tag; 6163 6164 if (mp_ncpus > 1) { 6165 if (m->m_flags & M_FLOWID) { 6166 flowid = m->m_pkthdr.flowid; 6167 } else { 6168 /* 6169 * No flow id built by lower layers fix it so we 6170 * create one. 
6171 */ 6172 ip = mtod(m, struct ip *); 6173 offset = off + sizeof(*sh); 6174 if (SCTP_BUF_LEN(m) < offset) { 6175 if ((m = m_pullup(m, offset)) == 0) { 6176 SCTP_STAT_INCR(sctps_hdrops); 6177 return; 6178 } 6179 ip = mtod(m, struct ip *); 6180 } 6181 sh = (struct sctphdr *)((caddr_t)ip + off); 6182 tag = htonl(sh->v_tag); 6183 flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port); 6184 m->m_pkthdr.flowid = flowid; 6185 m->m_flags |= M_FLOWID; 6186 } 6187 cpu_to_use = sctp_cpuarry[flowid % mp_ncpus]; 6188 sctp_queue_to_mcore(m, off, cpu_to_use); 6189 return; 6190 } 6191#endif 6192 sctp_input_with_port(m, off, 0); 6193} 6194 6195#endif 6196