/* sctp_input.c revision 221249 */
1/*- 2 * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2008-2011, by Randall Stewart. All rights reserved. 4 * Copyright (c) 2008-2011, by Michael Tuexen. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * a) Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * b) Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the distribution. 15 * 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 
31 */ 32 33/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */ 34 35#include <sys/cdefs.h> 36__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 221249 2011-04-30 11:18:16Z tuexen $"); 37 38#include <netinet/sctp_os.h> 39#include <netinet/sctp_var.h> 40#include <netinet/sctp_sysctl.h> 41#include <netinet/sctp_pcb.h> 42#include <netinet/sctp_header.h> 43#include <netinet/sctputil.h> 44#include <netinet/sctp_output.h> 45#include <netinet/sctp_input.h> 46#include <netinet/sctp_auth.h> 47#include <netinet/sctp_indata.h> 48#include <netinet/sctp_asconf.h> 49#include <netinet/sctp_bsd_addr.h> 50#include <netinet/sctp_timer.h> 51#include <netinet/sctp_crc32.h> 52#include <netinet/udp.h> 53#include <sys/smp.h> 54 55 56 57static void 58sctp_stop_all_cookie_timers(struct sctp_tcb *stcb) 59{ 60 struct sctp_nets *net; 61 62 /* 63 * This now not only stops all cookie timers it also stops any INIT 64 * timers as well. This will make sure that the timers are stopped 65 * in all collision cases. 
66 */ 67 SCTP_TCB_LOCK_ASSERT(stcb); 68 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 69 if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) { 70 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, 71 stcb->sctp_ep, 72 stcb, 73 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1); 74 } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) { 75 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, 76 stcb->sctp_ep, 77 stcb, 78 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2); 79 } 80 } 81} 82 83/* INIT handler */ 84static void 85sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh, 86 struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 87 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, uint16_t port) 88{ 89 struct sctp_init *init; 90 struct mbuf *op_err; 91 uint32_t init_limit; 92 93 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n", 94 stcb); 95 if (stcb == NULL) { 96 SCTP_INP_RLOCK(inp); 97 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 98 goto outnow; 99 } 100 } 101 op_err = NULL; 102 init = &cp->init; 103 /* First are we accepting? */ 104 if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) { 105 SCTPDBG(SCTP_DEBUG_INPUT2, 106 "sctp_handle_init: Abort, so_qlimit:%d\n", 107 inp->sctp_socket->so_qlimit); 108 /* 109 * FIX ME ?? What about TCP model and we have a 110 * match/restart case? Actually no fix is needed. the lookup 111 * will always find the existing assoc so stcb would not be 112 * NULL. It may be questionable to do this since we COULD 113 * just send back the INIT-ACK and hope that the app did 114 * accept()'s by the time the COOKIE was sent. But there is 115 * a price to pay for COOKIE generation and I don't want to 116 * pay it on the chance that the app will actually do some 117 * accepts(). 
The App just looses and should NOT be in this 118 * state :-) 119 */ 120 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 121 vrf_id, port); 122 if (stcb) 123 *abort_no_unlock = 1; 124 goto outnow; 125 } 126 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) { 127 /* Invalid length */ 128 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 129 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 130 vrf_id, port); 131 if (stcb) 132 *abort_no_unlock = 1; 133 goto outnow; 134 } 135 /* validate parameters */ 136 if (init->initiate_tag == 0) { 137 /* protocol error... send abort */ 138 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 139 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 140 vrf_id, port); 141 if (stcb) 142 *abort_no_unlock = 1; 143 goto outnow; 144 } 145 if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) { 146 /* invalid parameter... send abort */ 147 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 148 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 149 vrf_id, port); 150 if (stcb) 151 *abort_no_unlock = 1; 152 goto outnow; 153 } 154 if (init->num_inbound_streams == 0) { 155 /* protocol error... send abort */ 156 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 157 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 158 vrf_id, port); 159 if (stcb) 160 *abort_no_unlock = 1; 161 goto outnow; 162 } 163 if (init->num_outbound_streams == 0) { 164 /* protocol error... send abort */ 165 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 166 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 167 vrf_id, port); 168 if (stcb) 169 *abort_no_unlock = 1; 170 goto outnow; 171 } 172 init_limit = offset + ntohs(cp->ch.chunk_length); 173 if (sctp_validate_init_auth_params(m, offset + sizeof(*cp), 174 init_limit)) { 175 /* auth parameter(s) error... 
send abort */ 176 sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id, port); 177 if (stcb) 178 *abort_no_unlock = 1; 179 goto outnow; 180 } 181 /* send an INIT-ACK w/cookie */ 182 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n"); 183 sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, port, 184 ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED)); 185outnow: 186 if (stcb == NULL) { 187 SCTP_INP_RUNLOCK(inp); 188 } 189} 190 191/* 192 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error 193 */ 194 195int 196sctp_is_there_unsent_data(struct sctp_tcb *stcb) 197{ 198 int unsent_data = 0; 199 unsigned int i; 200 struct sctp_stream_queue_pending *sp; 201 struct sctp_association *asoc; 202 203 /* 204 * This function returns the number of streams that have true unsent 205 * data on them. Note that as it looks through it will clean up any 206 * places that have old data that has been sent but left at top of 207 * stream queue. 208 */ 209 asoc = &stcb->asoc; 210 SCTP_TCB_SEND_LOCK(stcb); 211 if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) { 212 /* Check to see if some data queued */ 213 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 214 /* sa_ignore FREED_MEMORY */ 215 sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue); 216 if (sp == NULL) { 217 continue; 218 } 219 if ((sp->msg_is_complete) && 220 (sp->length == 0) && 221 (sp->sender_all_done)) { 222 /* 223 * We are doing differed cleanup. Last time 224 * through when we took all the data the 225 * sender_all_done was not set. 
226 */ 227 if (sp->put_last_out == 0) { 228 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n"); 229 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n", 230 sp->sender_all_done, 231 sp->length, 232 sp->msg_is_complete, 233 sp->put_last_out); 234 } 235 atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1); 236 TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next); 237 if (sp->net) { 238 sctp_free_remote_addr(sp->net); 239 sp->net = NULL; 240 } 241 if (sp->data) { 242 sctp_m_freem(sp->data); 243 sp->data = NULL; 244 } 245 sctp_free_a_strmoq(stcb, sp); 246 } else { 247 unsent_data++; 248 break; 249 } 250 } 251 } 252 SCTP_TCB_SEND_UNLOCK(stcb); 253 return (unsent_data); 254} 255 256static int 257sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb, 258 struct sctp_nets *net) 259{ 260 struct sctp_init *init; 261 struct sctp_association *asoc; 262 struct sctp_nets *lnet; 263 unsigned int i; 264 265 init = &cp->init; 266 asoc = &stcb->asoc; 267 /* save off parameters */ 268 asoc->peer_vtag = ntohl(init->initiate_tag); 269 asoc->peers_rwnd = ntohl(init->a_rwnd); 270 /* init tsn's */ 271 asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1; 272 273 if (!TAILQ_EMPTY(&asoc->nets)) { 274 /* update any ssthresh's that may have a default */ 275 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 276 lnet->ssthresh = asoc->peers_rwnd; 277 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) { 278 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION); 279 } 280 } 281 } 282 SCTP_TCB_SEND_LOCK(stcb); 283 if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) { 284 unsigned int newcnt; 285 struct sctp_stream_out *outs; 286 struct sctp_stream_queue_pending *sp, *nsp; 287 struct sctp_tmit_chunk *chk, *nchk; 288 289 /* abandon the upper streams */ 290 newcnt = ntohs(init->num_inbound_streams); 291 TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) { 292 if 
(chk->rec.data.stream_number >= newcnt) { 293 TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next); 294 asoc->send_queue_cnt--; 295 if (chk->data != NULL) { 296 sctp_free_bufspace(stcb, asoc, chk, 1); 297 sctp_ulp_notify(SCTP_NOTIFY_DG_FAIL, stcb, 298 SCTP_NOTIFY_DATAGRAM_UNSENT, chk, SCTP_SO_NOT_LOCKED); 299 if (chk->data) { 300 sctp_m_freem(chk->data); 301 chk->data = NULL; 302 } 303 } 304 sctp_free_a_chunk(stcb, chk); 305 /* sa_ignore FREED_MEMORY */ 306 } 307 } 308 if (asoc->strmout) { 309 for (i = newcnt; i < asoc->pre_open_streams; i++) { 310 outs = &asoc->strmout[i]; 311 TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) { 312 TAILQ_REMOVE(&outs->outqueue, sp, next); 313 asoc->stream_queue_cnt--; 314 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, 315 stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, 316 sp, SCTP_SO_NOT_LOCKED); 317 if (sp->data) { 318 sctp_m_freem(sp->data); 319 sp->data = NULL; 320 } 321 if (sp->net) { 322 sctp_free_remote_addr(sp->net); 323 sp->net = NULL; 324 } 325 /* Free the chunk */ 326 sctp_free_a_strmoq(stcb, sp); 327 /* sa_ignore FREED_MEMORY */ 328 } 329 } 330 } 331 /* cut back the count */ 332 asoc->pre_open_streams = newcnt; 333 } 334 SCTP_TCB_SEND_UNLOCK(stcb); 335 asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams; 336 337 /* EY - nr_sack: initialize highest tsn in nr_mapping_array */ 338 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map; 339 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 340 sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 341 } 342 /* This is the next one we expect */ 343 asoc->str_reset_seq_in = asoc->asconf_seq_in + 1; 344 345 asoc->mapping_array_base_tsn = ntohl(init->initial_tsn); 346 asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in; 347 348 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 349 /* open the requested streams */ 350 351 if (asoc->strmin != NULL) { 352 /* Free the old ones */ 353 struct sctp_queued_to_read *ctl, 
*nctl; 354 355 for (i = 0; i < asoc->streamincnt; i++) { 356 TAILQ_FOREACH_SAFE(ctl, &asoc->strmin[i].inqueue, next, nctl) { 357 TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next); 358 sctp_free_remote_addr(ctl->whoFrom); 359 ctl->whoFrom = NULL; 360 sctp_m_freem(ctl->data); 361 ctl->data = NULL; 362 sctp_free_a_readq(stcb, ctl); 363 } 364 } 365 SCTP_FREE(asoc->strmin, SCTP_M_STRMI); 366 } 367 asoc->streamincnt = ntohs(init->num_outbound_streams); 368 if (asoc->streamincnt > MAX_SCTP_STREAMS) { 369 asoc->streamincnt = MAX_SCTP_STREAMS; 370 } 371 SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt * 372 sizeof(struct sctp_stream_in), SCTP_M_STRMI); 373 if (asoc->strmin == NULL) { 374 /* we didn't get memory for the streams! */ 375 SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n"); 376 return (-1); 377 } 378 for (i = 0; i < asoc->streamincnt; i++) { 379 asoc->strmin[i].stream_no = i; 380 asoc->strmin[i].last_sequence_delivered = 0xffff; 381 /* 382 * U-stream ranges will be set when the cookie is unpacked. 383 * Or for the INIT sender they are un set (if pr-sctp not 384 * supported) when the INIT-ACK arrives. 385 */ 386 TAILQ_INIT(&asoc->strmin[i].inqueue); 387 asoc->strmin[i].delivery_started = 0; 388 } 389 /* 390 * load_address_from_init will put the addresses into the 391 * association when the COOKIE is processed or the INIT-ACK is 392 * processed. Both types of COOKIE's existing and new call this 393 * routine. It will remove addresses that are no longer in the 394 * association (for the restarting case where addresses are 395 * removed). Up front when the INIT arrives we will discard it if it 396 * is a restart and new addresses have been added. 
397 */ 398 /* sa_ignore MEMLEAK */ 399 return (0); 400} 401 402/* 403 * INIT-ACK message processing/consumption returns value < 0 on error 404 */ 405static int 406sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, 407 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 408 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id) 409{ 410 struct sctp_association *asoc; 411 struct mbuf *op_err; 412 int retval, abort_flag; 413 uint32_t initack_limit; 414 int nat_friendly = 0; 415 416 /* First verify that we have no illegal param's */ 417 abort_flag = 0; 418 op_err = NULL; 419 420 op_err = sctp_arethere_unrecognized_parameters(m, 421 (offset + sizeof(struct sctp_init_chunk)), 422 &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly); 423 if (abort_flag) { 424 /* Send an abort and notify peer */ 425 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED); 426 *abort_no_unlock = 1; 427 return (-1); 428 } 429 asoc = &stcb->asoc; 430 asoc->peer_supports_nat = (uint8_t) nat_friendly; 431 /* process the peer's parameters in the INIT-ACK */ 432 retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net); 433 if (retval < 0) { 434 return (retval); 435 } 436 initack_limit = offset + ntohs(cp->ch.chunk_length); 437 /* load all addresses */ 438 if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen, 439 (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh, 440 NULL))) { 441 /* Huh, we should abort */ 442 SCTPDBG(SCTP_DEBUG_INPUT1, 443 "Load addresses from INIT causes an abort %d\n", 444 retval); 445 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 446 NULL, 0, net->port); 447 *abort_no_unlock = 1; 448 return (-1); 449 } 450 /* if the peer doesn't support asconf, flush the asconf queue */ 451 if (asoc->peer_supports_asconf == 0) { 452 struct sctp_asconf_addr *param, *nparam; 453 454 TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) { 455 
TAILQ_REMOVE(&asoc->asconf_queue, param, next); 456 SCTP_FREE(param, SCTP_M_ASC_ADDR); 457 } 458 } 459 stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs, 460 stcb->asoc.local_hmacs); 461 if (op_err) { 462 sctp_queue_op_err(stcb, op_err); 463 /* queuing will steal away the mbuf chain to the out queue */ 464 op_err = NULL; 465 } 466 /* extract the cookie and queue it to "echo" it back... */ 467 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 468 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 469 stcb->asoc.overall_error_count, 470 0, 471 SCTP_FROM_SCTP_INPUT, 472 __LINE__); 473 } 474 stcb->asoc.overall_error_count = 0; 475 net->error_count = 0; 476 477 /* 478 * Cancel the INIT timer, We do this first before queueing the 479 * cookie. We always cancel at the primary to assue that we are 480 * canceling the timer started by the INIT which always goes to the 481 * primary. 482 */ 483 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb, 484 asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4); 485 486 /* calculate the RTO */ 487 net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy, 488 SCTP_RTT_FROM_NON_DATA); 489 490 retval = sctp_send_cookie_echo(m, offset, stcb, net); 491 if (retval < 0) { 492 /* 493 * No cookie, we probably should send a op error. But in any 494 * case if there is no cookie in the INIT-ACK, we can 495 * abandon the peer, its broke. 
496 */ 497 if (retval == -3) { 498 /* We abort with an error of missing mandatory param */ 499 op_err = 500 sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM); 501 if (op_err) { 502 /* 503 * Expand beyond to include the mandatory 504 * param cookie 505 */ 506 struct sctp_inv_mandatory_param *mp; 507 508 SCTP_BUF_LEN(op_err) = 509 sizeof(struct sctp_inv_mandatory_param); 510 mp = mtod(op_err, 511 struct sctp_inv_mandatory_param *); 512 /* Subtract the reserved param */ 513 mp->length = 514 htons(sizeof(struct sctp_inv_mandatory_param) - 2); 515 mp->num_param = htonl(1); 516 mp->param = htons(SCTP_STATE_COOKIE); 517 mp->resv = 0; 518 } 519 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 520 sh, op_err, 0, net->port); 521 *abort_no_unlock = 1; 522 } 523 return (retval); 524 } 525 return (0); 526} 527 528static void 529sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp, 530 struct sctp_tcb *stcb, struct sctp_nets *net) 531{ 532 struct sockaddr_storage store; 533 struct sctp_nets *r_net, *f_net; 534 struct timeval tv; 535 int req_prim = 0; 536 537#ifdef INET 538 struct sockaddr_in *sin; 539 540#endif 541#ifdef INET6 542 struct sockaddr_in6 *sin6; 543 544#endif 545 546 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) { 547 /* Invalid length */ 548 return; 549 } 550 memset(&store, 0, sizeof(store)); 551 switch (cp->heartbeat.hb_info.addr_family) { 552#ifdef INET 553 case AF_INET: 554 if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) { 555 sin = (struct sockaddr_in *)&store; 556 sin->sin_family = cp->heartbeat.hb_info.addr_family; 557 sin->sin_len = cp->heartbeat.hb_info.addr_len; 558 sin->sin_port = stcb->rport; 559 memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address, 560 sizeof(sin->sin_addr)); 561 } else { 562 return; 563 } 564 break; 565#endif 566#ifdef INET6 567 case AF_INET6: 568 if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) { 569 sin6 = (struct sockaddr_in6 *)&store; 570 sin6->sin6_family = 
cp->heartbeat.hb_info.addr_family; 571 sin6->sin6_len = cp->heartbeat.hb_info.addr_len; 572 sin6->sin6_port = stcb->rport; 573 memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address, 574 sizeof(sin6->sin6_addr)); 575 } else { 576 return; 577 } 578 break; 579#endif 580 default: 581 return; 582 } 583 r_net = sctp_findnet(stcb, (struct sockaddr *)&store); 584 if (r_net == NULL) { 585 SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n"); 586 return; 587 } 588 if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) && 589 (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) && 590 (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) { 591 /* 592 * If the its a HB and it's random value is correct when can 593 * confirm the destination. 594 */ 595 r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 596 if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) { 597 stcb->asoc.primary_destination = r_net; 598 r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY; 599 r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 600 f_net = TAILQ_FIRST(&stcb->asoc.nets); 601 if (f_net != r_net) { 602 /* 603 * first one on the list is NOT the primary 604 * sctp_cmpaddr() is much more efficent if 605 * the primary is the first on the list, 606 * make it so. 
607 */ 608 TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next); 609 TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next); 610 } 611 req_prim = 1; 612 } 613 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 614 stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED); 615 } 616 r_net->error_count = 0; 617 r_net->hb_responded = 1; 618 tv.tv_sec = cp->heartbeat.hb_info.time_value_1; 619 tv.tv_usec = cp->heartbeat.hb_info.time_value_2; 620 if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 621 r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE; 622 r_net->dest_state |= SCTP_ADDR_REACHABLE; 623 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 624 SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED); 625 /* now was it the primary? if so restore */ 626 if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) { 627 (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net); 628 } 629 } 630 /* 631 * JRS 5/14/07 - If CMT PF is on and the destination is in PF state, 632 * set the destination to active state and set the cwnd to one or 633 * two MTU's based on whether PF1 or PF2 is being used. If a T3 634 * timer is running, for the destination, stop the timer because a 635 * PF-heartbeat was received. 
636 */ 637 if ((stcb->asoc.sctp_cmt_on_off > 0) && 638 (stcb->asoc.sctp_cmt_pf > 0) && 639 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) { 640 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 641 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 642 stcb, net, 643 SCTP_FROM_SCTP_INPUT + SCTP_LOC_5); 644 } 645 net->dest_state &= ~SCTP_ADDR_PF; 646 net->cwnd = net->mtu * stcb->asoc.sctp_cmt_pf; 647 SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n", 648 net, net->cwnd); 649 } 650 /* Now lets do a RTO with this */ 651 r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy, 652 SCTP_RTT_FROM_NON_DATA); 653 /* Mobility adaptation */ 654 if (req_prim) { 655 if ((sctp_is_mobility_feature_on(stcb->sctp_ep, 656 SCTP_MOBILITY_BASE) || 657 sctp_is_mobility_feature_on(stcb->sctp_ep, 658 SCTP_MOBILITY_FASTHANDOFF)) && 659 sctp_is_mobility_feature_on(stcb->sctp_ep, 660 SCTP_MOBILITY_PRIM_DELETED)) { 661 662 sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7); 663 if (sctp_is_mobility_feature_on(stcb->sctp_ep, 664 SCTP_MOBILITY_FASTHANDOFF)) { 665 sctp_assoc_immediate_retrans(stcb, 666 stcb->asoc.primary_destination); 667 } 668 if (sctp_is_mobility_feature_on(stcb->sctp_ep, 669 SCTP_MOBILITY_BASE)) { 670 sctp_move_chunks_from_net(stcb, 671 stcb->asoc.deleted_primary); 672 } 673 sctp_delete_prim_timer(stcb->sctp_ep, stcb, 674 stcb->asoc.deleted_primary); 675 } 676 } 677} 678 679static int 680sctp_handle_nat_colliding_state(struct sctp_tcb *stcb) 681{ 682 /* 683 * return 0 means we want you to proceed with the abort non-zero 684 * means no abort processing 685 */ 686 struct sctpasochead *head; 687 688 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) { 689 /* generate a new vtag and send init */ 690 LIST_REMOVE(stcb, sctp_asocs); 691 stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 692 head = 
&SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))]; 693 /* 694 * put it in the bucket in the vtag hash of assoc's for the 695 * system 696 */ 697 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 698 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 699 return (1); 700 } 701 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 702 /* 703 * treat like a case where the cookie expired i.e.: - dump 704 * current cookie. - generate a new vtag. - resend init. 705 */ 706 /* generate a new vtag and send init */ 707 LIST_REMOVE(stcb, sctp_asocs); 708 stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED; 709 stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT; 710 sctp_stop_all_cookie_timers(stcb); 711 sctp_toss_old_cookies(stcb, &stcb->asoc); 712 stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1); 713 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))]; 714 /* 715 * put it in the bucket in the vtag hash of assoc's for the 716 * system 717 */ 718 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 719 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 720 return (1); 721 } 722 return (0); 723} 724 725static int 726sctp_handle_nat_missing_state(struct sctp_tcb *stcb, 727 struct sctp_nets *net) 728{ 729 /* 730 * return 0 means we want you to proceed with the abort non-zero 731 * means no abort processing 732 */ 733 if (stcb->asoc.peer_supports_auth == 0) { 734 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n"); 735 return (0); 736 } 737 sctp_asconf_send_nat_state_update(stcb, net); 738 return (1); 739} 740 741 742static void 743sctp_handle_abort(struct sctp_abort_chunk *cp, 744 struct sctp_tcb *stcb, struct sctp_nets *net) 745{ 746#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 747 struct socket *so; 748 749#endif 750 uint16_t len; 751 752 
SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n"); 753 if (stcb == NULL) 754 return; 755 756 len = ntohs(cp->ch.chunk_length); 757 if (len > sizeof(struct sctp_chunkhdr)) { 758 /* 759 * Need to check the cause codes for our two magic nat 760 * aborts which don't kill the assoc necessarily. 761 */ 762 struct sctp_abort_chunk *cpnext; 763 struct sctp_missing_nat_state *natc; 764 uint16_t cause; 765 766 cpnext = cp; 767 cpnext++; 768 natc = (struct sctp_missing_nat_state *)cpnext; 769 cause = ntohs(natc->cause); 770 if (cause == SCTP_CAUSE_NAT_COLLIDING_STATE) { 771 SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n", 772 cp->ch.chunk_flags); 773 if (sctp_handle_nat_colliding_state(stcb)) { 774 return; 775 } 776 } else if (cause == SCTP_CAUSE_NAT_MISSING_STATE) { 777 SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n", 778 cp->ch.chunk_flags); 779 if (sctp_handle_nat_missing_state(stcb, net)) { 780 return; 781 } 782 } 783 } 784 /* stop any receive timers */ 785 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); 786 /* notify user of the abort and clean up... 
*/ 787 sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED); 788 /* free the tcb */ 789#if defined(SCTP_PANIC_ON_ABORT) 790 printf("stcb:%p state:%d rport:%d net:%p\n", 791 stcb, stcb->asoc.state, stcb->rport, net); 792 if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) { 793 panic("Received an ABORT"); 794 } else { 795 printf("No panic its in state %x closed\n", stcb->asoc.state); 796 } 797#endif 798 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 799 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 800 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 801 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 802 } 803#ifdef SCTP_ASOCLOG_OF_TSNS 804 sctp_print_out_track_log(stcb); 805#endif 806#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 807 so = SCTP_INP_SO(stcb->sctp_ep); 808 atomic_add_int(&stcb->asoc.refcnt, 1); 809 SCTP_TCB_UNLOCK(stcb); 810 SCTP_SOCKET_LOCK(so, 1); 811 SCTP_TCB_LOCK(stcb); 812 atomic_subtract_int(&stcb->asoc.refcnt, 1); 813#endif 814 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED; 815 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 816 SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); 817#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 818 SCTP_SOCKET_UNLOCK(so, 1); 819#endif 820 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n"); 821} 822 823static void 824sctp_handle_shutdown(struct sctp_shutdown_chunk *cp, 825 struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag) 826{ 827 struct sctp_association *asoc; 828 int some_on_streamwheel; 829 830#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 831 struct socket *so; 832 833#endif 834 835 SCTPDBG(SCTP_DEBUG_INPUT2, 836 "sctp_handle_shutdown: handling SHUTDOWN\n"); 837 if (stcb == NULL) 838 return; 839 asoc = &stcb->asoc; 840 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 841 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 842 return; 843 } 844 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) { 845 /* Shutdown NOT the 
expected size */
		return;
	} else {
		sctp_update_acked(stcb, cp, net, abort_flag);
		if (*abort_flag) {
			return;
		}
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Platforms needing the socket lock: bump the refcnt so the
		 * assoc cannot be freed, drop the TCB lock, take the socket
		 * lock, then re-take the TCB lock (socket lock before TCB
		 * lock).  Re-check for a close that raced in while we were
		 * unlocked.
		 */
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		sctp_stop_timers_for_shutdown(stcb);
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
		    stcb, net);
	}
}

/*
 * Handle an incoming SHUTDOWN-ACK chunk: abort any partial-delivery in
 * progress, flush outbound queues, answer with SHUTDOWN-COMPLETE and free
 * the association.  Called with the TCB lock held; the lock is released on
 * the early-return paths explicitly.  NOTE(review): the final path hands
 * the TCB to sctp_free_assoc(), which presumably consumes the lock --
 * confirm against sctp_pcb.c.
 */
static void
sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(stcb->sctp_ep);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		/* unexpected SHUTDOWN-ACK... do OOTB handling... */
		sctp_send_shutdown_complete(stcb, net, 1);
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* unexpected SHUTDOWN-ACK... so ignore... */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Same lock-order dance as above: hold a reference, drop
		 * the TCB lock, take the socket lock, re-lock the TCB and
		 * re-check that the assoc did not get closed meanwhile.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* are the queues empty? */
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
	/* send SHUTDOWN-COMPLETE */
	sctp_send_shutdown_complete(stcb, net, 0);
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
			/* Set the connected flag to disconnected */
			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
		}
	}
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB but first save off the ep */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
}

/*
 * Skip past the param header and then we will find the chunk that caused the
 * problem. There are two possibilities ASCONF or FWD-TSN other than that and
 * our peer must be broken.
 */
static void
sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
    struct sctp_nets *net)
{
	struct sctp_chunkhdr *chk;

	/* The offending chunk header follows directly after the cause header. */
	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
	switch (chk->chunk_type) {
	case SCTP_ASCONF_ACK:
	case SCTP_ASCONF:
		/* peer rejected ASCONF; drop any pending ASCONF state */
		sctp_asconf_cleanup(stcb, net);
		break;
	case SCTP_FORWARD_CUM_TSN:
		/* peer does not understand FWD-TSN: disable PR-SCTP */
		stcb->asoc.peer_supports_prsctp = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support chunk type %d(%x)??\n",
		    chk->chunk_type, (uint32_t) chk->chunk_type);
		break;
	}
}

/*
 * Skip past the param header and then we will find the param that caused the
 * problem. There are a number of param's in a ASCONF OR the prsctp param
 * these will turn off specific features.
1054 */ 1055static void 1056sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr) 1057{ 1058 struct sctp_paramhdr *pbad; 1059 1060 pbad = phdr + 1; 1061 switch (ntohs(pbad->param_type)) { 1062 /* pr-sctp draft */ 1063 case SCTP_PRSCTP_SUPPORTED: 1064 stcb->asoc.peer_supports_prsctp = 0; 1065 break; 1066 case SCTP_SUPPORTED_CHUNK_EXT: 1067 break; 1068 /* draft-ietf-tsvwg-addip-sctp */ 1069 case SCTP_HAS_NAT_SUPPORT: 1070 stcb->asoc.peer_supports_nat = 0; 1071 break; 1072 case SCTP_ADD_IP_ADDRESS: 1073 case SCTP_DEL_IP_ADDRESS: 1074 case SCTP_SET_PRIM_ADDR: 1075 stcb->asoc.peer_supports_asconf = 0; 1076 break; 1077 case SCTP_SUCCESS_REPORT: 1078 case SCTP_ERROR_CAUSE_IND: 1079 SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n"); 1080 SCTPDBG(SCTP_DEBUG_INPUT2, 1081 "Turning off ASCONF to this strange peer\n"); 1082 stcb->asoc.peer_supports_asconf = 0; 1083 break; 1084 default: 1085 SCTPDBG(SCTP_DEBUG_INPUT2, 1086 "Peer does not support param type %d(%x)??\n", 1087 pbad->param_type, (uint32_t) pbad->param_type); 1088 break; 1089 } 1090} 1091 1092static int 1093sctp_handle_error(struct sctp_chunkhdr *ch, 1094 struct sctp_tcb *stcb, struct sctp_nets *net) 1095{ 1096 int chklen; 1097 struct sctp_paramhdr *phdr; 1098 uint16_t error_type; 1099 uint16_t error_len; 1100 struct sctp_association *asoc; 1101 int adjust; 1102 1103#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1104 struct socket *so; 1105 1106#endif 1107 1108 /* parse through all of the errors and process */ 1109 asoc = &stcb->asoc; 1110 phdr = (struct sctp_paramhdr *)((caddr_t)ch + 1111 sizeof(struct sctp_chunkhdr)); 1112 chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr); 1113 while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) { 1114 /* Process an Error Cause */ 1115 error_type = ntohs(phdr->param_type); 1116 error_len = ntohs(phdr->param_length); 1117 if ((error_len > chklen) || (error_len == 0)) { 1118 /* invalid param 
length for this param */ 1119 SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n", 1120 chklen, error_len); 1121 return (0); 1122 } 1123 switch (error_type) { 1124 case SCTP_CAUSE_INVALID_STREAM: 1125 case SCTP_CAUSE_MISSING_PARAM: 1126 case SCTP_CAUSE_INVALID_PARAM: 1127 case SCTP_CAUSE_NO_USER_DATA: 1128 SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n", 1129 error_type); 1130 break; 1131 case SCTP_CAUSE_NAT_COLLIDING_STATE: 1132 SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n", 1133 ch->chunk_flags); 1134 if (sctp_handle_nat_colliding_state(stcb)) { 1135 return (0); 1136 } 1137 break; 1138 case SCTP_CAUSE_NAT_MISSING_STATE: 1139 SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n", 1140 ch->chunk_flags); 1141 if (sctp_handle_nat_missing_state(stcb, net)) { 1142 return (0); 1143 } 1144 break; 1145 case SCTP_CAUSE_STALE_COOKIE: 1146 /* 1147 * We only act if we have echoed a cookie and are 1148 * waiting. 
1149 */ 1150 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 1151 int *p; 1152 1153 p = (int *)((caddr_t)phdr + sizeof(*phdr)); 1154 /* Save the time doubled */ 1155 asoc->cookie_preserve_req = ntohl(*p) << 1; 1156 asoc->stale_cookie_count++; 1157 if (asoc->stale_cookie_count > 1158 asoc->max_init_times) { 1159 sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED); 1160 /* now free the asoc */ 1161#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1162 so = SCTP_INP_SO(stcb->sctp_ep); 1163 atomic_add_int(&stcb->asoc.refcnt, 1); 1164 SCTP_TCB_UNLOCK(stcb); 1165 SCTP_SOCKET_LOCK(so, 1); 1166 SCTP_TCB_LOCK(stcb); 1167 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1168#endif 1169 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 1170 SCTP_FROM_SCTP_INPUT + SCTP_LOC_11); 1171#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1172 SCTP_SOCKET_UNLOCK(so, 1); 1173#endif 1174 return (-1); 1175 } 1176 /* blast back to INIT state */ 1177 sctp_toss_old_cookies(stcb, &stcb->asoc); 1178 asoc->state &= ~SCTP_STATE_COOKIE_ECHOED; 1179 asoc->state |= SCTP_STATE_COOKIE_WAIT; 1180 sctp_stop_all_cookie_timers(stcb); 1181 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1182 } 1183 break; 1184 case SCTP_CAUSE_UNRESOLVABLE_ADDR: 1185 /* 1186 * Nothing we can do here, we don't do hostname 1187 * addresses so if the peer does not like my IPv6 1188 * (or IPv4 for that matter) it does not matter. If 1189 * they don't support that type of address, they can 1190 * NOT possibly get that packet type... i.e. with no 1191 * IPv6 you can't recieve a IPv6 packet. so we can 1192 * safely ignore this one. If we ever added support 1193 * for HOSTNAME Addresses, then we would need to do 1194 * something here. 
1195 */ 1196 break; 1197 case SCTP_CAUSE_UNRECOG_CHUNK: 1198 sctp_process_unrecog_chunk(stcb, phdr, net); 1199 break; 1200 case SCTP_CAUSE_UNRECOG_PARAM: 1201 sctp_process_unrecog_param(stcb, phdr); 1202 break; 1203 case SCTP_CAUSE_COOKIE_IN_SHUTDOWN: 1204 /* 1205 * We ignore this since the timer will drive out a 1206 * new cookie anyway and there timer will drive us 1207 * to send a SHUTDOWN_COMPLETE. We can't send one 1208 * here since we don't have their tag. 1209 */ 1210 break; 1211 case SCTP_CAUSE_DELETING_LAST_ADDR: 1212 case SCTP_CAUSE_RESOURCE_SHORTAGE: 1213 case SCTP_CAUSE_DELETING_SRC_ADDR: 1214 /* 1215 * We should NOT get these here, but in a 1216 * ASCONF-ACK. 1217 */ 1218 SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n", 1219 error_type); 1220 break; 1221 case SCTP_CAUSE_OUT_OF_RESC: 1222 /* 1223 * And what, pray tell do we do with the fact that 1224 * the peer is out of resources? Not really sure we 1225 * could do anything but abort. I suspect this 1226 * should have came WITH an abort instead of in a 1227 * OP-ERROR. 
1228 */ 1229 break; 1230 default: 1231 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n", 1232 error_type); 1233 break; 1234 } 1235 adjust = SCTP_SIZE32(error_len); 1236 chklen -= adjust; 1237 phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust); 1238 } 1239 return (0); 1240} 1241 1242static int 1243sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, 1244 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 1245 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id) 1246{ 1247 struct sctp_init_ack *init_ack; 1248 struct mbuf *op_err; 1249 1250 SCTPDBG(SCTP_DEBUG_INPUT2, 1251 "sctp_handle_init_ack: handling INIT-ACK\n"); 1252 1253 if (stcb == NULL) { 1254 SCTPDBG(SCTP_DEBUG_INPUT2, 1255 "sctp_handle_init_ack: TCB is null\n"); 1256 return (-1); 1257 } 1258 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) { 1259 /* Invalid length */ 1260 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1261 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1262 op_err, 0, net->port); 1263 *abort_no_unlock = 1; 1264 return (-1); 1265 } 1266 init_ack = &cp->init; 1267 /* validate parameters */ 1268 if (init_ack->initiate_tag == 0) { 1269 /* protocol error... send an abort */ 1270 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1271 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1272 op_err, 0, net->port); 1273 *abort_no_unlock = 1; 1274 return (-1); 1275 } 1276 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) { 1277 /* protocol error... send an abort */ 1278 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1279 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1280 op_err, 0, net->port); 1281 *abort_no_unlock = 1; 1282 return (-1); 1283 } 1284 if (init_ack->num_inbound_streams == 0) { 1285 /* protocol error... 
send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_outbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* process according to association state... */
	switch (stcb->asoc.state & SCTP_STATE_MASK) {
	case SCTP_STATE_COOKIE_WAIT:
		/* this is the expected state for this chunk */
		/* process the INIT-ACK parameters */
		if (stcb->asoc.primary_destination->dest_state &
		    SCTP_ADDR_UNCONFIRMED) {
			/*
			 * The primary is where we sent the INIT, we can
			 * always consider it confirmed when the INIT-ACK is
			 * returned. Do this before we load addresses
			 * though.
			 */
			stcb->asoc.primary_destination->dest_state &=
			    ~SCTP_ADDR_UNCONFIRMED;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
		}
		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
		    net, abort_no_unlock, vrf_id) < 0) {
			/* error in parsing parameters */
			return (-1);
		}
		/* update our state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);

		/* reset the RTO calc */
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		/* a valid INIT-ACK clears the error counter */
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		/*
		 * collapse the init timer back in case of a exponential
		 * backoff
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
		    stcb, net);
		/*
		 * the send at the end of the inbound data processing will
		 * cause the cookie to be sent
		 */
		break;
	case SCTP_STATE_SHUTDOWN_SENT:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_COOKIE_ECHOED:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_OPEN:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_EMPTY:
	case SCTP_STATE_INUSE:
	default:
		/* incorrect state... discard */
		return (-1);
		break;
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
	return (0);
}

/* forward declaration; definition follows sctp_process_cookie_existing() */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint32_t vrf_id, uint16_t port);


/*
 * handle a state cookie for an existing association m: input packet mbuf
 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
 * "split" mbuf and the cookie signature does not exist offset: offset into
 * mbuf to the cookie-echo chunk
 */
static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
    uint32_t vrf_id, int auth_skipped, uint32_t auth_offset, uint32_t auth_len, uint16_t port)
{
	struct sctp_association *asoc;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sctp_nets *net;
	struct mbuf *op_err;
	struct sctp_paramhdr *ph;
	int chk_length;
	int init_offset, initack_offset, i;
	int retval;
	int spec_flag = 0;
	uint32_t how_indx;

	net = *netp;
	/* I know that the TCB is non-NULL from the caller */
	asoc = &stcb->asoc;
	/*
	 * cookie_how[] records which collision case (RFC 4960 5.2.4) each
	 * cookie echo took, for debugging; find the first free slot.
	 */
	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
		if (asoc->cookie_how[how_indx] == 0)
			break;
	}
	if (how_indx < sizeof(asoc->cookie_how)) {
		asoc->cookie_how[how_indx] = 1;
	}
	if (SCTP_GET_STATE(asoc) ==
SCTP_STATE_SHUTDOWN_ACK_SENT) { 1413 /* SHUTDOWN came in after sending INIT-ACK */ 1414 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 1415 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1416 0, M_DONTWAIT, 1, MT_DATA); 1417 if (op_err == NULL) { 1418 /* FOOBAR */ 1419 return (NULL); 1420 } 1421 /* Set the len */ 1422 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr); 1423 ph = mtod(op_err, struct sctp_paramhdr *); 1424 ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN); 1425 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 1426 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag, 1427 vrf_id, net->port); 1428 if (how_indx < sizeof(asoc->cookie_how)) 1429 asoc->cookie_how[how_indx] = 2; 1430 return (NULL); 1431 } 1432 /* 1433 * find and validate the INIT chunk in the cookie (peer's info) the 1434 * INIT should start after the cookie-echo header struct (chunk 1435 * header, state cookie header struct) 1436 */ 1437 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk); 1438 1439 init_cp = (struct sctp_init_chunk *) 1440 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1441 (uint8_t *) & init_buf); 1442 if (init_cp == NULL) { 1443 /* could not pull a INIT chunk in cookie */ 1444 return (NULL); 1445 } 1446 chk_length = ntohs(init_cp->ch.chunk_length); 1447 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1448 return (NULL); 1449 } 1450 /* 1451 * find and validate the INIT-ACK chunk in the cookie (my info) the 1452 * INIT-ACK follows the INIT chunk 1453 */ 1454 initack_offset = init_offset + SCTP_SIZE32(chk_length); 1455 initack_cp = (struct sctp_init_ack_chunk *) 1456 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1457 (uint8_t *) & initack_buf); 1458 if (initack_cp == NULL) { 1459 /* could not pull INIT-ACK chunk in cookie */ 1460 return (NULL); 1461 } 1462 chk_length = ntohs(initack_cp->ch.chunk_length); 1463 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1464 return 
(NULL); 1465 } 1466 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1467 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) { 1468 /* 1469 * case D in Section 5.2.4 Table 2: MMAA process accordingly 1470 * to get into the OPEN state 1471 */ 1472 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1473 /*- 1474 * Opps, this means that we somehow generated two vtag's 1475 * the same. I.e. we did: 1476 * Us Peer 1477 * <---INIT(tag=a)------ 1478 * ----INIT-ACK(tag=t)--> 1479 * ----INIT(tag=t)------> *1 1480 * <---INIT-ACK(tag=a)--- 1481 * <----CE(tag=t)------------- *2 1482 * 1483 * At point *1 we should be generating a different 1484 * tag t'. Which means we would throw away the CE and send 1485 * ours instead. Basically this is case C (throw away side). 1486 */ 1487 if (how_indx < sizeof(asoc->cookie_how)) 1488 asoc->cookie_how[how_indx] = 17; 1489 return (NULL); 1490 1491 } 1492 switch SCTP_GET_STATE 1493 (asoc) { 1494 case SCTP_STATE_COOKIE_WAIT: 1495 case SCTP_STATE_COOKIE_ECHOED: 1496 /* 1497 * INIT was sent but got a COOKIE_ECHO with the 1498 * correct tags... just accept it...but we must 1499 * process the init so that we can make sure we have 1500 * the right seq no's. 1501 */ 1502 /* First we must process the INIT !! 
*/ 1503 retval = sctp_process_init(init_cp, stcb, net); 1504 if (retval < 0) { 1505 if (how_indx < sizeof(asoc->cookie_how)) 1506 asoc->cookie_how[how_indx] = 3; 1507 return (NULL); 1508 } 1509 /* we have already processed the INIT so no problem */ 1510 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, 1511 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12); 1512 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13); 1513 /* update current state */ 1514 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1515 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1516 else 1517 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1518 1519 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1520 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1521 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1522 stcb->sctp_ep, stcb, asoc->primary_destination); 1523 } 1524 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1525 sctp_stop_all_cookie_timers(stcb); 1526 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1527 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1528 (inp->sctp_socket->so_qlimit == 0) 1529 ) { 1530#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1531 struct socket *so; 1532 1533#endif 1534 /* 1535 * Here is where collision would go if we 1536 * did a connect() and instead got a 1537 * init/init-ack/cookie done before the 1538 * init-ack came back.. 
1539 */ 1540 stcb->sctp_ep->sctp_flags |= 1541 SCTP_PCB_FLAGS_CONNECTED; 1542#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1543 so = SCTP_INP_SO(stcb->sctp_ep); 1544 atomic_add_int(&stcb->asoc.refcnt, 1); 1545 SCTP_TCB_UNLOCK(stcb); 1546 SCTP_SOCKET_LOCK(so, 1); 1547 SCTP_TCB_LOCK(stcb); 1548 atomic_add_int(&stcb->asoc.refcnt, -1); 1549 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1550 SCTP_SOCKET_UNLOCK(so, 1); 1551 return (NULL); 1552 } 1553#endif 1554 soisconnected(stcb->sctp_socket); 1555#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1556 SCTP_SOCKET_UNLOCK(so, 1); 1557#endif 1558 } 1559 /* notify upper layer */ 1560 *notification = SCTP_NOTIFY_ASSOC_UP; 1561 /* 1562 * since we did not send a HB make sure we don't 1563 * double things 1564 */ 1565 net->hb_responded = 1; 1566 net->RTO = sctp_calculate_rto(stcb, asoc, net, 1567 &cookie->time_entered, 1568 sctp_align_unsafe_makecopy, 1569 SCTP_RTT_FROM_NON_DATA); 1570 1571 if (stcb->asoc.sctp_autoclose_ticks && 1572 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) { 1573 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 1574 inp, stcb, NULL); 1575 } 1576 break; 1577 default: 1578 /* 1579 * we're in the OPEN state (or beyond), so peer must 1580 * have simply lost the COOKIE-ACK 1581 */ 1582 break; 1583 } /* end switch */ 1584 sctp_stop_all_cookie_timers(stcb); 1585 /* 1586 * We ignore the return code here.. not sure if we should 1587 * somehow abort.. but we do have an existing asoc. This 1588 * really should not fail. 
1589 */ 1590 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1591 init_offset + sizeof(struct sctp_init_chunk), 1592 initack_offset, sh, init_src)) { 1593 if (how_indx < sizeof(asoc->cookie_how)) 1594 asoc->cookie_how[how_indx] = 4; 1595 return (NULL); 1596 } 1597 /* respond with a COOKIE-ACK */ 1598 sctp_toss_old_cookies(stcb, asoc); 1599 sctp_send_cookie_ack(stcb); 1600 if (how_indx < sizeof(asoc->cookie_how)) 1601 asoc->cookie_how[how_indx] = 5; 1602 return (stcb); 1603 } 1604 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1605 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag && 1606 cookie->tie_tag_my_vtag == 0 && 1607 cookie->tie_tag_peer_vtag == 0) { 1608 /* 1609 * case C in Section 5.2.4 Table 2: XMOO silently discard 1610 */ 1611 if (how_indx < sizeof(asoc->cookie_how)) 1612 asoc->cookie_how[how_indx] = 6; 1613 return (NULL); 1614 } 1615 /* 1616 * If nat support, and the below and stcb is established, send back 1617 * a ABORT(colliding state) if we are established. 1618 */ 1619 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) && 1620 (asoc->peer_supports_nat) && 1621 ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1622 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) || 1623 (asoc->peer_vtag == 0)))) { 1624 /* 1625 * Special case - Peer's support nat. We may have two init's 1626 * that we gave out the same tag on since one was not 1627 * established.. i.e. we get INIT from host-1 behind the nat 1628 * and we respond tag-a, we get a INIT from host-2 behind 1629 * the nat and we get tag-a again. Then we bring up host-1 1630 * (or 2's) assoc, Then comes the cookie from hsot-2 (or 1). 1631 * Now we have colliding state. We must send an abort here 1632 * with colliding state indication. 
1633 */ 1634 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1635 0, M_DONTWAIT, 1, MT_DATA); 1636 if (op_err == NULL) { 1637 /* FOOBAR */ 1638 return (NULL); 1639 } 1640 /* pre-reserve some space */ 1641#ifdef INET6 1642 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 1643#else 1644 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip)); 1645#endif 1646 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 1647 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1648 /* Set the len */ 1649 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr); 1650 ph = mtod(op_err, struct sctp_paramhdr *); 1651 ph->param_type = htons(SCTP_CAUSE_NAT_COLLIDING_STATE); 1652 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 1653 sctp_send_abort(m, iphlen, sh, 0, op_err, vrf_id, port); 1654 return (NULL); 1655 } 1656 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1657 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) || 1658 (asoc->peer_vtag == 0))) { 1659 /* 1660 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info 1661 * should be ok, re-accept peer info 1662 */ 1663 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1664 /* 1665 * Extension of case C. If we hit this, then the 1666 * random number generator returned the same vtag 1667 * when we first sent our INIT-ACK and when we later 1668 * sent our INIT. The side with the seq numbers that 1669 * are different will be the one that normnally 1670 * would have hit case C. This in effect "extends" 1671 * our vtags in this collision case to be 64 bits. 1672 * The same collision could occur aka you get both 1673 * vtag and seq number the same twice in a row.. but 1674 * is much less likely. If it did happen then we 1675 * would proceed through and bring up the assoc.. we 1676 * may end up with the wrong stream setup however.. 1677 * which would be bad.. but there is no way to 1678 * tell.. 
until we send on a stream that does not 1679 * exist :-) 1680 */ 1681 if (how_indx < sizeof(asoc->cookie_how)) 1682 asoc->cookie_how[how_indx] = 7; 1683 1684 return (NULL); 1685 } 1686 if (how_indx < sizeof(asoc->cookie_how)) 1687 asoc->cookie_how[how_indx] = 8; 1688 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14); 1689 sctp_stop_all_cookie_timers(stcb); 1690 /* 1691 * since we did not send a HB make sure we don't double 1692 * things 1693 */ 1694 net->hb_responded = 1; 1695 if (stcb->asoc.sctp_autoclose_ticks && 1696 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1697 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1698 NULL); 1699 } 1700 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1701 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1702 1703 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) { 1704 /* 1705 * Ok the peer probably discarded our data (if we 1706 * echoed a cookie+data). So anything on the 1707 * sent_queue should be marked for retransmit, we 1708 * may not get something to kick us so it COULD 1709 * still take a timeout to move these.. but it can't 1710 * hurt to mark them. 
1711 */ 1712 struct sctp_tmit_chunk *chk; 1713 1714 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1715 if (chk->sent < SCTP_DATAGRAM_RESEND) { 1716 chk->sent = SCTP_DATAGRAM_RESEND; 1717 sctp_flight_size_decrease(chk); 1718 sctp_total_flight_decrease(stcb, chk); 1719 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1720 spec_flag++; 1721 } 1722 } 1723 1724 } 1725 /* process the INIT info (peer's info) */ 1726 retval = sctp_process_init(init_cp, stcb, net); 1727 if (retval < 0) { 1728 if (how_indx < sizeof(asoc->cookie_how)) 1729 asoc->cookie_how[how_indx] = 9; 1730 return (NULL); 1731 } 1732 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1733 init_offset + sizeof(struct sctp_init_chunk), 1734 initack_offset, sh, init_src)) { 1735 if (how_indx < sizeof(asoc->cookie_how)) 1736 asoc->cookie_how[how_indx] = 10; 1737 return (NULL); 1738 } 1739 if ((asoc->state & SCTP_STATE_COOKIE_WAIT) || 1740 (asoc->state & SCTP_STATE_COOKIE_ECHOED)) { 1741 *notification = SCTP_NOTIFY_ASSOC_UP; 1742 1743 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1744 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1745 (inp->sctp_socket->so_qlimit == 0)) { 1746#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1747 struct socket *so; 1748 1749#endif 1750 stcb->sctp_ep->sctp_flags |= 1751 SCTP_PCB_FLAGS_CONNECTED; 1752#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1753 so = SCTP_INP_SO(stcb->sctp_ep); 1754 atomic_add_int(&stcb->asoc.refcnt, 1); 1755 SCTP_TCB_UNLOCK(stcb); 1756 SCTP_SOCKET_LOCK(so, 1); 1757 SCTP_TCB_LOCK(stcb); 1758 atomic_add_int(&stcb->asoc.refcnt, -1); 1759 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1760 SCTP_SOCKET_UNLOCK(so, 1); 1761 return (NULL); 1762 } 1763#endif 1764 soisconnected(stcb->sctp_socket); 1765#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1766 SCTP_SOCKET_UNLOCK(so, 1); 1767#endif 1768 } 1769 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1770 
SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1771 else 1772 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1773 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1774 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1775 SCTP_STAT_INCR_COUNTER32(sctps_restartestab); 1776 } else { 1777 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1778 } 1779 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1780 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1781 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1782 stcb->sctp_ep, stcb, asoc->primary_destination); 1783 } 1784 sctp_stop_all_cookie_timers(stcb); 1785 sctp_toss_old_cookies(stcb, asoc); 1786 sctp_send_cookie_ack(stcb); 1787 if (spec_flag) { 1788 /* 1789 * only if we have retrans set do we do this. What 1790 * this call does is get only the COOKIE-ACK out and 1791 * then when we return the normal call to 1792 * sctp_chunk_output will get the retrans out behind 1793 * this. 1794 */ 1795 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED); 1796 } 1797 if (how_indx < sizeof(asoc->cookie_how)) 1798 asoc->cookie_how[how_indx] = 11; 1799 1800 return (stcb); 1801 } 1802 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1803 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) && 1804 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce && 1805 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce && 1806 cookie->tie_tag_peer_vtag != 0) { 1807 struct sctpasochead *head; 1808 1809 if (asoc->peer_supports_nat) { 1810 /* 1811 * This is a gross gross hack. just call the 1812 * cookie_new code since we are allowing a duplicate 1813 * association. I hope this works... 
1814 */ 1815 return (sctp_process_cookie_new(m, iphlen, offset, sh, cookie, cookie_len, 1816 inp, netp, init_src, notification, 1817 auth_skipped, auth_offset, auth_len, 1818 vrf_id, port)); 1819 } 1820 /* 1821 * case A in Section 5.2.4 Table 2: XXMM (peer restarted) 1822 */ 1823 /* temp code */ 1824 if (how_indx < sizeof(asoc->cookie_how)) 1825 asoc->cookie_how[how_indx] = 12; 1826 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15); 1827 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1828 1829 *sac_assoc_id = sctp_get_associd(stcb); 1830 /* notify upper layer */ 1831 *notification = SCTP_NOTIFY_ASSOC_RESTART; 1832 atomic_add_int(&stcb->asoc.refcnt, 1); 1833 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) && 1834 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 1835 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) { 1836 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1837 } 1838 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1839 SCTP_STAT_INCR_GAUGE32(sctps_restartestab); 1840 } else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1841 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab); 1842 } 1843 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1844 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1845 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1846 stcb->sctp_ep, stcb, asoc->primary_destination); 1847 1848 } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) { 1849 /* move to OPEN state, if not in SHUTDOWN_SENT */ 1850 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1851 } 1852 asoc->pre_open_streams = 1853 ntohs(initack_cp->init.num_outbound_streams); 1854 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1855 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; 1856 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; 1857 1858 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1859 1860 
asoc->str_reset_seq_in = asoc->init_seq_number; 1861 1862 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1863 if (asoc->mapping_array) { 1864 memset(asoc->mapping_array, 0, 1865 asoc->mapping_array_size); 1866 } 1867 if (asoc->nr_mapping_array) { 1868 memset(asoc->nr_mapping_array, 0, 1869 asoc->mapping_array_size); 1870 } 1871 SCTP_TCB_UNLOCK(stcb); 1872 SCTP_INP_INFO_WLOCK(); 1873 SCTP_INP_WLOCK(stcb->sctp_ep); 1874 SCTP_TCB_LOCK(stcb); 1875 atomic_add_int(&stcb->asoc.refcnt, -1); 1876 /* send up all the data */ 1877 SCTP_TCB_SEND_LOCK(stcb); 1878 1879 sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED); 1880 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1881 stcb->asoc.strmout[i].stream_no = i; 1882 stcb->asoc.strmout[i].next_sequence_sent = 0; 1883 stcb->asoc.strmout[i].last_msg_incomplete = 0; 1884 } 1885 /* process the INIT-ACK info (my info) */ 1886 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1887 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1888 1889 /* pull from vtag hash */ 1890 LIST_REMOVE(stcb, sctp_asocs); 1891 /* re-insert to new vtag position */ 1892 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, 1893 SCTP_BASE_INFO(hashasocmark))]; 1894 /* 1895 * put it in the bucket in the vtag hash of assoc's for the 1896 * system 1897 */ 1898 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 1899 1900 /* process the INIT info (peer's info) */ 1901 SCTP_TCB_SEND_UNLOCK(stcb); 1902 SCTP_INP_WUNLOCK(stcb->sctp_ep); 1903 SCTP_INP_INFO_WUNLOCK(); 1904 1905 retval = sctp_process_init(init_cp, stcb, net); 1906 if (retval < 0) { 1907 if (how_indx < sizeof(asoc->cookie_how)) 1908 asoc->cookie_how[how_indx] = 13; 1909 1910 return (NULL); 1911 } 1912 /* 1913 * since we did not send a HB make sure we don't double 1914 * things 1915 */ 1916 net->hb_responded = 1; 1917 1918 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1919 init_offset + sizeof(struct sctp_init_chunk), 1920 initack_offset, sh, init_src)) { 1921 if (how_indx < 
sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 14;

		return (NULL);
	}
	/* respond with a COOKIE-ACK */
	sctp_stop_all_cookie_timers(stcb);
	sctp_toss_old_cookies(stcb, asoc);
	sctp_send_cookie_ack(stcb);
	if (how_indx < sizeof(asoc->cookie_how))
		asoc->cookie_how[how_indx] = 15;

	return (stcb);
	}
	if (how_indx < sizeof(asoc->cookie_how))
		asoc->cookie_how[how_indx] = 16;
	/* all other cases... */
	return (NULL);
}


/*
 * Handle a state cookie for a NEW association (no existing TCB matched).
 *
 * m:          input packet mbuf chain -- assumes a pullup of the
 *             IP/SCTP/COOKIE-ECHO chunk.  Note: this is a "split" mbuf
 *             and the cookie signature does not exist in it any more.
 * offset:     offset into the mbuf of the cookie-echo chunk.
 * cookie_len: length of the cookie chunk (signature already removed).
 * init_src:   address the INIT (inside the cookie) came from.
 *
 * Returns a new locked TCB on success, or NULL on any failure (bad
 * chunks inside the cookie, allocation failure, AUTH failure, ...).
 * On NULL returns the association created along the way, if any, has
 * already been freed and/or an ABORT has been sent.
 */
struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sockaddr_storage sa_store;
	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
	struct sctp_association *asoc;
	int chk_length;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;
	/* old_tag is saved below but not otherwise used in this function */
	uint32_t old_tag;
	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(inp);
#endif

	/*
	 * Find and validate the INIT chunk in the cookie (peer's info).
	 * The INIT should start right after the cookie-echo header struct
	 * (chunk header + state cookie header struct).
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	/*
	 * Find and validate the INIT-ACK chunk in the cookie (my info).
	 * The INIT-ACK immediately follows the INIT chunk.
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value.  This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * Now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and populate it.
	 */

	/*
	 * Here we do a trick: we pass NULL for the proc/thread argument.
	 * We can do this since in effect the p argument is only used when
	 * the socket is unbound and we must do an implicit bind; since we
	 * are processing a cookie, we cannot be unbound.
	 */
	stcb = sctp_aloc_assoc(inp, init_src, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id,
	    (struct thread *)NULL
	    );
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);

		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id, port);
		return (NULL);
	}
	/* get the correct sctp_nets */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->ipv4_local_scope = cookie->ipv4_scope;
	asoc->site_scope = cookie->site_scope;
	asoc->local_scope = cookie->local_scope;
	asoc->loopback_scope = cookie->loopback_scope;

	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
		struct mbuf *op_err;

		/*
		 * Houston, we have a problem.  The EP changed while the
		 * cookie was in flight.  Only recourse is to abort the
		 * association.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id, port);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* process the INIT-ACK info (my info) */
	old_tag = asoc->my_vtag;
	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/* process the INIT info (peer's info) */
	if (netp)
		retval = sctp_process_init(init_cp, stcb, *netp);
	else
		retval = 0;
	if (retval < 0) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* load all addresses */
	if (sctp_load_addresses_from_init(stcb, m, iphlen,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
	    init_src)) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		auth = (struct sctp_auth_chunk *)
		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
			/* auth HMAC failed, dump the assoc and packet */
			SCTPDBG(SCTP_DEBUG_AUTH1,
			    "COOKIE-ECHO: AUTH failed\n");
			atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
#endif
			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			return (NULL);
		} else {
			/* remaining chunks checked... good to go */
			stcb->asoc.authenticated = 1;
		}
	}
	/* update current state */
	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
		    stcb->sctp_ep, stcb, asoc->primary_destination);
	}
	sctp_stop_all_cookie_timers(stcb);
	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
	SCTP_STAT_INCR_GAUGE32(sctps_currestab);

	/*
	 * If we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to get added to the peer (e.g. addresses
	 * changed while the cookie echo was in flight).  This needs to be
	 * done after we go to the OPEN state to do the correct asconf
	 * processing; else, make sure we have the correct addresses in our
	 * lists.
	 */

	/* warning, we re-use sin, sin6, sa_store here! */
	/* pull in local_address (our "from" address) */
	switch (cookie->laddr_type) {
#ifdef INET
	case SCTP_IPV4_ADDRESS:
		/* source addr is IPv4 */
		sin = (struct sockaddr_in *)initack_src;
		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(struct sockaddr_in);
		sin->sin_addr.s_addr = cookie->laddress[0];
		break;
#endif
#ifdef INET6
	case SCTP_IPV6_ADDRESS:
		/* source addr is IPv6 */
		sin6 = (struct sockaddr_in6 *)initack_src;
		memset(sin6, 0, sizeof(*sin6));
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(struct sockaddr_in6);
		sin6->sin6_scope_id = cookie->scope_id;
		memcpy(&sin6->sin6_addr, cookie->laddress,
		    sizeof(sin6->sin6_addr));
		break;
#endif
	default:
		/* unknown local address type in cookie: tear it all down */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}

	/* set up to notify upper layer */
	*notification = SCTP_NOTIFY_ASSOC_UP;
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (inp->sctp_socket->so_qlimit == 0)) {
		/*
		 * This is an endpoint that called connect().  How it got a
		 * cookie that is NEW is a bit of a mystery.  It must be
		 * that the INIT was sent, but before it got there... a
		 * complete INIT/INIT-ACK/COOKIE arrived.  But of course
		 * then it should have gone to the other code... not
		 * here... oh well... a bit of protection is worth having.
		 */
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return (NULL);
		}
#endif
		soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_socket->so_qlimit)) {
		/*
		 * We don't want to do anything with this one.  Since it is
		 * the listening guy, the timer will get started for
		 * accepted connections in the caller.
		 */
		;
	}
	/* since we did not send a HB make sure we don't double things */
	if ((netp) && (*netp))
		(*netp)->hb_responded = 1;

	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
	}
	/* calculate the RTT from the cookie's timestamp */
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
	if ((netp) && (*netp)) {
		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
		    &cookie->time_entered, sctp_align_unsafe_makecopy,
		    SCTP_RTT_FROM_NON_DATA);
	}
	/* respond with a COOKIE-ACK */
	sctp_send_cookie_ack(stcb);

	/*
	 * check the address lists for any ASCONFs that need to be sent
	 * AFTER the cookie-ack is sent
	 */
	sctp_check_address_list(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
	    initack_src, cookie->local_scope, cookie->site_scope,
	    cookie->ipv4_scope, cookie->loopback_scope);


	return (stcb);
}

/*
 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e.
 * we NEED to make sure we are not already using the vtag.  If so we
 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG.  No middle box bit!
2309 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag, 2310 SCTP_BASE_INFO(hashasocmark))]; 2311 LIST_FOREACH(stcb, head, sctp_asocs) { 2312 if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) { 2313 -- SEND ABORT - TRY AGAIN -- 2314 } 2315 } 2316*/ 2317 2318/* 2319 * handles a COOKIE-ECHO message stcb: modified to either a new or left as 2320 * existing (non-NULL) TCB 2321 */ 2322static struct mbuf * 2323sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 2324 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 2325 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 2326 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2327 struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint16_t port) 2328{ 2329 struct sctp_state_cookie *cookie; 2330 struct sctp_tcb *l_stcb = *stcb; 2331 struct sctp_inpcb *l_inp; 2332 struct sockaddr *to; 2333 sctp_assoc_t sac_restart_id; 2334 struct sctp_pcb *ep; 2335 struct mbuf *m_sig; 2336 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 2337 uint8_t *sig; 2338 uint8_t cookie_ok = 0; 2339 unsigned int size_of_pkt, sig_offset, cookie_offset; 2340 unsigned int cookie_len; 2341 struct timeval now; 2342 struct timeval time_expires; 2343 struct sockaddr_storage dest_store; 2344 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store; 2345 struct ip *iph; 2346 int notification = 0; 2347 struct sctp_nets *netl; 2348 int had_a_existing_tcb = 0; 2349 int send_int_conf = 0; 2350 2351#ifdef INET 2352 struct sockaddr_in sin; 2353 2354#endif 2355#ifdef INET6 2356 struct sockaddr_in6 sin6; 2357 2358#endif 2359 2360 SCTPDBG(SCTP_DEBUG_INPUT2, 2361 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 2362 2363 if (inp_p == NULL) { 2364 return (NULL); 2365 } 2366 /* First get the destination address setup too. 
*/ 2367 iph = mtod(m, struct ip *); 2368 switch (iph->ip_v) { 2369#ifdef INET 2370 case IPVERSION: 2371 { 2372 /* its IPv4 */ 2373 struct sockaddr_in *lsin; 2374 2375 lsin = (struct sockaddr_in *)(localep_sa); 2376 memset(lsin, 0, sizeof(*lsin)); 2377 lsin->sin_family = AF_INET; 2378 lsin->sin_len = sizeof(*lsin); 2379 lsin->sin_port = sh->dest_port; 2380 lsin->sin_addr.s_addr = iph->ip_dst.s_addr; 2381 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph); 2382 break; 2383 } 2384#endif 2385#ifdef INET6 2386 case IPV6_VERSION >> 4: 2387 { 2388 /* its IPv6 */ 2389 struct ip6_hdr *ip6; 2390 struct sockaddr_in6 *lsin6; 2391 2392 lsin6 = (struct sockaddr_in6 *)(localep_sa); 2393 memset(lsin6, 0, sizeof(*lsin6)); 2394 lsin6->sin6_family = AF_INET6; 2395 lsin6->sin6_len = sizeof(struct sockaddr_in6); 2396 ip6 = mtod(m, struct ip6_hdr *); 2397 lsin6->sin6_port = sh->dest_port; 2398 lsin6->sin6_addr = ip6->ip6_dst; 2399 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen; 2400 break; 2401 } 2402#endif 2403 default: 2404 return (NULL); 2405 } 2406 2407 cookie = &cp->cookie; 2408 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 2409 cookie_len = ntohs(cp->ch.chunk_length); 2410 2411 if ((cookie->peerport != sh->src_port) && 2412 (cookie->myport != sh->dest_port) && 2413 (cookie->my_vtag != sh->v_tag)) { 2414 /* 2415 * invalid ports or bad tag. Note that we always leave the 2416 * v_tag in the header in network order and when we stored 2417 * it in the my_vtag slot we also left it in network order. 2418 * This maintains the match even though it may be in the 2419 * opposite byte order of the machine :-> 2420 */ 2421 return (NULL); 2422 } 2423 if (cookie_len > size_of_pkt || 2424 cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 2425 sizeof(struct sctp_init_chunk) + 2426 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 2427 /* cookie too long! 
or too small */ 2428 return (NULL); 2429 } 2430 /* 2431 * split off the signature into its own mbuf (since it should not be 2432 * calculated in the sctp_hmac_m() call). 2433 */ 2434 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 2435 if (sig_offset > size_of_pkt) { 2436 /* packet not correct size! */ 2437 /* XXX this may already be accounted for earlier... */ 2438 return (NULL); 2439 } 2440 m_sig = m_split(m, sig_offset, M_DONTWAIT); 2441 if (m_sig == NULL) { 2442 /* out of memory or ?? */ 2443 return (NULL); 2444 } 2445#ifdef SCTP_MBUF_LOGGING 2446 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2447 struct mbuf *mat; 2448 2449 mat = m_sig; 2450 while (mat) { 2451 if (SCTP_BUF_IS_EXTENDED(mat)) { 2452 sctp_log_mb(mat, SCTP_MBUF_SPLIT); 2453 } 2454 mat = SCTP_BUF_NEXT(mat); 2455 } 2456 } 2457#endif 2458 2459 /* 2460 * compute the signature/digest for the cookie 2461 */ 2462 ep = &(*inp_p)->sctp_ep; 2463 l_inp = *inp_p; 2464 if (l_stcb) { 2465 SCTP_TCB_UNLOCK(l_stcb); 2466 } 2467 SCTP_INP_RLOCK(l_inp); 2468 if (l_stcb) { 2469 SCTP_TCB_LOCK(l_stcb); 2470 } 2471 /* which cookie is it? 
*/ 2472 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 2473 (ep->current_secret_number != ep->last_secret_number)) { 2474 /* it's the old cookie */ 2475 (void)sctp_hmac_m(SCTP_HMAC, 2476 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2477 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2478 } else { 2479 /* it's the current cookie */ 2480 (void)sctp_hmac_m(SCTP_HMAC, 2481 (uint8_t *) ep->secret_key[(int)ep->current_secret_number], 2482 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2483 } 2484 /* get the signature */ 2485 SCTP_INP_RUNLOCK(l_inp); 2486 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 2487 if (sig == NULL) { 2488 /* couldn't find signature */ 2489 sctp_m_freem(m_sig); 2490 return (NULL); 2491 } 2492 /* compare the received digest with the computed digest */ 2493 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 2494 /* try the old cookie? */ 2495 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 2496 (ep->current_secret_number != ep->last_secret_number)) { 2497 /* compute digest with old */ 2498 (void)sctp_hmac_m(SCTP_HMAC, 2499 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2500 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2501 /* compare */ 2502 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 2503 cookie_ok = 1; 2504 } 2505 } else { 2506 cookie_ok = 1; 2507 } 2508 2509 /* 2510 * Now before we continue we must reconstruct our mbuf so that 2511 * normal processing of any other chunks will work. 
2512 */ 2513 { 2514 struct mbuf *m_at; 2515 2516 m_at = m; 2517 while (SCTP_BUF_NEXT(m_at) != NULL) { 2518 m_at = SCTP_BUF_NEXT(m_at); 2519 } 2520 SCTP_BUF_NEXT(m_at) = m_sig; 2521 } 2522 2523 if (cookie_ok == 0) { 2524 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 2525 SCTPDBG(SCTP_DEBUG_INPUT2, 2526 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 2527 (uint32_t) offset, cookie_offset, sig_offset); 2528 return (NULL); 2529 } 2530 /* 2531 * check the cookie timestamps to be sure it's not stale 2532 */ 2533 (void)SCTP_GETTIME_TIMEVAL(&now); 2534 /* Expire time is in Ticks, so we convert to seconds */ 2535 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); 2536 time_expires.tv_usec = cookie->time_entered.tv_usec; 2537 /* 2538 * TODO sctp_constants.h needs alternative time macros when _KERNEL 2539 * is undefined. 2540 */ 2541 if (timevalcmp(&now, &time_expires, >)) { 2542 /* cookie is stale! */ 2543 struct mbuf *op_err; 2544 struct sctp_stale_cookie_msg *scm; 2545 uint32_t tim; 2546 2547 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg), 2548 0, M_DONTWAIT, 1, MT_DATA); 2549 if (op_err == NULL) { 2550 /* FOOBAR */ 2551 return (NULL); 2552 } 2553 /* Set the len */ 2554 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg); 2555 scm = mtod(op_err, struct sctp_stale_cookie_msg *); 2556 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE); 2557 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) + 2558 (sizeof(uint32_t)))); 2559 /* seconds to usec */ 2560 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 2561 /* add in usec */ 2562 if (tim == 0) 2563 tim = now.tv_usec - cookie->time_entered.tv_usec; 2564 scm->time_usec = htonl(tim); 2565 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag, 2566 vrf_id, port); 2567 return (NULL); 2568 } 2569 /* 2570 * Now we must see with the lookup address if we have an existing 2571 * asoc. 
This will only happen if we were in the COOKIE-WAIT state 2572 * and a INIT collided with us and somewhere the peer sent the 2573 * cookie on another address besides the single address our assoc 2574 * had for him. In this case we will have one of the tie-tags set at 2575 * least AND the address field in the cookie can be used to look it 2576 * up. 2577 */ 2578 to = NULL; 2579 switch (cookie->addr_type) { 2580#ifdef INET6 2581 case SCTP_IPV6_ADDRESS: 2582 memset(&sin6, 0, sizeof(sin6)); 2583 sin6.sin6_family = AF_INET6; 2584 sin6.sin6_len = sizeof(sin6); 2585 sin6.sin6_port = sh->src_port; 2586 sin6.sin6_scope_id = cookie->scope_id; 2587 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 2588 sizeof(sin6.sin6_addr.s6_addr)); 2589 to = (struct sockaddr *)&sin6; 2590 break; 2591#endif 2592#ifdef INET 2593 case SCTP_IPV4_ADDRESS: 2594 memset(&sin, 0, sizeof(sin)); 2595 sin.sin_family = AF_INET; 2596 sin.sin_len = sizeof(sin); 2597 sin.sin_port = sh->src_port; 2598 sin.sin_addr.s_addr = cookie->address[0]; 2599 to = (struct sockaddr *)&sin; 2600 break; 2601#endif 2602 default: 2603 /* This should not happen */ 2604 return (NULL); 2605 } 2606 if ((*stcb == NULL) && to) { 2607 /* Yep, lets check */ 2608 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL); 2609 if (*stcb == NULL) { 2610 /* 2611 * We should have only got back the same inp. If we 2612 * got back a different ep we have a problem. The 2613 * original findep got back l_inp and now 2614 */ 2615 if (l_inp != *inp_p) { 2616 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2617 } 2618 } else { 2619 if (*locked_tcb == NULL) { 2620 /* 2621 * In this case we found the assoc only 2622 * after we locked the create lock. This 2623 * means we are in a colliding case and we 2624 * must make sure that we unlock the tcb if 2625 * its one of the cases where we throw away 2626 * the incoming packets. 
2627 */ 2628 *locked_tcb = *stcb; 2629 2630 /* 2631 * We must also increment the inp ref count 2632 * since the ref_count flags was set when we 2633 * did not find the TCB, now we found it 2634 * which reduces the refcount.. we must 2635 * raise it back out to balance it all :-) 2636 */ 2637 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2638 if ((*stcb)->sctp_ep != l_inp) { 2639 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n", 2640 (*stcb)->sctp_ep, l_inp); 2641 } 2642 } 2643 } 2644 } 2645 if (to == NULL) { 2646 return (NULL); 2647 } 2648 cookie_len -= SCTP_SIGNATURE_SIZE; 2649 if (*stcb == NULL) { 2650 /* this is the "normal" case... get a new TCB */ 2651 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie, 2652 cookie_len, *inp_p, netp, to, ¬ification, 2653 auth_skipped, auth_offset, auth_len, vrf_id, port); 2654 } else { 2655 /* this is abnormal... cookie-echo on existing TCB */ 2656 had_a_existing_tcb = 1; 2657 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh, 2658 cookie, cookie_len, *inp_p, *stcb, netp, to, 2659 ¬ification, &sac_restart_id, vrf_id, auth_skipped, auth_offset, auth_len, port); 2660 } 2661 2662 if (*stcb == NULL) { 2663 /* still no TCB... must be bad cookie-echo */ 2664 return (NULL); 2665 } 2666 if ((*netp != NULL) && (m->m_flags & M_FLOWID)) { 2667 (*netp)->flowid = m->m_pkthdr.flowid; 2668#ifdef INVARIANTS 2669 (*netp)->flowidset = 1; 2670#endif 2671 } 2672 /* 2673 * Ok, we built an association so confirm the address we sent the 2674 * INIT-ACK to. 2675 */ 2676 netl = sctp_findnet(*stcb, to); 2677 /* 2678 * This code should in theory NOT run but 2679 */ 2680 if (netl == NULL) { 2681 /* TSNH! Huh, why do I need to add this address here? 
*/ 2682 int ret; 2683 2684 ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE, 2685 SCTP_IN_COOKIE_PROC); 2686 netl = sctp_findnet(*stcb, to); 2687 } 2688 if (netl) { 2689 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2690 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2691 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2692 netl); 2693 send_int_conf = 1; 2694 } 2695 } 2696 if (*stcb) { 2697 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p, 2698 *stcb, NULL); 2699 } 2700 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2701 if (!had_a_existing_tcb || 2702 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2703 /* 2704 * If we have a NEW cookie or the connect never 2705 * reached the connected state during collision we 2706 * must do the TCP accept thing. 2707 */ 2708 struct socket *so, *oso; 2709 struct sctp_inpcb *inp; 2710 2711 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2712 /* 2713 * For a restart we will keep the same 2714 * socket, no need to do anything. I THINK!! 
2715 */ 2716 sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED); 2717 if (send_int_conf) { 2718 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2719 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2720 } 2721 return (m); 2722 } 2723 oso = (*inp_p)->sctp_socket; 2724 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2725 SCTP_TCB_UNLOCK((*stcb)); 2726 CURVNET_SET(oso->so_vnet); 2727 so = sonewconn(oso, 0 2728 ); 2729 CURVNET_RESTORE(); 2730 SCTP_TCB_LOCK((*stcb)); 2731 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2732 2733 if (so == NULL) { 2734 struct mbuf *op_err; 2735 2736#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2737 struct socket *pcb_so; 2738 2739#endif 2740 /* Too many sockets */ 2741 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2742 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2743 sctp_abort_association(*inp_p, NULL, m, iphlen, 2744 sh, op_err, vrf_id, port); 2745#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2746 pcb_so = SCTP_INP_SO(*inp_p); 2747 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2748 SCTP_TCB_UNLOCK((*stcb)); 2749 SCTP_SOCKET_LOCK(pcb_so, 1); 2750 SCTP_TCB_LOCK((*stcb)); 2751 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2752#endif 2753 (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); 2754#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2755 SCTP_SOCKET_UNLOCK(pcb_so, 1); 2756#endif 2757 return (NULL); 2758 } 2759 inp = (struct sctp_inpcb *)so->so_pcb; 2760 SCTP_INP_INCR_REF(inp); 2761 /* 2762 * We add the unbound flag here so that if we get an 2763 * soabort() before we get the move_pcb done, we 2764 * will properly cleanup. 
2765 */ 2766 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2767 SCTP_PCB_FLAGS_CONNECTED | 2768 SCTP_PCB_FLAGS_IN_TCPPOOL | 2769 SCTP_PCB_FLAGS_UNBOUND | 2770 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 2771 SCTP_PCB_FLAGS_DONT_WAKE); 2772 inp->sctp_features = (*inp_p)->sctp_features; 2773 inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features; 2774 inp->sctp_socket = so; 2775 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 2776 inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off; 2777 inp->sctp_ecn_enable = (*inp_p)->sctp_ecn_enable; 2778 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 2779 inp->sctp_context = (*inp_p)->sctp_context; 2780 inp->inp_starting_point_for_iterator = NULL; 2781 /* 2782 * copy in the authentication parameters from the 2783 * original endpoint 2784 */ 2785 if (inp->sctp_ep.local_hmacs) 2786 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2787 inp->sctp_ep.local_hmacs = 2788 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 2789 if (inp->sctp_ep.local_auth_chunks) 2790 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2791 inp->sctp_ep.local_auth_chunks = 2792 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 2793 2794 /* 2795 * Now we must move it from one hash table to 2796 * another and get the tcb in the right place. 2797 */ 2798 2799 /* 2800 * This is where the one-2-one socket is put into 2801 * the accept state waiting for the accept! 2802 */ 2803 if (*stcb) { 2804 (*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE; 2805 } 2806 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 2807 2808 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2809 SCTP_TCB_UNLOCK((*stcb)); 2810 2811 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, 2812 0); 2813 SCTP_TCB_LOCK((*stcb)); 2814 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2815 2816 2817 /* 2818 * now we must check to see if we were aborted while 2819 * the move was going on and the lock/unlock 2820 * happened. 
			 */
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/*
				 * yep it was, we leave the assoc attached
				 * to the socket since the sctp_inpcb_free()
				 * call will send an abort for us.
				 */
				SCTP_INP_DECR_REF(inp);
				return (NULL);
			}
			SCTP_INP_DECR_REF(inp);
			/* Switch over to the new guy */
			*inp_p = inp;
			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
			if (send_int_conf) {
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
				    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
			}
			/*
			 * Pull it from the incomplete queue and wake the
			 * guy
			 */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/* lock order: socket lock must be taken with the TCB unlocked */
			atomic_add_int(&(*stcb)->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK((*stcb));
			SCTP_SOCKET_LOCK(so, 1);
#endif
			soisconnected(so);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_TCB_LOCK((*stcb));
			atomic_subtract_int(&(*stcb)->asoc.refcnt, 1);
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			return (m);
		}
	}
	if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE) {
		if (notification) {
			sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		}
		if (send_int_conf) {
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
			    (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED);
		}
	}
	return (m);
}

/*
 * Handle a COOKIE-ACK chunk: stop any cookie/INIT timers and, when the
 * association is still in COOKIE_ECHOED, move it to OPEN, update the RTO
 * from the cookie round-trip, notify the ULP and (re)start the
 * heartbeat/autoclose timers.  Note that cp may not be dereferenced:
 * some callers invoke this without an actual COOKIE-ACK chunk.
 */
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;

	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		if (asoc->overall_error_count == 0) {
			/* only a clean exchange gives a usable RTT sample */
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy,
			    SCTP_RTT_FROM_NON_DATA);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/* drop the TCB lock while acquiring the socket lock */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
			if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) {
				soisconnected(stcb->sctp_socket);
			}
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/*
			 * We don't need to do the asconf thing, nor hb or
			 * autoclose if the socket is closed.
			 */
			goto closed_socket;
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);


		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
closed_socket:
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo);
	}
}

/*
 * Handle an ECN-Echo (ECNE) chunk.  Accepts both the current chunk
 * format and the old, shorter one; for the old format a packet count
 * of one is assumed.
 */
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	struct sctp_ecne_chunk bkup;
	uint8_t override_bit = 0;
	uint32_t tsn, window_data_tsn;
	int len;
	unsigned int pkt_cnt;

	len = ntohs(cp->ch.chunk_length);
	if ((len != sizeof(struct sctp_ecne_chunk)) &&
	    (len != sizeof(struct old_sctp_ecne_chunk))) {
		return;
	}
	if (len == sizeof(struct old_sctp_ecne_chunk)) {
		/* Its the old format */
		memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk));
		bkup.num_pkts_since_cwr = htonl(1);
		cp = &bkup;
	}
SCTP_STAT_INCR(sctps_recvecne); 3002 tsn = ntohl(cp->tsn); 3003 pkt_cnt = ntohl(cp->num_pkts_since_cwr); 3004 lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead); 3005 if (lchk == NULL) { 3006 window_data_tsn = stcb->asoc.sending_seq - 1; 3007 } else { 3008 window_data_tsn = lchk->rec.data.TSN_seq; 3009 } 3010 3011 /* Find where it was sent to if possible. */ 3012 net = NULL; 3013 TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) { 3014 if (lchk->rec.data.TSN_seq == tsn) { 3015 net = lchk->whoTo; 3016 net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send; 3017 break; 3018 } 3019 if (SCTP_TSN_GT(lchk->rec.data.TSN_seq, tsn)) { 3020 break; 3021 } 3022 } 3023 if (net == NULL) { 3024 /* 3025 * What to do. A previous send of a CWR was possibly lost. 3026 * See how old it is, we may have it marked on the actual 3027 * net. 3028 */ 3029 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3030 if (tsn == net->last_cwr_tsn) { 3031 /* Found him, send it off */ 3032 goto out; 3033 } 3034 } 3035 /* 3036 * If we reach here, we need to send a special CWR that says 3037 * hey, we did this a long time ago and you lost the 3038 * response. 3039 */ 3040 net = TAILQ_FIRST(&stcb->asoc.nets); 3041 override_bit = SCTP_CWR_REDUCE_OVERRIDE; 3042 } 3043out: 3044 if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) && 3045 ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) { 3046 /* 3047 * JRS - Use the congestion control given in the pluggable 3048 * CC module 3049 */ 3050 int ocwnd; 3051 3052 ocwnd = net->cwnd; 3053 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt); 3054 /* 3055 * We reduce once every RTT. So we will only lower cwnd at 3056 * the next sending seq i.e. 
the window_data_tsn 3057 */ 3058 net->cwr_window_tsn = window_data_tsn; 3059 net->ecn_ce_pkt_cnt += pkt_cnt; 3060 net->lost_cnt = pkt_cnt; 3061 net->last_cwr_tsn = tsn; 3062 } else { 3063 override_bit |= SCTP_CWR_IN_SAME_WINDOW; 3064 if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) && 3065 ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) { 3066 /* 3067 * Another loss in the same window update how many 3068 * marks/packets lost we have had. 3069 */ 3070 int cnt = 1; 3071 3072 if (pkt_cnt > net->lost_cnt) { 3073 /* Should be the case */ 3074 cnt = (pkt_cnt - net->lost_cnt); 3075 net->ecn_ce_pkt_cnt += cnt; 3076 } 3077 net->lost_cnt = pkt_cnt; 3078 net->last_cwr_tsn = tsn; 3079 /* 3080 * Most CC functions will ignore this call, since we 3081 * are in-window yet of the initial CE the peer saw. 3082 */ 3083 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt); 3084 } 3085 } 3086 /* 3087 * We always send a CWR this way if our previous one was lost our 3088 * peer will get an update, or if it is not time again to reduce we 3089 * still get the cwr to the peer. Note we set the override when we 3090 * could not find the TSN on the chunk or the destination network. 3091 */ 3092 sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit); 3093} 3094 3095static void 3096sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net) 3097{ 3098 /* 3099 * Here we get a CWR from the peer. We must look in the outqueue and 3100 * make sure that we have a covered ECNE in teh control chunk part. 3101 * If so remove it. 
	 */
	struct sctp_tmit_chunk *chk;
	struct sctp_ecne_chunk *ecne;
	int override;
	uint32_t cwr_tsn;

	cwr_tsn = ntohl(cp->tsn);

	/* with the override flag set the CWR covers ECNEs to any address */
	override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE;
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
			continue;
		}
		if ((override == 0) && (chk->whoTo != net)) {
			/* Must be from the right src unless override is set */
			continue;
		}
		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
		if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) {
			/* this covers this ECNE, we can remove it */
			stcb->asoc.ecn_echo_cnt_onq--;
			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
			    sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			stcb->asoc.ctrl_queue_cnt--;
			sctp_free_a_chunk(stcb, chk);
			if (override == 0) {
				break;
			}
		}
	}
}

/*
 * Handle a SHUTDOWN-COMPLETE chunk: only acted on in the
 * SHUTDOWN_ACK_SENT state, where it finishes the shutdown handshake by
 * notifying the ULP, stopping the SHUTDOWN-ACK timer and freeing the
 * association.  On return the TCB is gone (or unlocked when ignored).
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* are the queues empty? they should be */
		if (!TAILQ_EMPTY(&asoc->send_queue) ||
		    !TAILQ_EMPTY(&asoc->sent_queue) ||
		    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
		}
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* socket lock must be held around sctp_free_assoc() on these platforms */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}

/*
 * Process one chunk descriptor from a PACKET-DROPPED report: locate the
 * dropped chunk and mark/queue it for retransmission.  Returns 0 on
 * success and -1 when the reported data bytes do not match what we sent
 * (a bogus report).
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			/* sent_queue is TSN ordered, so we can stop early */
			TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, tsn)) {
					/* not found */
					tp1 = NULL;
					break;
				}
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
				 */
				SCTP_STAT_INCR(sctps_pdrpdnfnd);
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->rec.data.TSN_seq == tsn) {
						/* found it */
						break;
					}
				}
			}
			if (tp1 == NULL) {
				SCTP_STAT_INCR(sctps_pdrptsnnf);
			}
			if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) {
				uint8_t *ddp;

				if (((flg & SCTP_BADCRC) == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					return (0);
				}
				if ((stcb->asoc.peers_rwnd == 0) &&
				    ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) {
					SCTP_STAT_INCR(sctps_pdrpdiwnp);
					return (0);
				}
				if (stcb->asoc.peers_rwnd == 0 &&
				    (flg & SCTP_FROM_MIDDLE_BOX)) {
					SCTP_STAT_INCR(sctps_pdrpdizrw);
					return (0);
				}
				/*
				 * verify the reported payload bytes match
				 * what we actually sent before trusting the
				 * report
				 */
				ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+
				    sizeof(struct sctp_data_chunk));
				{
					unsigned int iii;

					for (iii = 0; iii < sizeof(desc->data_bytes);
					    iii++) {
						if (ddp[iii] != desc->data_bytes[iii]) {
							SCTP_STAT_INCR(sctps_pdrpbadd);
							return (-1);
						}
					}
				}

				if (tp1->do_rtt) {
					/*
					 * this guy had a RTO calculation
					 * pending on it, cancel it
					 */
					if (tp1->whoTo->rto_needed == 0) {
						tp1->whoTo->rto_needed = 1;
					}
					tp1->do_rtt = 0;
				}
				SCTP_STAT_INCR(sctps_pdrpmark);
				if (tp1->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				/*
				 * mark it as if we were doing a FR, since
				 * we will be getting gap ack reports behind
				 * the info from the router.
				 */
				tp1->rec.data.doing_fast_retransmit = 1;
				/*
				 * mark the tsn with what sequences can
				 * cause a new FR.
				 */
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}

				/* restart the timer */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, tp1->whoTo);

				/* fix counts and things */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP,
					    tp1->whoTo->flight_size,
					    tp1->book_size,
					    (uintptr_t) stcb,
					    tp1->rec.data.TSN_seq);
				}
				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
					sctp_flight_size_decrease(tp1);
					sctp_total_flight_decrease(stcb, tp1);
				}
				tp1->sent = SCTP_DATAGRAM_RESEND;
			} {
				/* audit code (bare block: runs unconditionally) */
				unsigned int audit;

				audit = 0;
				TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue,
				    sctp_next) {
					if (tp1->sent == SCTP_DATAGRAM_RESEND)
						audit++;
				}
				if (audit != stcb->asoc.sent_queue_retran_cnt) {
					SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n",
					    audit, stcb->asoc.sent_queue_retran_cnt);
#ifndef SCTP_AUDITING_ENABLED
					stcb->asoc.sent_queue_retran_cnt = audit;
#endif
				}
			}
		}
		break;
	case SCTP_ASCONF:
		{
			struct sctp_tmit_chunk *asconf;

			TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
					break;
				}
			}
			if (asconf) {
				if (asconf->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				asconf->sent = SCTP_DATAGRAM_RESEND;
				asconf->snd_count--;
			}
		}
		break;
	case SCTP_INITIATION:
		/* resend the INIT */
		stcb->asoc.dropped_special_cnt++;
		if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) {
			/*
			 * If we can get it in, in a few attempts we do
			 * this, otherwise we let the timer fire.
			 */
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep,
			    stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
			sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		}
		break;
	case SCTP_SELECTIVE_ACK:
	case SCTP_NR_SELECTIVE_ACK:
		/* resend the sack */
		sctp_send_sack(stcb);
		break;
	case SCTP_HEARTBEAT_REQUEST:
		/* resend a demand HB */
		if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) {
			/*
			 * Only retransmit if we KNOW we wont destroy the
			 * tcb
			 */
			(void)sctp_send_hb(stcb, 1, net);
		}
		break;
	case SCTP_SHUTDOWN:
		sctp_send_shutdown(stcb, net);
		break;
	case SCTP_SHUTDOWN_ACK:
		sctp_send_shutdown_ack(stcb, net);
		break;
	case SCTP_COOKIE_ECHO:
		{
			struct sctp_tmit_chunk *cookie;

			cookie = NULL;
			TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue,
			    sctp_next) {
				if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
					break;
				}
			}
			if (cookie) {
				if (cookie->sent != SCTP_DATAGRAM_RESEND)
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cookie->sent = SCTP_DATAGRAM_RESEND;
				sctp_stop_all_cookie_timers(stcb);
			}
		}
		break;
	case SCTP_COOKIE_ACK:
		sctp_send_cookie_ack(stcb);
		break;
	case SCTP_ASCONF_ACK:
		/* resend last asconf ack */
		sctp_send_asconf_ack(stcb);
		break;
	case SCTP_FORWARD_CUM_TSN:
		send_forward_tsn(stcb, &stcb->asoc);
		break;
		/* can't do anything with these */
	case SCTP_PACKET_DROPPED:
	case SCTP_INITIATION_ACK:	/* this should not happen */
	case SCTP_HEARTBEAT_ACK:
	case SCTP_ABORT_ASSOCIATION:
	case
SCTP_OPERATION_ERROR: 3423 case SCTP_SHUTDOWN_COMPLETE: 3424 case SCTP_ECN_ECHO: 3425 case SCTP_ECN_CWR: 3426 default: 3427 break; 3428 } 3429 return (0); 3430} 3431 3432void 3433sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 3434{ 3435 int i; 3436 uint16_t temp; 3437 3438 /* 3439 * We set things to 0xffff since this is the last delivered sequence 3440 * and we will be sending in 0 after the reset. 3441 */ 3442 3443 if (number_entries) { 3444 for (i = 0; i < number_entries; i++) { 3445 temp = ntohs(list[i]); 3446 if (temp >= stcb->asoc.streamincnt) { 3447 continue; 3448 } 3449 stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff; 3450 } 3451 } else { 3452 list = NULL; 3453 for (i = 0; i < stcb->asoc.streamincnt; i++) { 3454 stcb->asoc.strmin[i].last_sequence_delivered = 0xffff; 3455 } 3456 } 3457 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3458} 3459 3460static void 3461sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 3462{ 3463 int i; 3464 3465 if (number_entries == 0) { 3466 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3467 stcb->asoc.strmout[i].next_sequence_sent = 0; 3468 } 3469 } else if (number_entries) { 3470 for (i = 0; i < number_entries; i++) { 3471 uint16_t temp; 3472 3473 temp = ntohs(list[i]); 3474 if (temp >= stcb->asoc.streamoutcnt) { 3475 /* no such stream */ 3476 continue; 3477 } 3478 stcb->asoc.strmout[temp].next_sequence_sent = 0; 3479 } 3480 } 3481 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3482} 3483 3484 3485struct sctp_stream_reset_out_request * 3486sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk) 3487{ 3488 struct sctp_association *asoc; 3489 struct sctp_stream_reset_out_req *req; 3490 struct sctp_stream_reset_out_request *r; 3491 struct sctp_tmit_chunk *chk; 3492 int len, clen; 3493 3494 asoc = &stcb->asoc; 3495 
if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 3496 asoc->stream_reset_outstanding = 0; 3497 return (NULL); 3498 } 3499 if (stcb->asoc.str_reset == NULL) { 3500 asoc->stream_reset_outstanding = 0; 3501 return (NULL); 3502 } 3503 chk = stcb->asoc.str_reset; 3504 if (chk->data == NULL) { 3505 return (NULL); 3506 } 3507 if (bchk) { 3508 /* he wants a copy of the chk pointer */ 3509 *bchk = chk; 3510 } 3511 clen = chk->send_size; 3512 req = mtod(chk->data, struct sctp_stream_reset_out_req *); 3513 r = &req->sr_req; 3514 if (ntohl(r->request_seq) == seq) { 3515 /* found it */ 3516 return (r); 3517 } 3518 len = SCTP_SIZE32(ntohs(r->ph.param_length)); 3519 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) { 3520 /* move to the next one, there can only be a max of two */ 3521 r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len); 3522 if (ntohl(r->request_seq) == seq) { 3523 return (r); 3524 } 3525 } 3526 /* that seq is not here */ 3527 return (NULL); 3528} 3529 3530static void 3531sctp_clean_up_stream_reset(struct sctp_tcb *stcb) 3532{ 3533 struct sctp_association *asoc; 3534 struct sctp_tmit_chunk *chk = stcb->asoc.str_reset; 3535 3536 if (stcb->asoc.str_reset == NULL) { 3537 return; 3538 } 3539 asoc = &stcb->asoc; 3540 3541 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26); 3542 TAILQ_REMOVE(&asoc->control_send_queue, 3543 chk, 3544 sctp_next); 3545 if (chk->data) { 3546 sctp_m_freem(chk->data); 3547 chk->data = NULL; 3548 } 3549 asoc->ctrl_queue_cnt--; 3550 sctp_free_a_chunk(stcb, chk); 3551 /* sa_ignore NO_NULL_CHK */ 3552 stcb->asoc.str_reset = NULL; 3553} 3554 3555 3556static int 3557sctp_handle_stream_reset_response(struct sctp_tcb *stcb, 3558 uint32_t seq, uint32_t action, 3559 struct sctp_stream_reset_response *respin) 3560{ 3561 uint16_t type; 3562 int lparm_len; 3563 struct sctp_association *asoc = &stcb->asoc; 3564 struct sctp_tmit_chunk *chk; 3565 struct 
sctp_stream_reset_out_request *srparam; 3566 int number_entries; 3567 3568 if (asoc->stream_reset_outstanding == 0) { 3569 /* duplicate */ 3570 return (0); 3571 } 3572 if (seq == stcb->asoc.str_reset_seq_out) { 3573 srparam = sctp_find_stream_reset(stcb, seq, &chk); 3574 if (srparam) { 3575 stcb->asoc.str_reset_seq_out++; 3576 type = ntohs(srparam->ph.param_type); 3577 lparm_len = ntohs(srparam->ph.param_length); 3578 if (type == SCTP_STR_RESET_OUT_REQUEST) { 3579 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t); 3580 asoc->stream_reset_out_is_outstanding = 0; 3581 if (asoc->stream_reset_outstanding) 3582 asoc->stream_reset_outstanding--; 3583 if (action == SCTP_STREAM_RESET_PERFORMED) { 3584 /* do it */ 3585 sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams); 3586 } else { 3587 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3588 } 3589 } else if (type == SCTP_STR_RESET_IN_REQUEST) { 3590 /* Answered my request */ 3591 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t); 3592 if (asoc->stream_reset_outstanding) 3593 asoc->stream_reset_outstanding--; 3594 if (action != SCTP_STREAM_RESET_PERFORMED) { 3595 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3596 } 3597 } else if (type == SCTP_STR_RESET_ADD_STREAMS) { 3598 /* Ok we now may have more streams */ 3599 if (asoc->stream_reset_outstanding) 3600 asoc->stream_reset_outstanding--; 3601 if (action == SCTP_STREAM_RESET_PERFORMED) { 3602 /* Put the new streams into effect */ 3603 stcb->asoc.streamoutcnt = stcb->asoc.strm_realoutsize; 3604 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_OK, stcb, 3605 (uint32_t) stcb->asoc.streamoutcnt, NULL, SCTP_SO_NOT_LOCKED); 3606 } else { 3607 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_ADD_FAIL, stcb, 3608 (uint32_t) stcb->asoc.streamoutcnt, 
NULL, SCTP_SO_NOT_LOCKED); 3609 } 3610 } else if (type == SCTP_STR_RESET_TSN_REQUEST) { 3611 /** 3612 * a) Adopt the new in tsn. 3613 * b) reset the map 3614 * c) Adopt the new out-tsn 3615 */ 3616 struct sctp_stream_reset_response_tsn *resp; 3617 struct sctp_forward_tsn_chunk fwdtsn; 3618 int abort_flag = 0; 3619 3620 if (respin == NULL) { 3621 /* huh ? */ 3622 return (0); 3623 } 3624 if (action == SCTP_STREAM_RESET_PERFORMED) { 3625 resp = (struct sctp_stream_reset_response_tsn *)respin; 3626 asoc->stream_reset_outstanding--; 3627 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3628 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3629 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1); 3630 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3631 if (abort_flag) { 3632 return (1); 3633 } 3634 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1); 3635 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 3636 sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 3637 } 3638 stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 3639 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn); 3640 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 3641 3642 stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map; 3643 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size); 3644 3645 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn); 3646 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn; 3647 3648 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 3649 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 3650 3651 } 3652 } 3653 /* get rid of the request and get the request flags */ 3654 if (asoc->stream_reset_outstanding == 0) { 3655 sctp_clean_up_stream_reset(stcb); 3656 } 3657 } 3658 } 3659 return (0); 3660} 3661 3662static void 
sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_in_request *req, int trunc)
{
	uint32_t seq;
	int len, i;
	int number_entries;
	uint16_t temp;

	/*
	 * peer wants me to send a str-reset to him for my outgoing seq's if
	 * seq_in is right.
	 */
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		if (trunc) {
			/* Can't do it, since they exceeded our buffer size */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
			len = ntohs(req->ph.param_length);
			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
			/* convert the stream list to host byte order in place */
			for (i = 0; i < number_entries; i++) {
				temp = ntohs(req->list_of_streams[i]);
				req->list_of_streams[i] = temp;
			}
			/* move the reset action back one */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
			    asoc->str_reset_seq_out,
			    seq, (asoc->sending_seq - 1));
			asoc->stream_reset_out_is_outstanding = 1;
			asoc->str_reset = chk;
			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
			stcb->asoc.stream_reset_outstanding++;
		} else {
			/* Can't do it, since we have sent one out */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		}
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmitted request: echo the previous result */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
}

/*
 * Handle a peer request to reset all stream sequence numbers and the
 * TSN.  Processes a synthetic FORWARD-TSN to flush reassembly state,
 * re-bases the (nr_)mapping arrays, resets all in/out streams and
 * answers with our next sending TSN.  Returns 1 when the synthetic
 * FORWARD-TSN aborted the association, 0 otherwise.
 */
static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_tsn_request *req)
{
	/* reset all in and out and update the tsn */
	/*
	 * A) reset my str-seq's on in and out. B) Select a receive next,
	 * and set cum-ack to it. Also process this selected number as a
	 * fwd-tsn as well. C) set in the response my next sending seq.
	 */
	struct sctp_forward_tsn_chunk fwdtsn;
	struct sctp_association *asoc = &stcb->asoc;
	int abort_flag = 0;
	uint32_t seq;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
		fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
		fwdtsn.ch.chunk_flags = 0;
		fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1);
		sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
		if (abort_flag) {
			return (1);
		}
		/* jump the incoming TSN space forward and clear the maps */
		stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA;
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
		stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
		stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1;
		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map;
		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		atomic_add_int(&stcb->asoc.sending_seq, 1);
		/* save off historical data
		 * for retrans */
		stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0];
		stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq;
		stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0];
		stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn;

		sctp_add_stream_reset_result_tsn(chk,
		    ntohl(req->request_seq),
		    SCTP_STREAM_RESET_PERFORMED,
		    stcb->asoc.sending_seq,
		    stcb->asoc.mapping_array_base_tsn);
		sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
		sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;

		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmitted request: echo the saved result */
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0],
		    stcb->asoc.last_sending_seq[0],
		    stcb->asoc.last_base_tsnsent[0]
		    );
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1],
		    stcb->asoc.last_sending_seq[1],
		    stcb->asoc.last_base_tsnsent[1]
		    );
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
	return (0);
}

/*
 * Handle a peer request to reset some (or all) of ITS outgoing
 * streams, i.e. our incoming streams.  Either performs the reset
 * immediately (when all data up to the given TSN has arrived) or
 * queues the request on asoc->resetHead until that TSN is reached.
 */
static void
sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_out_request *req, int trunc)
{
	uint32_t seq, tsn;
	int number_entries, len;
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);

	/* now if its not a duplicate we process it */
	if (asoc->str_reset_seq_in == seq) {
		len = ntohs(req->ph.param_length);
		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
		/*
		 * the sender is resetting, handle the list issue.. we must
		 * a) verify if we can do the reset, if so no problem b) If
		 * we can't do the reset we must copy the request. c) queue
		 * it, and setup the data in processor to trigger it off
		 * when needed and dequeue all the queued data.
		 */
		tsn = ntohl(req->send_reset_at_tsn);

		/* move the reset action back one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (trunc) {
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
			/* we can do it now */
			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
		} else {
			/*
			 * we must queue it up and thus wait for the TSN's
			 * to arrive that are at or before tsn
			 */
			struct sctp_stream_reset_list *liste;
			int siz;

			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
			    siz, SCTP_M_STRESET);
			if (liste == NULL) {
				/* gak out of memory */
				sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED);
				asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
				return;
			}
			liste->tsn = tsn;
			liste->number_entries = number_entries;
			memcpy(&liste->req, req,
			    (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t))));
			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
			sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
		}
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
3851 */ 3852 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3853 } else if ((asoc->str_reset_seq_in - 2) == seq) { 3854 /* 3855 * two seq back, just echo back last action since my 3856 * response was lost. 3857 */ 3858 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3859 } else { 3860 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3861 } 3862} 3863 3864static void 3865sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk, 3866 struct sctp_stream_reset_add_strm *str_add) 3867{ 3868 /* 3869 * Peer is requesting to add more streams. If its within our 3870 * max-streams we will allow it. 3871 */ 3872 uint16_t num_stream, i; 3873 uint32_t seq; 3874 struct sctp_association *asoc = &stcb->asoc; 3875 struct sctp_queued_to_read *ctl, *nctl; 3876 3877 /* Get the number. */ 3878 seq = ntohl(str_add->request_seq); 3879 num_stream = ntohs(str_add->number_of_streams); 3880 /* Now what would be the new total? */ 3881 if (asoc->str_reset_seq_in == seq) { 3882 num_stream += stcb->asoc.streamincnt; 3883 if (num_stream > stcb->asoc.max_inbound_streams) { 3884 /* We must reject it they ask for to many */ 3885 denied: 3886 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED); 3887 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0]; 3888 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3889 } else { 3890 /* Ok, we can do that :-) */ 3891 struct sctp_stream_in *oldstrm; 3892 3893 /* save off the old */ 3894 oldstrm = stcb->asoc.strmin; 3895 SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *, 3896 (num_stream * sizeof(struct sctp_stream_in)), 3897 SCTP_M_STRMI); 3898 if (stcb->asoc.strmin == NULL) { 3899 stcb->asoc.strmin = oldstrm; 3900 goto denied; 3901 } 3902 /* copy off the old data */ 3903 for (i = 0; i < stcb->asoc.streamincnt; i++) { 3904 TAILQ_INIT(&stcb->asoc.strmin[i].inqueue); 3905 stcb->asoc.strmin[i].stream_no = i; 3906 
stcb->asoc.strmin[i].last_sequence_delivered = oldstrm[i].last_sequence_delivered; 3907 stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started; 3908 /* now anything on those queues? */ 3909 TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next, nctl) { 3910 TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next); 3911 TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next); 3912 } 3913 } 3914 /* Init the new streams */ 3915 for (i = stcb->asoc.streamincnt; i < num_stream; i++) { 3916 TAILQ_INIT(&stcb->asoc.strmin[i].inqueue); 3917 stcb->asoc.strmin[i].stream_no = i; 3918 stcb->asoc.strmin[i].last_sequence_delivered = 0xffff; 3919 stcb->asoc.strmin[i].delivery_started = 0; 3920 } 3921 SCTP_FREE(oldstrm, SCTP_M_STRMI); 3922 /* update the size */ 3923 stcb->asoc.streamincnt = num_stream; 3924 /* Send the ack */ 3925 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3926 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0]; 3927 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3928 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_INSTREAM_ADD_OK, stcb, 3929 (uint32_t) stcb->asoc.streamincnt, NULL, SCTP_SO_NOT_LOCKED); 3930 } 3931 } else if ((asoc->str_reset_seq_in - 1) == seq) { 3932 /* 3933 * one seq back, just echo back last action since my 3934 * response was lost. 3935 */ 3936 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3937 } else if ((asoc->str_reset_seq_in - 2) == seq) { 3938 /* 3939 * two seq back, just echo back last action since my 3940 * response was lost. 
3941 */ 3942 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3943 } else { 3944 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3945 3946 } 3947} 3948 3949#ifdef __GNUC__ 3950__attribute__((noinline)) 3951#endif 3952 static int 3953 sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset, 3954 struct sctp_stream_reset_out_req *sr_req) 3955{ 3956 int chk_length, param_len, ptype; 3957 struct sctp_paramhdr pstore; 3958 uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE]; 3959 3960 uint32_t seq; 3961 int num_req = 0; 3962 int trunc = 0; 3963 struct sctp_tmit_chunk *chk; 3964 struct sctp_chunkhdr *ch; 3965 struct sctp_paramhdr *ph; 3966 int ret_code = 0; 3967 int num_param = 0; 3968 3969 /* now it may be a reset or a reset-response */ 3970 chk_length = ntohs(sr_req->ch.chunk_length); 3971 3972 /* setup for adding the response */ 3973 sctp_alloc_a_chunk(stcb, chk); 3974 if (chk == NULL) { 3975 return (ret_code); 3976 } 3977 chk->rec.chunk_id.id = SCTP_STREAM_RESET; 3978 chk->rec.chunk_id.can_take_data = 0; 3979 chk->asoc = &stcb->asoc; 3980 chk->no_fr_allowed = 0; 3981 chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr); 3982 chk->book_size_scale = 0; 3983 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 3984 if (chk->data == NULL) { 3985strres_nochunk: 3986 if (chk->data) { 3987 sctp_m_freem(chk->data); 3988 chk->data = NULL; 3989 } 3990 sctp_free_a_chunk(stcb, chk); 3991 return (ret_code); 3992 } 3993 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 3994 3995 /* setup chunk parameters */ 3996 chk->sent = SCTP_DATAGRAM_UNSENT; 3997 chk->snd_count = 0; 3998 chk->whoTo = stcb->asoc.primary_destination; 3999 atomic_add_int(&chk->whoTo->ref_count, 1); 4000 4001 ch = mtod(chk->data, struct sctp_chunkhdr *); 4002 ch->chunk_type = SCTP_STREAM_RESET; 4003 ch->chunk_flags = 0; 4004 ch->chunk_length = htons(chk->send_size); 4005 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 4006 offset += 
sizeof(struct sctp_chunkhdr); 4007 while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) { 4008 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore); 4009 if (ph == NULL) 4010 break; 4011 param_len = ntohs(ph->param_length); 4012 if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) { 4013 /* bad param */ 4014 break; 4015 } 4016 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)), 4017 (uint8_t *) & cstore); 4018 ptype = ntohs(ph->param_type); 4019 num_param++; 4020 if (param_len > (int)sizeof(cstore)) { 4021 trunc = 1; 4022 } else { 4023 trunc = 0; 4024 } 4025 4026 if (num_param > SCTP_MAX_RESET_PARAMS) { 4027 /* hit the max of parameters already sorry.. */ 4028 break; 4029 } 4030 if (ptype == SCTP_STR_RESET_OUT_REQUEST) { 4031 struct sctp_stream_reset_out_request *req_out; 4032 4033 req_out = (struct sctp_stream_reset_out_request *)ph; 4034 num_req++; 4035 if (stcb->asoc.stream_reset_outstanding) { 4036 seq = ntohl(req_out->response_seq); 4037 if (seq == stcb->asoc.str_reset_seq_out) { 4038 /* implicit ack */ 4039 (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL); 4040 } 4041 } 4042 sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc); 4043 } else if (ptype == SCTP_STR_RESET_ADD_STREAMS) { 4044 struct sctp_stream_reset_add_strm *str_add; 4045 4046 str_add = (struct sctp_stream_reset_add_strm *)ph; 4047 num_req++; 4048 sctp_handle_str_reset_add_strm(stcb, chk, str_add); 4049 } else if (ptype == SCTP_STR_RESET_IN_REQUEST) { 4050 struct sctp_stream_reset_in_request *req_in; 4051 4052 num_req++; 4053 4054 req_in = (struct sctp_stream_reset_in_request *)ph; 4055 4056 sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc); 4057 } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) { 4058 struct sctp_stream_reset_tsn_request *req_tsn; 4059 4060 num_req++; 4061 req_tsn = (struct sctp_stream_reset_tsn_request *)ph; 4062 
4063 if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) { 4064 ret_code = 1; 4065 goto strres_nochunk; 4066 } 4067 /* no more */ 4068 break; 4069 } else if (ptype == SCTP_STR_RESET_RESPONSE) { 4070 struct sctp_stream_reset_response *resp; 4071 uint32_t result; 4072 4073 resp = (struct sctp_stream_reset_response *)ph; 4074 seq = ntohl(resp->response_seq); 4075 result = ntohl(resp->result); 4076 if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) { 4077 ret_code = 1; 4078 goto strres_nochunk; 4079 } 4080 } else { 4081 break; 4082 } 4083 offset += SCTP_SIZE32(param_len); 4084 chk_length -= SCTP_SIZE32(param_len); 4085 } 4086 if (num_req == 0) { 4087 /* we have no response free the stuff */ 4088 goto strres_nochunk; 4089 } 4090 /* ok we have a chunk to link in */ 4091 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, 4092 chk, 4093 sctp_next); 4094 stcb->asoc.ctrl_queue_cnt++; 4095 return (ret_code); 4096} 4097 4098/* 4099 * Handle a router or endpoints report of a packet loss, there are two ways 4100 * to handle this, either we get the whole packet and must disect it 4101 * ourselves (possibly with truncation and or corruption) or it is a summary 4102 * from a middle box that did the disectting for us. 
4103 */ 4104static void 4105sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp, 4106 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit) 4107{ 4108 uint32_t bottle_bw, on_queue; 4109 uint16_t trunc_len; 4110 unsigned int chlen; 4111 unsigned int at; 4112 struct sctp_chunk_desc desc; 4113 struct sctp_chunkhdr *ch; 4114 4115 chlen = ntohs(cp->ch.chunk_length); 4116 chlen -= sizeof(struct sctp_pktdrop_chunk); 4117 /* XXX possible chlen underflow */ 4118 if (chlen == 0) { 4119 ch = NULL; 4120 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) 4121 SCTP_STAT_INCR(sctps_pdrpbwrpt); 4122 } else { 4123 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr)); 4124 chlen -= sizeof(struct sctphdr); 4125 /* XXX possible chlen underflow */ 4126 memset(&desc, 0, sizeof(desc)); 4127 } 4128 trunc_len = (uint16_t) ntohs(cp->trunc_len); 4129 if (trunc_len > limit) { 4130 trunc_len = limit; 4131 } 4132 /* now the chunks themselves */ 4133 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) { 4134 desc.chunk_type = ch->chunk_type; 4135 /* get amount we need to move */ 4136 at = ntohs(ch->chunk_length); 4137 if (at < sizeof(struct sctp_chunkhdr)) { 4138 /* corrupt chunk, maybe at the end? */ 4139 SCTP_STAT_INCR(sctps_pdrpcrupt); 4140 break; 4141 } 4142 if (trunc_len == 0) { 4143 /* we are supposed to have all of it */ 4144 if (at > chlen) { 4145 /* corrupt skip it */ 4146 SCTP_STAT_INCR(sctps_pdrpcrupt); 4147 break; 4148 } 4149 } else { 4150 /* is there enough of it left ? */ 4151 if (desc.chunk_type == SCTP_DATA) { 4152 if (chlen < (sizeof(struct sctp_data_chunk) + 4153 sizeof(desc.data_bytes))) { 4154 break; 4155 } 4156 } else { 4157 if (chlen < sizeof(struct sctp_chunkhdr)) { 4158 break; 4159 } 4160 } 4161 } 4162 if (desc.chunk_type == SCTP_DATA) { 4163 /* can we get out the tsn? 
*/ 4164 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 4165 SCTP_STAT_INCR(sctps_pdrpmbda); 4166 4167 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) { 4168 /* yep */ 4169 struct sctp_data_chunk *dcp; 4170 uint8_t *ddp; 4171 unsigned int iii; 4172 4173 dcp = (struct sctp_data_chunk *)ch; 4174 ddp = (uint8_t *) (dcp + 1); 4175 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) { 4176 desc.data_bytes[iii] = ddp[iii]; 4177 } 4178 desc.tsn_ifany = dcp->dp.tsn; 4179 } else { 4180 /* nope we are done. */ 4181 SCTP_STAT_INCR(sctps_pdrpnedat); 4182 break; 4183 } 4184 } else { 4185 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 4186 SCTP_STAT_INCR(sctps_pdrpmbct); 4187 } 4188 4189 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) { 4190 SCTP_STAT_INCR(sctps_pdrppdbrk); 4191 break; 4192 } 4193 if (SCTP_SIZE32(at) > chlen) { 4194 break; 4195 } 4196 chlen -= SCTP_SIZE32(at); 4197 if (chlen < sizeof(struct sctp_chunkhdr)) { 4198 /* done, none left */ 4199 break; 4200 } 4201 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at)); 4202 } 4203 /* Now update any rwnd --- possibly */ 4204 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) { 4205 /* From a peer, we get a rwnd report */ 4206 uint32_t a_rwnd; 4207 4208 SCTP_STAT_INCR(sctps_pdrpfehos); 4209 4210 bottle_bw = ntohl(cp->bottle_bw); 4211 on_queue = ntohl(cp->current_onq); 4212 if (bottle_bw && on_queue) { 4213 /* a rwnd report is in here */ 4214 if (bottle_bw > on_queue) 4215 a_rwnd = bottle_bw - on_queue; 4216 else 4217 a_rwnd = 0; 4218 4219 if (a_rwnd == 0) 4220 stcb->asoc.peers_rwnd = 0; 4221 else { 4222 if (a_rwnd > stcb->asoc.total_flight) { 4223 stcb->asoc.peers_rwnd = 4224 a_rwnd - stcb->asoc.total_flight; 4225 } else { 4226 stcb->asoc.peers_rwnd = 0; 4227 } 4228 if (stcb->asoc.peers_rwnd < 4229 stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4230 /* SWS sender side engages */ 4231 stcb->asoc.peers_rwnd = 0; 4232 } 4233 } 4234 } 4235 } else { 4236 SCTP_STAT_INCR(sctps_pdrpfmbox); 
4237 } 4238 4239 /* now middle boxes in sat networks get a cwnd bump */ 4240 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) && 4241 (stcb->asoc.sat_t3_loss_recovery == 0) && 4242 (stcb->asoc.sat_network)) { 4243 /* 4244 * This is debateable but for sat networks it makes sense 4245 * Note if a T3 timer has went off, we will prohibit any 4246 * changes to cwnd until we exit the t3 loss recovery. 4247 */ 4248 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb, 4249 net, cp, &bottle_bw, &on_queue); 4250 } 4251} 4252 4253/* 4254 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to 4255 * still contain IP/SCTP header - stcb: is the tcb found for this packet - 4256 * offset: offset into the mbuf chain to first chunkhdr - length: is the 4257 * length of the complete packet outputs: - length: modified to remaining 4258 * length after control processing - netp: modified to new sctp_nets after 4259 * cookie-echo processing - return NULL to discard the packet (ie. no asoc, 4260 * bad packet,...) otherwise return the tcb for this packet 4261 */ 4262#ifdef __GNUC__ 4263__attribute__((noinline)) 4264#endif 4265 static struct sctp_tcb * 4266 sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, 4267 struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp, 4268 struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen, 4269 uint32_t vrf_id, uint16_t port) 4270{ 4271 struct sctp_association *asoc; 4272 uint32_t vtag_in; 4273 int num_chunks = 0; /* number of control chunks processed */ 4274 uint32_t chk_length; 4275 int ret; 4276 int abort_no_unlock = 0; 4277 int ecne_seen = 0; 4278 4279 /* 4280 * How big should this be, and should it be alloc'd? Lets try the 4281 * d-mtu-ceiling for now (2k) and that should hopefully work ... 4282 * until we get into jumbo grams and such.. 
4283 */ 4284 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE]; 4285 struct sctp_tcb *locked_tcb = stcb; 4286 int got_auth = 0; 4287 uint32_t auth_offset = 0, auth_len = 0; 4288 int auth_skipped = 0; 4289 int asconf_cnt = 0; 4290 4291#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4292 struct socket *so; 4293 4294#endif 4295 4296 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n", 4297 iphlen, *offset, length, stcb); 4298 4299 /* validate chunk header length... */ 4300 if (ntohs(ch->chunk_length) < sizeof(*ch)) { 4301 SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n", 4302 ntohs(ch->chunk_length)); 4303 if (locked_tcb) { 4304 SCTP_TCB_UNLOCK(locked_tcb); 4305 } 4306 return (NULL); 4307 } 4308 /* 4309 * validate the verification tag 4310 */ 4311 vtag_in = ntohl(sh->v_tag); 4312 4313 if (locked_tcb) { 4314 SCTP_TCB_LOCK_ASSERT(locked_tcb); 4315 } 4316 if (ch->chunk_type == SCTP_INITIATION) { 4317 SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n", 4318 ntohs(ch->chunk_length), vtag_in); 4319 if (vtag_in != 0) { 4320 /* protocol error- silently discard... */ 4321 SCTP_STAT_INCR(sctps_badvtag); 4322 if (locked_tcb) { 4323 SCTP_TCB_UNLOCK(locked_tcb); 4324 } 4325 return (NULL); 4326 } 4327 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) { 4328 /* 4329 * If there is no stcb, skip the AUTH chunk and process 4330 * later after a stcb is found (to validate the lookup was 4331 * valid. 
4332 */ 4333 if ((ch->chunk_type == SCTP_AUTHENTICATION) && 4334 (stcb == NULL) && 4335 !SCTP_BASE_SYSCTL(sctp_auth_disable)) { 4336 /* save this chunk for later processing */ 4337 auth_skipped = 1; 4338 auth_offset = *offset; 4339 auth_len = ntohs(ch->chunk_length); 4340 4341 /* (temporarily) move past this chunk */ 4342 *offset += SCTP_SIZE32(auth_len); 4343 if (*offset >= length) { 4344 /* no more data left in the mbuf chain */ 4345 *offset = length; 4346 if (locked_tcb) { 4347 SCTP_TCB_UNLOCK(locked_tcb); 4348 } 4349 return (NULL); 4350 } 4351 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4352 sizeof(struct sctp_chunkhdr), chunk_buf); 4353 } 4354 if (ch == NULL) { 4355 /* Help */ 4356 *offset = length; 4357 if (locked_tcb) { 4358 SCTP_TCB_UNLOCK(locked_tcb); 4359 } 4360 return (NULL); 4361 } 4362 if (ch->chunk_type == SCTP_COOKIE_ECHO) { 4363 goto process_control_chunks; 4364 } 4365 /* 4366 * first check if it's an ASCONF with an unknown src addr we 4367 * need to look inside to find the association 4368 */ 4369 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) { 4370 struct sctp_chunkhdr *asconf_ch = ch; 4371 uint32_t asconf_offset = 0, asconf_len = 0; 4372 4373 /* inp's refcount may be reduced */ 4374 SCTP_INP_INCR_REF(inp); 4375 4376 asconf_offset = *offset; 4377 do { 4378 asconf_len = ntohs(asconf_ch->chunk_length); 4379 if (asconf_len < sizeof(struct sctp_asconf_paramhdr)) 4380 break; 4381 stcb = sctp_findassociation_ep_asconf(m, iphlen, 4382 *offset, sh, &inp, netp, vrf_id); 4383 if (stcb != NULL) 4384 break; 4385 asconf_offset += SCTP_SIZE32(asconf_len); 4386 asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset, 4387 sizeof(struct sctp_chunkhdr), chunk_buf); 4388 } while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF); 4389 if (stcb == NULL) { 4390 /* 4391 * reduce inp's refcount if not reduced in 4392 * sctp_findassociation_ep_asconf(). 
4393 */ 4394 SCTP_INP_DECR_REF(inp); 4395 } else { 4396 locked_tcb = stcb; 4397 } 4398 4399 /* now go back and verify any auth chunk to be sure */ 4400 if (auth_skipped && (stcb != NULL)) { 4401 struct sctp_auth_chunk *auth; 4402 4403 auth = (struct sctp_auth_chunk *) 4404 sctp_m_getptr(m, auth_offset, 4405 auth_len, chunk_buf); 4406 got_auth = 1; 4407 auth_skipped = 0; 4408 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, 4409 auth_offset)) { 4410 /* auth HMAC failed so dump it */ 4411 *offset = length; 4412 if (locked_tcb) { 4413 SCTP_TCB_UNLOCK(locked_tcb); 4414 } 4415 return (NULL); 4416 } else { 4417 /* remaining chunks are HMAC checked */ 4418 stcb->asoc.authenticated = 1; 4419 } 4420 } 4421 } 4422 if (stcb == NULL) { 4423 /* no association, so it's out of the blue... */ 4424 sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL, 4425 vrf_id, port); 4426 *offset = length; 4427 if (locked_tcb) { 4428 SCTP_TCB_UNLOCK(locked_tcb); 4429 } 4430 return (NULL); 4431 } 4432 asoc = &stcb->asoc; 4433 /* ABORT and SHUTDOWN can use either v_tag... */ 4434 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) || 4435 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) || 4436 (ch->chunk_type == SCTP_PACKET_DROPPED)) { 4437 if ((vtag_in == asoc->my_vtag) || 4438 ((ch->chunk_flags & SCTP_HAD_NO_TCB) && 4439 (vtag_in == asoc->peer_vtag))) { 4440 /* this is valid */ 4441 } else { 4442 /* drop this packet... 
*/ 4443 SCTP_STAT_INCR(sctps_badvtag); 4444 if (locked_tcb) { 4445 SCTP_TCB_UNLOCK(locked_tcb); 4446 } 4447 return (NULL); 4448 } 4449 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 4450 if (vtag_in != asoc->my_vtag) { 4451 /* 4452 * this could be a stale SHUTDOWN-ACK or the 4453 * peer never got the SHUTDOWN-COMPLETE and 4454 * is still hung; we have started a new asoc 4455 * but it won't complete until the shutdown 4456 * is completed 4457 */ 4458 if (locked_tcb) { 4459 SCTP_TCB_UNLOCK(locked_tcb); 4460 } 4461 sctp_handle_ootb(m, iphlen, *offset, sh, inp, 4462 NULL, vrf_id, port); 4463 return (NULL); 4464 } 4465 } else { 4466 /* for all other chunks, vtag must match */ 4467 if (vtag_in != asoc->my_vtag) { 4468 /* invalid vtag... */ 4469 SCTPDBG(SCTP_DEBUG_INPUT3, 4470 "invalid vtag: %xh, expect %xh\n", 4471 vtag_in, asoc->my_vtag); 4472 SCTP_STAT_INCR(sctps_badvtag); 4473 if (locked_tcb) { 4474 SCTP_TCB_UNLOCK(locked_tcb); 4475 } 4476 *offset = length; 4477 return (NULL); 4478 } 4479 } 4480 } /* end if !SCTP_COOKIE_ECHO */ 4481 /* 4482 * process all control chunks... 4483 */ 4484 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) || 4485 /* EY */ 4486 (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) || 4487 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) && 4488 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) { 4489 /* implied cookie-ack.. 
we must have lost the ack */ 4490 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4491 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4492 stcb->asoc.overall_error_count, 4493 0, 4494 SCTP_FROM_SCTP_INPUT, 4495 __LINE__); 4496 } 4497 stcb->asoc.overall_error_count = 0; 4498 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, 4499 *netp); 4500 } 4501process_control_chunks: 4502 while (IS_SCTP_CONTROL(ch)) { 4503 /* validate chunk length */ 4504 chk_length = ntohs(ch->chunk_length); 4505 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n", 4506 ch->chunk_type, chk_length); 4507 SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length); 4508 if (chk_length < sizeof(*ch) || 4509 (*offset + (int)chk_length) > length) { 4510 *offset = length; 4511 if (locked_tcb) { 4512 SCTP_TCB_UNLOCK(locked_tcb); 4513 } 4514 return (NULL); 4515 } 4516 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks); 4517 /* 4518 * INIT-ACK only gets the init ack "header" portion only 4519 * because we don't have to process the peer's COOKIE. All 4520 * others get a complete chunk. 4521 */ 4522 if ((ch->chunk_type == SCTP_INITIATION_ACK) || 4523 (ch->chunk_type == SCTP_INITIATION)) { 4524 /* get an init-ack chunk */ 4525 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4526 sizeof(struct sctp_init_ack_chunk), chunk_buf); 4527 if (ch == NULL) { 4528 *offset = length; 4529 if (locked_tcb) { 4530 SCTP_TCB_UNLOCK(locked_tcb); 4531 } 4532 return (NULL); 4533 } 4534 } else { 4535 /* For cookies and all other chunks. */ 4536 if (chk_length > sizeof(chunk_buf)) { 4537 /* 4538 * use just the size of the chunk buffer so 4539 * the front part of our chunks fit in 4540 * contiguous space up to the chunk buffer 4541 * size (508 bytes). For chunks that need to 4542 * get more than that they must use the 4543 * sctp_m_getptr() function or other means 4544 * (e.g. know how to parse mbuf chains). 4545 * Cookies do this already. 
4546 */ 4547 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4548 (sizeof(chunk_buf) - 4), 4549 chunk_buf); 4550 if (ch == NULL) { 4551 *offset = length; 4552 if (locked_tcb) { 4553 SCTP_TCB_UNLOCK(locked_tcb); 4554 } 4555 return (NULL); 4556 } 4557 } else { 4558 /* We can fit it all */ 4559 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4560 chk_length, chunk_buf); 4561 if (ch == NULL) { 4562 SCTP_PRINTF("sctp_process_control: Can't get the all data....\n"); 4563 *offset = length; 4564 if (locked_tcb) { 4565 SCTP_TCB_UNLOCK(locked_tcb); 4566 } 4567 return (NULL); 4568 } 4569 } 4570 } 4571 num_chunks++; 4572 /* Save off the last place we got a control from */ 4573 if (stcb != NULL) { 4574 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) { 4575 /* 4576 * allow last_control to be NULL if 4577 * ASCONF... ASCONF processing will find the 4578 * right net later 4579 */ 4580 if ((netp != NULL) && (*netp != NULL)) 4581 stcb->asoc.last_control_chunk_from = *netp; 4582 } 4583 } 4584#ifdef SCTP_AUDITING_ENABLED 4585 sctp_audit_log(0xB0, ch->chunk_type); 4586#endif 4587 4588 /* check to see if this chunk required auth, but isn't */ 4589 if ((stcb != NULL) && 4590 !SCTP_BASE_SYSCTL(sctp_auth_disable) && 4591 sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) && 4592 !stcb->asoc.authenticated) { 4593 /* "silently" ignore */ 4594 SCTP_STAT_INCR(sctps_recvauthmissing); 4595 goto next_chunk; 4596 } 4597 switch (ch->chunk_type) { 4598 case SCTP_INITIATION: 4599 /* must be first and only chunk */ 4600 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n"); 4601 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4602 /* We are not interested anymore? 
*/ 4603 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4604 /* 4605 * collision case where we are 4606 * sending to them too 4607 */ 4608 ; 4609 } else { 4610 if (locked_tcb) { 4611 SCTP_TCB_UNLOCK(locked_tcb); 4612 } 4613 *offset = length; 4614 return (NULL); 4615 } 4616 } 4617 if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) || 4618 (num_chunks > 1) || 4619 (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) { 4620 *offset = length; 4621 if (locked_tcb) { 4622 SCTP_TCB_UNLOCK(locked_tcb); 4623 } 4624 return (NULL); 4625 } 4626 if ((stcb != NULL) && 4627 (SCTP_GET_STATE(&stcb->asoc) == 4628 SCTP_STATE_SHUTDOWN_ACK_SENT)) { 4629 sctp_send_shutdown_ack(stcb, 4630 stcb->asoc.primary_destination); 4631 *offset = length; 4632 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 4633 if (locked_tcb) { 4634 SCTP_TCB_UNLOCK(locked_tcb); 4635 } 4636 return (NULL); 4637 } 4638 if (netp) { 4639 sctp_handle_init(m, iphlen, *offset, sh, 4640 (struct sctp_init_chunk *)ch, inp, 4641 stcb, *netp, &abort_no_unlock, vrf_id, port); 4642 } 4643 if (abort_no_unlock) 4644 return (NULL); 4645 4646 *offset = length; 4647 if (locked_tcb) { 4648 SCTP_TCB_UNLOCK(locked_tcb); 4649 } 4650 return (NULL); 4651 break; 4652 case SCTP_PAD_CHUNK: 4653 break; 4654 case SCTP_INITIATION_ACK: 4655 /* must be first and only chunk */ 4656 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n"); 4657 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4658 /* We are not interested anymore */ 4659 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4660 ; 4661 } else { 4662 if (locked_tcb != stcb) { 4663 /* Very unlikely */ 4664 SCTP_TCB_UNLOCK(locked_tcb); 4665 } 4666 *offset = length; 4667 if (stcb) { 4668#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4669 so = SCTP_INP_SO(inp); 4670 atomic_add_int(&stcb->asoc.refcnt, 1); 4671 SCTP_TCB_UNLOCK(stcb); 4672 SCTP_SOCKET_LOCK(so, 1); 4673 SCTP_TCB_LOCK(stcb); 4674 
atomic_subtract_int(&stcb->asoc.refcnt, 1); 4675#endif 4676 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27); 4677#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4678 SCTP_SOCKET_UNLOCK(so, 1); 4679#endif 4680 } 4681 return (NULL); 4682 } 4683 } 4684 if ((num_chunks > 1) || 4685 (SCTP_BASE_SYSCTL(sctp_strict_init) && (length - *offset > (int)SCTP_SIZE32(chk_length)))) { 4686 *offset = length; 4687 if (locked_tcb) { 4688 SCTP_TCB_UNLOCK(locked_tcb); 4689 } 4690 return (NULL); 4691 } 4692 if ((netp) && (*netp)) { 4693 ret = sctp_handle_init_ack(m, iphlen, *offset, sh, 4694 (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id); 4695 } else { 4696 ret = -1; 4697 } 4698 /* 4699 * Special case, I must call the output routine to 4700 * get the cookie echoed 4701 */ 4702 if (abort_no_unlock) 4703 return (NULL); 4704 4705 if ((stcb) && ret == 0) 4706 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 4707 *offset = length; 4708 if (locked_tcb) { 4709 SCTP_TCB_UNLOCK(locked_tcb); 4710 } 4711 return (NULL); 4712 break; 4713 case SCTP_SELECTIVE_ACK: 4714 { 4715 struct sctp_sack_chunk *sack; 4716 int abort_now = 0; 4717 uint32_t a_rwnd, cum_ack; 4718 uint16_t num_seg, num_dup; 4719 uint8_t flags; 4720 int offset_seg, offset_dup; 4721 4722 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n"); 4723 SCTP_STAT_INCR(sctps_recvsacks); 4724 if (stcb == NULL) { 4725 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n"); 4726 break; 4727 } 4728 if (chk_length < sizeof(struct sctp_sack_chunk)) { 4729 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n"); 4730 break; 4731 } 4732 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 4733 /*- 4734 * If we have sent a shutdown-ack, we will pay no 4735 * attention to a sack sent in to us since 4736 * we don't care anymore. 
4737 */ 4738 break; 4739 } 4740 sack = (struct sctp_sack_chunk *)ch; 4741 flags = ch->chunk_flags; 4742 cum_ack = ntohl(sack->sack.cum_tsn_ack); 4743 num_seg = ntohs(sack->sack.num_gap_ack_blks); 4744 num_dup = ntohs(sack->sack.num_dup_tsns); 4745 a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd); 4746 if (sizeof(struct sctp_sack_chunk) + 4747 num_seg * sizeof(struct sctp_gap_ack_block) + 4748 num_dup * sizeof(uint32_t) != chk_length) { 4749 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n"); 4750 break; 4751 } 4752 offset_seg = *offset + sizeof(struct sctp_sack_chunk); 4753 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block); 4754 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n", 4755 cum_ack, num_seg, a_rwnd); 4756 stcb->asoc.seen_a_sack_this_pkt = 1; 4757 if ((stcb->asoc.pr_sctp_cnt == 0) && 4758 (num_seg == 0) && 4759 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) && 4760 (stcb->asoc.saw_sack_with_frags == 0) && 4761 (stcb->asoc.saw_sack_with_nr_frags == 0) && 4762 (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) 4763 ) { 4764 /* 4765 * We have a SIMPLE sack having no 4766 * prior segments and data on sent 4767 * queue to be acked.. Use the 4768 * faster path sack processing. We 4769 * also allow window update sacks 4770 * with no missing segments to go 4771 * this way too. 
4772 */ 4773 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, &abort_now, ecne_seen); 4774 } else { 4775 if (netp && *netp) 4776 sctp_handle_sack(m, offset_seg, offset_dup, 4777 stcb, *netp, 4778 num_seg, 0, num_dup, &abort_now, flags, 4779 cum_ack, a_rwnd, ecne_seen); 4780 } 4781 if (abort_now) { 4782 /* ABORT signal from sack processing */ 4783 *offset = length; 4784 return (NULL); 4785 } 4786 if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 4787 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 4788 (stcb->asoc.stream_queue_cnt == 0)) { 4789 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 4790 } 4791 } 4792 break; 4793 /* 4794 * EY - nr_sack: If the received chunk is an 4795 * nr_sack chunk 4796 */ 4797 case SCTP_NR_SELECTIVE_ACK: 4798 { 4799 struct sctp_nr_sack_chunk *nr_sack; 4800 int abort_now = 0; 4801 uint32_t a_rwnd, cum_ack; 4802 uint16_t num_seg, num_nr_seg, num_dup; 4803 uint8_t flags; 4804 int offset_seg, offset_dup; 4805 4806 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n"); 4807 SCTP_STAT_INCR(sctps_recvsacks); 4808 if (stcb == NULL) { 4809 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n"); 4810 break; 4811 } 4812 if ((stcb->asoc.sctp_nr_sack_on_off == 0) || 4813 (stcb->asoc.peer_supports_nr_sack == 0)) { 4814 goto unknown_chunk; 4815 } 4816 if (chk_length < sizeof(struct sctp_nr_sack_chunk)) { 4817 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n"); 4818 break; 4819 } 4820 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 4821 /*- 4822 * If we have sent a shutdown-ack, we will pay no 4823 * attention to a sack sent in to us since 4824 * we don't care anymore. 
4825 */ 4826 break; 4827 } 4828 nr_sack = (struct sctp_nr_sack_chunk *)ch; 4829 flags = ch->chunk_flags; 4830 cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack); 4831 num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks); 4832 num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks); 4833 num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns); 4834 a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd); 4835 if (sizeof(struct sctp_nr_sack_chunk) + 4836 (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) + 4837 num_dup * sizeof(uint32_t) != chk_length) { 4838 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n"); 4839 break; 4840 } 4841 offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk); 4842 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block); 4843 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n", 4844 cum_ack, num_seg, a_rwnd); 4845 stcb->asoc.seen_a_sack_this_pkt = 1; 4846 if ((stcb->asoc.pr_sctp_cnt == 0) && 4847 (num_seg == 0) && (num_nr_seg == 0) && 4848 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) && 4849 (stcb->asoc.saw_sack_with_frags == 0) && 4850 (stcb->asoc.saw_sack_with_nr_frags == 0) && 4851 (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 4852 /* 4853 * We have a SIMPLE sack having no 4854 * prior segments and data on sent 4855 * queue to be acked. Use the faster 4856 * path sack processing. We also 4857 * allow window update sacks with no 4858 * missing segments to go this way 4859 * too. 
4860 */ 4861 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 4862 &abort_now, ecne_seen); 4863 } else { 4864 if (netp && *netp) 4865 sctp_handle_sack(m, offset_seg, offset_dup, 4866 stcb, *netp, 4867 num_seg, num_nr_seg, num_dup, &abort_now, flags, 4868 cum_ack, a_rwnd, ecne_seen); 4869 } 4870 if (abort_now) { 4871 /* ABORT signal from sack processing */ 4872 *offset = length; 4873 return (NULL); 4874 } 4875 if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 4876 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 4877 (stcb->asoc.stream_queue_cnt == 0)) { 4878 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 4879 } 4880 } 4881 break; 4882 4883 case SCTP_HEARTBEAT_REQUEST: 4884 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n"); 4885 if ((stcb) && netp && *netp) { 4886 SCTP_STAT_INCR(sctps_recvheartbeat); 4887 sctp_send_heartbeat_ack(stcb, m, *offset, 4888 chk_length, *netp); 4889 4890 /* He's alive so give him credit */ 4891 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4892 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4893 stcb->asoc.overall_error_count, 4894 0, 4895 SCTP_FROM_SCTP_INPUT, 4896 __LINE__); 4897 } 4898 stcb->asoc.overall_error_count = 0; 4899 } 4900 break; 4901 case SCTP_HEARTBEAT_ACK: 4902 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n"); 4903 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) { 4904 /* Its not ours */ 4905 *offset = length; 4906 if (locked_tcb) { 4907 SCTP_TCB_UNLOCK(locked_tcb); 4908 } 4909 return (NULL); 4910 } 4911 /* He's alive so give him credit */ 4912 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4913 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4914 stcb->asoc.overall_error_count, 4915 0, 4916 SCTP_FROM_SCTP_INPUT, 4917 __LINE__); 4918 } 4919 stcb->asoc.overall_error_count = 0; 4920 SCTP_STAT_INCR(sctps_recvheartbeatack); 4921 if (netp && *netp) 4922 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch, 4923 stcb, *netp); 4924 break; 4925 case 
SCTP_ABORT_ASSOCIATION: 4926 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n", 4927 stcb); 4928 if ((stcb) && netp && *netp) 4929 sctp_handle_abort((struct sctp_abort_chunk *)ch, 4930 stcb, *netp); 4931 *offset = length; 4932 return (NULL); 4933 break; 4934 case SCTP_SHUTDOWN: 4935 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n", 4936 stcb); 4937 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) { 4938 *offset = length; 4939 if (locked_tcb) { 4940 SCTP_TCB_UNLOCK(locked_tcb); 4941 } 4942 return (NULL); 4943 } 4944 if (netp && *netp) { 4945 int abort_flag = 0; 4946 4947 sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch, 4948 stcb, *netp, &abort_flag); 4949 if (abort_flag) { 4950 *offset = length; 4951 return (NULL); 4952 } 4953 } 4954 break; 4955 case SCTP_SHUTDOWN_ACK: 4956 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb); 4957 if ((stcb) && (netp) && (*netp)) 4958 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp); 4959 *offset = length; 4960 return (NULL); 4961 break; 4962 4963 case SCTP_OPERATION_ERROR: 4964 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n"); 4965 if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) { 4966 4967 *offset = length; 4968 return (NULL); 4969 } 4970 break; 4971 case SCTP_COOKIE_ECHO: 4972 SCTPDBG(SCTP_DEBUG_INPUT3, 4973 "SCTP_COOKIE-ECHO, stcb %p\n", stcb); 4974 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4975 ; 4976 } else { 4977 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4978 /* We are not interested anymore */ 4979 abend: 4980 if (stcb) { 4981 SCTP_TCB_UNLOCK(stcb); 4982 } 4983 *offset = length; 4984 return (NULL); 4985 } 4986 } 4987 /* 4988 * First are we accepting? We do this again here 4989 * since it is possible that a previous endpoint WAS 4990 * listening responded to a INIT-ACK and then 4991 * closed. We opened and bound.. and are now no 4992 * longer listening. 
4993 */ 4994 4995 if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) { 4996 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 4997 (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) { 4998 struct mbuf *oper; 4999 struct sctp_paramhdr *phdr; 5000 5001 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 5002 0, M_DONTWAIT, 1, MT_DATA); 5003 if (oper) { 5004 SCTP_BUF_LEN(oper) = 5005 sizeof(struct sctp_paramhdr); 5006 phdr = mtod(oper, 5007 struct sctp_paramhdr *); 5008 phdr->param_type = 5009 htons(SCTP_CAUSE_OUT_OF_RESC); 5010 phdr->param_length = 5011 htons(sizeof(struct sctp_paramhdr)); 5012 } 5013 sctp_abort_association(inp, stcb, m, 5014 iphlen, sh, oper, vrf_id, port); 5015 } 5016 *offset = length; 5017 return (NULL); 5018 } else { 5019 struct mbuf *ret_buf; 5020 struct sctp_inpcb *linp; 5021 5022 if (stcb) { 5023 linp = NULL; 5024 } else { 5025 linp = inp; 5026 } 5027 5028 if (linp) { 5029 SCTP_ASOC_CREATE_LOCK(linp); 5030 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5031 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5032 SCTP_ASOC_CREATE_UNLOCK(linp); 5033 goto abend; 5034 } 5035 } 5036 if (netp) { 5037 ret_buf = 5038 sctp_handle_cookie_echo(m, iphlen, 5039 *offset, sh, 5040 (struct sctp_cookie_echo_chunk *)ch, 5041 &inp, &stcb, netp, 5042 auth_skipped, 5043 auth_offset, 5044 auth_len, 5045 &locked_tcb, 5046 vrf_id, 5047 port); 5048 } else { 5049 ret_buf = NULL; 5050 } 5051 if (linp) { 5052 SCTP_ASOC_CREATE_UNLOCK(linp); 5053 } 5054 if (ret_buf == NULL) { 5055 if (locked_tcb) { 5056 SCTP_TCB_UNLOCK(locked_tcb); 5057 } 5058 SCTPDBG(SCTP_DEBUG_INPUT3, 5059 "GAK, null buffer\n"); 5060 auth_skipped = 0; 5061 *offset = length; 5062 return (NULL); 5063 } 5064 /* if AUTH skipped, see if it verified... 
*/ 5065 if (auth_skipped) { 5066 got_auth = 1; 5067 auth_skipped = 0; 5068 } 5069 if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) { 5070 /* 5071 * Restart the timer if we have 5072 * pending data 5073 */ 5074 struct sctp_tmit_chunk *chk; 5075 5076 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 5077 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 5078 } 5079 } 5080 break; 5081 case SCTP_COOKIE_ACK: 5082 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb); 5083 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) { 5084 if (locked_tcb) { 5085 SCTP_TCB_UNLOCK(locked_tcb); 5086 } 5087 return (NULL); 5088 } 5089 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5090 /* We are not interested anymore */ 5091 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 5092 ; 5093 } else if (stcb) { 5094#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5095 so = SCTP_INP_SO(inp); 5096 atomic_add_int(&stcb->asoc.refcnt, 1); 5097 SCTP_TCB_UNLOCK(stcb); 5098 SCTP_SOCKET_LOCK(so, 1); 5099 SCTP_TCB_LOCK(stcb); 5100 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5101#endif 5102 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27); 5103#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5104 SCTP_SOCKET_UNLOCK(so, 1); 5105#endif 5106 *offset = length; 5107 return (NULL); 5108 } 5109 } 5110 /* He's alive so give him credit */ 5111 if ((stcb) && netp && *netp) { 5112 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5113 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5114 stcb->asoc.overall_error_count, 5115 0, 5116 SCTP_FROM_SCTP_INPUT, 5117 __LINE__); 5118 } 5119 stcb->asoc.overall_error_count = 0; 5120 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp); 5121 } 5122 break; 5123 case SCTP_ECN_ECHO: 5124 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n"); 5125 /* He's alive so give him credit */ 5126 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) { 
5127 /* Its not ours */ 5128 if (locked_tcb) { 5129 SCTP_TCB_UNLOCK(locked_tcb); 5130 } 5131 *offset = length; 5132 return (NULL); 5133 } 5134 if (stcb) { 5135 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5136 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5137 stcb->asoc.overall_error_count, 5138 0, 5139 SCTP_FROM_SCTP_INPUT, 5140 __LINE__); 5141 } 5142 stcb->asoc.overall_error_count = 0; 5143 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, 5144 stcb); 5145 ecne_seen = 1; 5146 } 5147 break; 5148 case SCTP_ECN_CWR: 5149 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n"); 5150 /* He's alive so give him credit */ 5151 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) { 5152 /* Its not ours */ 5153 if (locked_tcb) { 5154 SCTP_TCB_UNLOCK(locked_tcb); 5155 } 5156 *offset = length; 5157 return (NULL); 5158 } 5159 if (stcb) { 5160 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5161 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5162 stcb->asoc.overall_error_count, 5163 0, 5164 SCTP_FROM_SCTP_INPUT, 5165 __LINE__); 5166 } 5167 stcb->asoc.overall_error_count = 0; 5168 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp); 5169 } 5170 break; 5171 case SCTP_SHUTDOWN_COMPLETE: 5172 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb); 5173 /* must be first and only chunk */ 5174 if ((num_chunks > 1) || 5175 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 5176 *offset = length; 5177 if (locked_tcb) { 5178 SCTP_TCB_UNLOCK(locked_tcb); 5179 } 5180 return (NULL); 5181 } 5182 if ((stcb) && netp && *netp) { 5183 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch, 5184 stcb, *netp); 5185 } 5186 *offset = length; 5187 return (NULL); 5188 break; 5189 case SCTP_ASCONF: 5190 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n"); 5191 /* He's alive so give him credit */ 5192 if (stcb) { 5193 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5194 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5195 
stcb->asoc.overall_error_count, 5196 0, 5197 SCTP_FROM_SCTP_INPUT, 5198 __LINE__); 5199 } 5200 stcb->asoc.overall_error_count = 0; 5201 sctp_handle_asconf(m, *offset, 5202 (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0); 5203 asconf_cnt++; 5204 } 5205 break; 5206 case SCTP_ASCONF_ACK: 5207 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n"); 5208 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) { 5209 /* Its not ours */ 5210 if (locked_tcb) { 5211 SCTP_TCB_UNLOCK(locked_tcb); 5212 } 5213 *offset = length; 5214 return (NULL); 5215 } 5216 if ((stcb) && netp && *netp) { 5217 /* He's alive so give him credit */ 5218 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5219 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5220 stcb->asoc.overall_error_count, 5221 0, 5222 SCTP_FROM_SCTP_INPUT, 5223 __LINE__); 5224 } 5225 stcb->asoc.overall_error_count = 0; 5226 sctp_handle_asconf_ack(m, *offset, 5227 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock); 5228 if (abort_no_unlock) 5229 return (NULL); 5230 } 5231 break; 5232 case SCTP_FORWARD_CUM_TSN: 5233 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n"); 5234 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) { 5235 /* Its not ours */ 5236 if (locked_tcb) { 5237 SCTP_TCB_UNLOCK(locked_tcb); 5238 } 5239 *offset = length; 5240 return (NULL); 5241 } 5242 /* He's alive so give him credit */ 5243 if (stcb) { 5244 int abort_flag = 0; 5245 5246 stcb->asoc.overall_error_count = 0; 5247 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5248 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5249 stcb->asoc.overall_error_count, 5250 0, 5251 SCTP_FROM_SCTP_INPUT, 5252 __LINE__); 5253 } 5254 *fwd_tsn_seen = 1; 5255 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5256 /* We are not interested anymore */ 5257#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5258 so = SCTP_INP_SO(inp); 5259 atomic_add_int(&stcb->asoc.refcnt, 1); 5260 SCTP_TCB_UNLOCK(stcb); 5261 SCTP_SOCKET_LOCK(so, 1); 5262 
SCTP_TCB_LOCK(stcb); 5263 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5264#endif 5265 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29); 5266#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5267 SCTP_SOCKET_UNLOCK(so, 1); 5268#endif 5269 *offset = length; 5270 return (NULL); 5271 } 5272 sctp_handle_forward_tsn(stcb, 5273 (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset); 5274 if (abort_flag) { 5275 *offset = length; 5276 return (NULL); 5277 } else { 5278 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5279 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5280 stcb->asoc.overall_error_count, 5281 0, 5282 SCTP_FROM_SCTP_INPUT, 5283 __LINE__); 5284 } 5285 stcb->asoc.overall_error_count = 0; 5286 } 5287 5288 } 5289 break; 5290 case SCTP_STREAM_RESET: 5291 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n"); 5292 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) { 5293 /* Its not ours */ 5294 if (locked_tcb) { 5295 SCTP_TCB_UNLOCK(locked_tcb); 5296 } 5297 *offset = length; 5298 return (NULL); 5299 } 5300 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5301 /* We are not interested anymore */ 5302#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5303 so = SCTP_INP_SO(inp); 5304 atomic_add_int(&stcb->asoc.refcnt, 1); 5305 SCTP_TCB_UNLOCK(stcb); 5306 SCTP_SOCKET_LOCK(so, 1); 5307 SCTP_TCB_LOCK(stcb); 5308 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5309#endif 5310 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30); 5311#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5312 SCTP_SOCKET_UNLOCK(so, 1); 5313#endif 5314 *offset = length; 5315 return (NULL); 5316 } 5317 if (stcb->asoc.peer_supports_strreset == 0) { 5318 /* 5319 * hmm, peer should have announced this, but 5320 * we will turn it on since he is sending us 5321 * a stream reset. 
5322 */ 5323 stcb->asoc.peer_supports_strreset = 1; 5324 } 5325 if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) { 5326 /* stop processing */ 5327 *offset = length; 5328 return (NULL); 5329 } 5330 break; 5331 case SCTP_PACKET_DROPPED: 5332 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n"); 5333 /* re-get it all please */ 5334 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) { 5335 /* Its not ours */ 5336 if (locked_tcb) { 5337 SCTP_TCB_UNLOCK(locked_tcb); 5338 } 5339 *offset = length; 5340 return (NULL); 5341 } 5342 if (ch && (stcb) && netp && (*netp)) { 5343 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch, 5344 stcb, *netp, 5345 min(chk_length, (sizeof(chunk_buf) - 4))); 5346 5347 } 5348 break; 5349 5350 case SCTP_AUTHENTICATION: 5351 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n"); 5352 if (SCTP_BASE_SYSCTL(sctp_auth_disable)) 5353 goto unknown_chunk; 5354 5355 if (stcb == NULL) { 5356 /* save the first AUTH for later processing */ 5357 if (auth_skipped == 0) { 5358 auth_offset = *offset; 5359 auth_len = chk_length; 5360 auth_skipped = 1; 5361 } 5362 /* skip this chunk (temporarily) */ 5363 goto next_chunk; 5364 } 5365 if ((chk_length < (sizeof(struct sctp_auth_chunk))) || 5366 (chk_length > (sizeof(struct sctp_auth_chunk) + 5367 SCTP_AUTH_DIGEST_LEN_MAX))) { 5368 /* Its not ours */ 5369 if (locked_tcb) { 5370 SCTP_TCB_UNLOCK(locked_tcb); 5371 } 5372 *offset = length; 5373 return (NULL); 5374 } 5375 if (got_auth == 1) { 5376 /* skip this chunk... it's already auth'd */ 5377 goto next_chunk; 5378 } 5379 got_auth = 1; 5380 if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, 5381 m, *offset)) { 5382 /* auth HMAC failed so dump the packet */ 5383 *offset = length; 5384 return (stcb); 5385 } else { 5386 /* remaining chunks are HMAC checked */ 5387 stcb->asoc.authenticated = 1; 5388 } 5389 break; 5390 5391 default: 5392 unknown_chunk: 5393 /* it's an unknown chunk! 
*/ 5394 if ((ch->chunk_type & 0x40) && (stcb != NULL)) { 5395 struct mbuf *mm; 5396 struct sctp_paramhdr *phd; 5397 5398 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 5399 0, M_DONTWAIT, 1, MT_DATA); 5400 if (mm) { 5401 phd = mtod(mm, struct sctp_paramhdr *); 5402 /* 5403 * We cheat and use param type since 5404 * we did not bother to define a 5405 * error cause struct. They are the 5406 * same basic format with different 5407 * names. 5408 */ 5409 phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK); 5410 phd->param_length = htons(chk_length + sizeof(*phd)); 5411 SCTP_BUF_LEN(mm) = sizeof(*phd); 5412 SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length), 5413 M_DONTWAIT); 5414 if (SCTP_BUF_NEXT(mm)) { 5415#ifdef SCTP_MBUF_LOGGING 5416 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 5417 struct mbuf *mat; 5418 5419 mat = SCTP_BUF_NEXT(mm); 5420 while (mat) { 5421 if (SCTP_BUF_IS_EXTENDED(mat)) { 5422 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 5423 } 5424 mat = SCTP_BUF_NEXT(mat); 5425 } 5426 } 5427#endif 5428 sctp_queue_op_err(stcb, mm); 5429 } else { 5430 sctp_m_freem(mm); 5431 } 5432 } 5433 } 5434 if ((ch->chunk_type & 0x80) == 0) { 5435 /* discard this packet */ 5436 *offset = length; 5437 return (stcb); 5438 } /* else skip this bad chunk and continue... 
*/ 5439 break; 5440 } /* switch (ch->chunk_type) */ 5441 5442 5443next_chunk: 5444 /* get the next chunk */ 5445 *offset += SCTP_SIZE32(chk_length); 5446 if (*offset >= length) { 5447 /* no more data left in the mbuf chain */ 5448 break; 5449 } 5450 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 5451 sizeof(struct sctp_chunkhdr), chunk_buf); 5452 if (ch == NULL) { 5453 if (locked_tcb) { 5454 SCTP_TCB_UNLOCK(locked_tcb); 5455 } 5456 *offset = length; 5457 return (NULL); 5458 } 5459 } /* while */ 5460 5461 if (asconf_cnt > 0 && stcb != NULL) { 5462 sctp_send_asconf_ack(stcb); 5463 } 5464 return (stcb); 5465} 5466 5467 5468#ifdef INVARIANTS 5469#ifdef __GNUC__ 5470__attribute__((noinline)) 5471#endif 5472 void 5473 sctp_validate_no_locks(struct sctp_inpcb *inp) 5474{ 5475 struct sctp_tcb *lstcb; 5476 5477 LIST_FOREACH(lstcb, &inp->sctp_asoc_list, sctp_tcblist) { 5478 if (mtx_owned(&lstcb->tcb_mtx)) { 5479 panic("Own lock on stcb at return from input"); 5480 } 5481 } 5482 if (mtx_owned(&inp->inp_create_mtx)) { 5483 panic("Own create lock on inp"); 5484 } 5485 if (mtx_owned(&inp->inp_mtx)) { 5486 panic("Own inp lock on inp"); 5487 } 5488} 5489 5490#endif 5491 5492/* 5493 * common input chunk processing (v4 and v6) 5494 */ 5495void 5496sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, 5497 int length, struct sctphdr *sh, struct sctp_chunkhdr *ch, 5498 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, 5499 uint8_t ecn_bits, uint32_t vrf_id, uint16_t port) 5500{ 5501 /* 5502 * Control chunk processing 5503 */ 5504 uint32_t high_tsn; 5505 int fwd_tsn_seen = 0, data_processed = 0; 5506 struct mbuf *m = *mm; 5507 int abort_flag = 0; 5508 int un_sent; 5509 int cnt_ctrl_ready = 0; 5510 5511 SCTP_STAT_INCR(sctps_recvdatagrams); 5512#ifdef SCTP_AUDITING_ENABLED 5513 sctp_audit_log(0xE0, 1); 5514 sctp_auditing(0, inp, stcb, net); 5515#endif 5516 5517 SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d 
offset:%d length:%d stcb:%p\n", 5518 m, iphlen, offset, length, stcb); 5519 if (stcb) { 5520 /* always clear this before beginning a packet */ 5521 stcb->asoc.authenticated = 0; 5522 stcb->asoc.seen_a_sack_this_pkt = 0; 5523 SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n", 5524 stcb, stcb->asoc.state); 5525 5526 if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) || 5527 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5528 /*- 5529 * If we hit here, we had a ref count 5530 * up when the assoc was aborted and the 5531 * timer is clearing out the assoc, we should 5532 * NOT respond to any packet.. its OOTB. 5533 */ 5534 SCTP_TCB_UNLOCK(stcb); 5535 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 5536 vrf_id, port); 5537 goto out_now; 5538 } 5539 } 5540 if (IS_SCTP_CONTROL(ch)) { 5541 /* process the control portion of the SCTP packet */ 5542 /* sa_ignore NO_NULL_CHK */ 5543 stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch, 5544 inp, stcb, &net, &fwd_tsn_seen, vrf_id, port); 5545 if (stcb) { 5546 /* 5547 * This covers us if the cookie-echo was there and 5548 * it changes our INP. 5549 */ 5550 inp = stcb->sctp_ep; 5551 if ((net) && (port)) { 5552 if (net->port == 0) { 5553 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr)); 5554 } 5555 net->port = port; 5556 } 5557 } 5558 } else { 5559 /* 5560 * no control chunks, so pre-process DATA chunks (these 5561 * checks are taken care of by control processing) 5562 */ 5563 5564 /* 5565 * if DATA only packet, and auth is required, then punt... 
5566 * can't have authenticated without any AUTH (control) 5567 * chunks 5568 */ 5569 if ((stcb != NULL) && 5570 !SCTP_BASE_SYSCTL(sctp_auth_disable) && 5571 sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) { 5572 /* "silently" ignore */ 5573 SCTP_STAT_INCR(sctps_recvauthmissing); 5574 SCTP_TCB_UNLOCK(stcb); 5575 goto out_now; 5576 } 5577 if (stcb == NULL) { 5578 /* out of the blue DATA chunk */ 5579 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 5580 vrf_id, port); 5581 goto out_now; 5582 } 5583 if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) { 5584 /* v_tag mismatch! */ 5585 SCTP_STAT_INCR(sctps_badvtag); 5586 SCTP_TCB_UNLOCK(stcb); 5587 goto out_now; 5588 } 5589 } 5590 5591 if (stcb == NULL) { 5592 /* 5593 * no valid TCB for this packet, or we found it's a bad 5594 * packet while processing control, or we're done with this 5595 * packet (done or skip rest of data), so we drop it... 5596 */ 5597 goto out_now; 5598 } 5599 /* 5600 * DATA chunk processing 5601 */ 5602 /* plow through the data chunks while length > offset */ 5603 5604 /* 5605 * Rest should be DATA only. Check authentication state if AUTH for 5606 * DATA is required. 5607 */ 5608 if ((length > offset) && 5609 (stcb != NULL) && 5610 !SCTP_BASE_SYSCTL(sctp_auth_disable) && 5611 sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) && 5612 !stcb->asoc.authenticated) { 5613 /* "silently" ignore */ 5614 SCTP_STAT_INCR(sctps_recvauthmissing); 5615 SCTPDBG(SCTP_DEBUG_AUTH1, 5616 "Data chunk requires AUTH, skipped\n"); 5617 goto trigger_send; 5618 } 5619 if (length > offset) { 5620 int retval; 5621 5622 /* 5623 * First check to make sure our state is correct. We would 5624 * not get here unless we really did have a tag, so we don't 5625 * abort if this happens, just dump the chunk silently. 
5626 */ 5627 switch (SCTP_GET_STATE(&stcb->asoc)) { 5628 case SCTP_STATE_COOKIE_ECHOED: 5629 /* 5630 * we consider data with valid tags in this state 5631 * shows us the cookie-ack was lost. Imply it was 5632 * there. 5633 */ 5634 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5635 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5636 stcb->asoc.overall_error_count, 5637 0, 5638 SCTP_FROM_SCTP_INPUT, 5639 __LINE__); 5640 } 5641 stcb->asoc.overall_error_count = 0; 5642 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net); 5643 break; 5644 case SCTP_STATE_COOKIE_WAIT: 5645 /* 5646 * We consider OOTB any data sent during asoc setup. 5647 */ 5648 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 5649 vrf_id, port); 5650 SCTP_TCB_UNLOCK(stcb); 5651 goto out_now; 5652 /* sa_ignore NOTREACHED */ 5653 break; 5654 case SCTP_STATE_EMPTY: /* should not happen */ 5655 case SCTP_STATE_INUSE: /* should not happen */ 5656 case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */ 5657 case SCTP_STATE_SHUTDOWN_ACK_SENT: 5658 default: 5659 SCTP_TCB_UNLOCK(stcb); 5660 goto out_now; 5661 /* sa_ignore NOTREACHED */ 5662 break; 5663 case SCTP_STATE_OPEN: 5664 case SCTP_STATE_SHUTDOWN_SENT: 5665 break; 5666 } 5667 /* plow through the data chunks while length > offset */ 5668 retval = sctp_process_data(mm, iphlen, &offset, length, sh, 5669 inp, stcb, net, &high_tsn); 5670 if (retval == 2) { 5671 /* 5672 * The association aborted, NO UNLOCK needed since 5673 * the association is destroyed. 
5674 */ 5675 goto out_now; 5676 } 5677 data_processed = 1; 5678 /* 5679 * Anything important needs to have been m_copy'ed in 5680 * process_data 5681 */ 5682 } 5683 /* take care of ecn */ 5684 if ((stcb->asoc.ecn_allowed == 1) && 5685 ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) { 5686 /* Yep, we need to add a ECNE */ 5687 sctp_send_ecn_echo(stcb, net, high_tsn); 5688 } 5689 if ((data_processed == 0) && (fwd_tsn_seen)) { 5690 int was_a_gap; 5691 uint32_t highest_tsn; 5692 5693 if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) { 5694 highest_tsn = stcb->asoc.highest_tsn_inside_nr_map; 5695 } else { 5696 highest_tsn = stcb->asoc.highest_tsn_inside_map; 5697 } 5698 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 5699 stcb->asoc.send_sack = 1; 5700 sctp_sack_check(stcb, was_a_gap, &abort_flag); 5701 if (abort_flag) { 5702 /* Again, we aborted so NO UNLOCK needed */ 5703 goto out_now; 5704 } 5705 } else if (fwd_tsn_seen) { 5706 stcb->asoc.send_sack = 1; 5707 } 5708 /* trigger send of any chunks in queue... 
*/ 5709trigger_send: 5710#ifdef SCTP_AUDITING_ENABLED 5711 sctp_audit_log(0xE0, 2); 5712 sctp_auditing(1, inp, stcb, net); 5713#endif 5714 SCTPDBG(SCTP_DEBUG_INPUT1, 5715 "Check for chunk output prw:%d tqe:%d tf=%d\n", 5716 stcb->asoc.peers_rwnd, 5717 TAILQ_EMPTY(&stcb->asoc.control_send_queue), 5718 stcb->asoc.total_flight); 5719 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); 5720 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 5721 cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq; 5722 } 5723 if (cnt_ctrl_ready || 5724 ((un_sent) && 5725 (stcb->asoc.peers_rwnd > 0 || 5726 (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) { 5727 SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n"); 5728 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 5729 SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n"); 5730 } 5731#ifdef SCTP_AUDITING_ENABLED 5732 sctp_audit_log(0xE0, 3); 5733 sctp_auditing(2, inp, stcb, net); 5734#endif 5735 SCTP_TCB_UNLOCK(stcb); 5736out_now: 5737#ifdef INVARIANTS 5738 sctp_validate_no_locks(inp); 5739#endif 5740 return; 5741} 5742 5743#if 0 5744static void 5745sctp_print_mbuf_chain(struct mbuf *m) 5746{ 5747 for (; m; m = SCTP_BUF_NEXT(m)) { 5748 printf("%p: m_len = %ld\n", m, SCTP_BUF_LEN(m)); 5749 if (SCTP_BUF_IS_EXTENDED(m)) 5750 printf("%p: extend_size = %d\n", m, SCTP_BUF_EXTEND_SIZE(m)); 5751 } 5752} 5753 5754#endif 5755 5756#ifdef INET 5757void 5758sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port) 5759{ 5760#ifdef SCTP_MBUF_LOGGING 5761 struct mbuf *mat; 5762 5763#endif 5764 struct mbuf *m; 5765 int iphlen; 5766 uint32_t vrf_id = 0; 5767 uint8_t ecn_bits; 5768 struct ip *ip; 5769 struct sctphdr *sh; 5770 struct sctp_inpcb *inp = NULL; 5771 struct sctp_nets *net; 5772 struct sctp_tcb *stcb = NULL; 5773 struct sctp_chunkhdr *ch; 5774 int refcount_up = 0; 5775 int length, mlen, offset; 5776 5777#if !defined(SCTP_WITH_NO_CSUM) 5778 
uint32_t check, calc_check; 5779 5780#endif 5781 5782 if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) { 5783 SCTP_RELEASE_PKT(i_pak); 5784 return; 5785 } 5786 mlen = SCTP_HEADER_LEN(i_pak); 5787 iphlen = off; 5788 m = SCTP_HEADER_TO_CHAIN(i_pak); 5789 5790 net = NULL; 5791 SCTP_STAT_INCR(sctps_recvpackets); 5792 SCTP_STAT_INCR_COUNTER64(sctps_inpackets); 5793 5794 5795#ifdef SCTP_MBUF_LOGGING 5796 /* Log in any input mbufs */ 5797 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 5798 mat = m; 5799 while (mat) { 5800 if (SCTP_BUF_IS_EXTENDED(mat)) { 5801 sctp_log_mb(mat, SCTP_MBUF_INPUT); 5802 } 5803 mat = SCTP_BUF_NEXT(mat); 5804 } 5805 } 5806#endif 5807#ifdef SCTP_PACKET_LOGGING 5808 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) 5809 sctp_packet_log(m, mlen); 5810#endif 5811 /* 5812 * Must take out the iphlen, since mlen expects this (only effect lb 5813 * case) 5814 */ 5815 mlen -= iphlen; 5816 5817 /* 5818 * Get IP, SCTP, and first chunk header together in first mbuf. 5819 */ 5820 ip = mtod(m, struct ip *); 5821 offset = iphlen + sizeof(*sh) + sizeof(*ch); 5822 if (SCTP_BUF_LEN(m) < offset) { 5823 if ((m = m_pullup(m, offset)) == 0) { 5824 SCTP_STAT_INCR(sctps_hdrops); 5825 return; 5826 } 5827 ip = mtod(m, struct ip *); 5828 } 5829 /* validate mbuf chain length with IP payload length */ 5830 if (mlen < (SCTP_GET_IPV4_LENGTH(ip) - iphlen)) { 5831 SCTP_STAT_INCR(sctps_hdrops); 5832 goto bad; 5833 } 5834 sh = (struct sctphdr *)((caddr_t)ip + iphlen); 5835 ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh)); 5836 SCTPDBG(SCTP_DEBUG_INPUT1, 5837 "sctp_input() length:%d iphlen:%d\n", mlen, iphlen); 5838 5839 /* SCTP does not allow broadcasts or multicasts */ 5840 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { 5841 goto bad; 5842 } 5843 if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) { 5844 /* 5845 * We only look at broadcast if its a front state, All 5846 * others we will not have a tcb for anyway. 
5847 */ 5848 goto bad; 5849 } 5850 /* validate SCTP checksum */ 5851 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 5852 "sctp_input(): Packet of length %d received on %s with csum_flags 0x%x.\n", 5853 m->m_pkthdr.len, 5854 if_name(m->m_pkthdr.rcvif), 5855 m->m_pkthdr.csum_flags); 5856#if defined(SCTP_WITH_NO_CSUM) 5857 SCTP_STAT_INCR(sctps_recvnocrc); 5858#else 5859 if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) { 5860 SCTP_STAT_INCR(sctps_recvhwcrc); 5861 goto sctp_skip_csum_4; 5862 } 5863 check = sh->checksum; /* save incoming checksum */ 5864 sh->checksum = 0; /* prepare for calc */ 5865 calc_check = sctp_calculate_cksum(m, iphlen); 5866 sh->checksum = check; 5867 SCTP_STAT_INCR(sctps_recvswcrc); 5868 if (calc_check != check) { 5869 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n", 5870 calc_check, check, m, mlen, iphlen); 5871 5872 stcb = sctp_findassociation_addr(m, iphlen, 5873 offset - sizeof(*ch), 5874 sh, ch, &inp, &net, 5875 vrf_id); 5876 if ((net) && (port)) { 5877 if (net->port == 0) { 5878 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr)); 5879 } 5880 net->port = port; 5881 } 5882 if ((net != NULL) && (m->m_flags & M_FLOWID)) { 5883 net->flowid = m->m_pkthdr.flowid; 5884#ifdef INVARIANTS 5885 net->flowidset = 1; 5886#endif 5887 } 5888 if ((inp) && (stcb)) { 5889 sctp_send_packet_dropped(stcb, net, m, iphlen, 1); 5890 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED); 5891 } else if ((inp != NULL) && (stcb == NULL)) { 5892 refcount_up = 1; 5893 } 5894 SCTP_STAT_INCR(sctps_badsum); 5895 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors); 5896 goto bad; 5897 } 5898sctp_skip_csum_4: 5899#endif 5900 /* destination port of 0 is illegal, based on RFC2960. */ 5901 if (sh->dest_port == 0) { 5902 SCTP_STAT_INCR(sctps_hdrops); 5903 goto bad; 5904 } 5905 /* 5906 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants 5907 * IP/SCTP/first chunk header... 
5908 */ 5909 stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch), 5910 sh, ch, &inp, &net, vrf_id); 5911 if ((net) && (port)) { 5912 if (net->port == 0) { 5913 sctp_pathmtu_adjustment(inp, stcb, net, net->mtu - sizeof(struct udphdr)); 5914 } 5915 net->port = port; 5916 } 5917 if ((net != NULL) && (m->m_flags & M_FLOWID)) { 5918 net->flowid = m->m_pkthdr.flowid; 5919#ifdef INVARIANTS 5920 net->flowidset = 1; 5921#endif 5922 } 5923 /* inp's ref-count increased && stcb locked */ 5924 if (inp == NULL) { 5925 struct sctp_init_chunk *init_chk, chunk_buf; 5926 5927 SCTP_STAT_INCR(sctps_noport); 5928#ifdef ICMP_BANDLIM 5929 /* 5930 * we use the bandwidth limiting to protect against sending 5931 * too many ABORTS all at once. In this case these count the 5932 * same as an ICMP message. 5933 */ 5934 if (badport_bandlim(0) < 0) 5935 goto bad; 5936#endif /* ICMP_BANDLIM */ 5937 SCTPDBG(SCTP_DEBUG_INPUT1, 5938 "Sending a ABORT from packet entry!\n"); 5939 if (ch->chunk_type == SCTP_INITIATION) { 5940 /* 5941 * we do a trick here to get the INIT tag, dig in 5942 * and get the tag from the INIT and put it in the 5943 * common header. 5944 */ 5945 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 5946 iphlen + sizeof(*sh), sizeof(*init_chk), 5947 (uint8_t *) & chunk_buf); 5948 if (init_chk != NULL) 5949 sh->v_tag = init_chk->init.initiate_tag; 5950 } 5951 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 5952 sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id, port); 5953 goto bad; 5954 } 5955 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) { 5956 goto bad; 5957 } 5958 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) 5959 sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id, port); 5960 goto bad; 5961 } else if (stcb == NULL) { 5962 refcount_up = 1; 5963 } 5964#ifdef IPSEC 5965 /* 5966 * I very much doubt any of the IPSEC stuff will work but I have no 5967 * idea, so I will leave it in place. 
	 */
	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
		MODULE_GLOBAL(ipsec4stat).in_polvio++;
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
#endif				/* IPSEC */

	/*
	 * common chunk processing
	 */
	length = ip->ip_len + iphlen;
	offset -= sizeof(struct sctp_chunkhdr);

	ecn_bits = ip->ip_tos;

	/* sa_ignore NO_NULL_CHK */
	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
	    inp, stcb, net, ecn_bits, vrf_id, port);
	/* inp's ref-count reduced && stcb unlocked */
	if (m) {
		sctp_m_freem(m);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	return;
	/*
	 * Error exit: release the stcb lock and the inp reference taken by
	 * the lookup (when no stcb was found), then drop the packet.
	 */
bad:
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	if (m) {
		sctp_m_freem(m);
	}
	return;
}

#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
/*
 * Dispatch table mapping (flowid % mp_ncpus) to a CPU id for multi-core
 * input processing; defined and populated elsewhere in the SCTP stack.
 */
extern int *sctp_cpuarry;

#endif

/*
 * Protocol-switch entry point for inbound IPv4 SCTP packets.
 *
 * m   - mbuf chain holding the received packet (IP header included).
 * off - offset of the SCTP common header within the packet, i.e. the
 *       IP header length.
 *
 * When multi-core SCTP input is compiled in (SCTP_MCORE_INPUT + SMP) and
 * more than one CPU is available, the packet is not processed inline.
 * Instead a 32-bit flow id is taken from the mbuf header (if the NIC
 * supplied one via M_FLOWID) or synthesized from the SCTP verification
 * tag and the port pair, and the packet is queued via
 * sctp_queue_to_mcore() to the CPU chosen from sctp_cpuarry, keeping all
 * traffic of one association on the same core.  Otherwise the packet is
 * handled directly by sctp_input_with_port() with port 0, meaning no UDP
 * encapsulation.
 */
void
sctp_input(struct mbuf *m, int off)
{
#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP)
	struct ip *ip;
	struct sctphdr *sh;
	int offset;
	int cpu_to_use;
	uint32_t flowid, tag;

	if (mp_ncpus > 1) {
		if (m->m_flags & M_FLOWID) {
			/* Lower layer (NIC/driver) already classified the flow. */
			flowid = m->m_pkthdr.flowid;
		} else {
			/*
			 * No flow id built by lower layers fix it so we
			 * create one.
			 */
			/*
			 * Make sure the IP + SCTP common header is
			 * contiguous before reading the SCTP fields.
			 */
			ip = mtod(m, struct ip *);
			offset = off + sizeof(*sh);
			if (SCTP_BUF_LEN(m) < offset) {
				if ((m = m_pullup(m, offset)) == 0) {
					SCTP_STAT_INCR(sctps_hdrops);
					return;
				}
				ip = mtod(m, struct ip *);
			}
			sh = (struct sctphdr *)((caddr_t)ip + off);
			/* Derive a flow id from v_tag XOR the two ports. */
			tag = htonl(sh->v_tag);
			flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port);
			m->m_pkthdr.flowid = flowid;
			m->m_flags |= M_FLOWID;
		}
		cpu_to_use = sctp_cpuarry[flowid % mp_ncpus];
		sctp_queue_to_mcore(m, off, cpu_to_use);
		return;
	}
#endif
	/* Single-core (or non-MCORE) path: process inline, no UDP port. */
	sctp_input_with_port(m, off, 0);
}

#endif