sctp_input.c revision 172118
1/*- 2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
29 */ 30 31/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 172118 2007-09-10 17:06:25Z rrs $"); 35 36#include <netinet/sctp_os.h> 37#include <netinet/sctp_var.h> 38#include <netinet/sctp_sysctl.h> 39#include <netinet/sctp_pcb.h> 40#include <netinet/sctp_header.h> 41#include <netinet/sctputil.h> 42#include <netinet/sctp_output.h> 43#include <netinet/sctp_input.h> 44#include <netinet/sctp_auth.h> 45#include <netinet/sctp_indata.h> 46#include <netinet/sctp_asconf.h> 47#include <netinet/sctp_bsd_addr.h> 48#include <netinet/sctp_timer.h> 49 50 51 52static void 53sctp_stop_all_cookie_timers(struct sctp_tcb *stcb) 54{ 55 struct sctp_nets *net; 56 57 /* 58 * This now not only stops all cookie timers it also stops any INIT 59 * timers as well. This will make sure that the timers are stopped 60 * in all collision cases. 61 */ 62 SCTP_TCB_LOCK_ASSERT(stcb); 63 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 64 if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) { 65 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, 66 stcb->sctp_ep, 67 stcb, 68 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1); 69 } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) { 70 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, 71 stcb->sctp_ep, 72 stcb, 73 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2); 74 } 75 } 76} 77 78/* INIT handler */ 79static void 80sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh, 81 struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 82 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id) 83{ 84 struct sctp_init *init; 85 struct mbuf *op_err; 86 uint32_t init_limit; 87 88 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n", 89 stcb); 90 op_err = NULL; 91 init = &cp->init; 92 /* First are we accepting? 
*/ 93 if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) { 94 SCTPDBG(SCTP_DEBUG_INPUT2, 95 "sctp_handle_init: Abort, so_qlimit:%d\n", 96 inp->sctp_socket->so_qlimit); 97 /* 98 * FIX ME ?? What about TCP model and we have a 99 * match/restart case? 100 */ 101 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 102 vrf_id); 103 if (stcb) 104 *abort_no_unlock = 1; 105 return; 106 } 107 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) { 108 /* Invalid length */ 109 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 110 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 111 vrf_id); 112 if (stcb) 113 *abort_no_unlock = 1; 114 return; 115 } 116 /* validate parameters */ 117 if (init->initiate_tag == 0) { 118 /* protocol error... send abort */ 119 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 120 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 121 vrf_id); 122 if (stcb) 123 *abort_no_unlock = 1; 124 return; 125 } 126 if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) { 127 /* invalid parameter... send abort */ 128 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 129 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 130 vrf_id); 131 if (stcb) 132 *abort_no_unlock = 1; 133 return; 134 } 135 if (init->num_inbound_streams == 0) { 136 /* protocol error... send abort */ 137 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 138 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 139 vrf_id); 140 if (stcb) 141 *abort_no_unlock = 1; 142 return; 143 } 144 if (init->num_outbound_streams == 0) { 145 /* protocol error... 
send abort */ 146 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 147 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 148 vrf_id); 149 if (stcb) 150 *abort_no_unlock = 1; 151 return; 152 } 153 init_limit = offset + ntohs(cp->ch.chunk_length); 154 if (sctp_validate_init_auth_params(m, offset + sizeof(*cp), 155 init_limit)) { 156 /* auth parameter(s) error... send abort */ 157 sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id); 158 if (stcb) 159 *abort_no_unlock = 1; 160 return; 161 } 162 /* send an INIT-ACK w/cookie */ 163 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n"); 164 sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id); 165} 166 167/* 168 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error 169 */ 170 171int 172sctp_is_there_unsent_data(struct sctp_tcb *stcb) 173{ 174 int unsent_data = 0; 175 struct sctp_stream_queue_pending *sp; 176 struct sctp_stream_out *strq; 177 struct sctp_association *asoc; 178 179 /* 180 * This function returns the number of streams that have true unsent 181 * data on them. Note that as it looks through it will clean up any 182 * places that have old data that has been sent but left at top of 183 * stream queue. 184 */ 185 asoc = &stcb->asoc; 186 SCTP_TCB_SEND_LOCK(stcb); 187 if (!TAILQ_EMPTY(&asoc->out_wheel)) { 188 /* Check to see if some data queued */ 189 TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) { 190 is_there_another: 191 /* sa_ignore FREED_MEMORY */ 192 sp = TAILQ_FIRST(&strq->outqueue); 193 if (sp == NULL) { 194 continue; 195 } 196 if ((sp->msg_is_complete) && 197 (sp->length == 0) && 198 (sp->sender_all_done)) { 199 /* 200 * We are doing differed cleanup. Last time 201 * through when we took all the data the 202 * sender_all_done was not set. 
203 */ 204 if (sp->put_last_out == 0) { 205 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n"); 206 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n", 207 sp->sender_all_done, 208 sp->length, 209 sp->msg_is_complete, 210 sp->put_last_out); 211 } 212 atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1); 213 TAILQ_REMOVE(&strq->outqueue, sp, next); 214 sctp_free_remote_addr(sp->net); 215 if (sp->data) { 216 sctp_m_freem(sp->data); 217 sp->data = NULL; 218 } 219 sctp_free_a_strmoq(stcb, sp); 220 goto is_there_another; 221 } else { 222 unsent_data++; 223 continue; 224 } 225 } 226 } 227 SCTP_TCB_SEND_UNLOCK(stcb); 228 return (unsent_data); 229} 230 231static int 232sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb, 233 struct sctp_nets *net) 234{ 235 struct sctp_init *init; 236 struct sctp_association *asoc; 237 struct sctp_nets *lnet; 238 unsigned int i; 239 240 init = &cp->init; 241 asoc = &stcb->asoc; 242 /* save off parameters */ 243 asoc->peer_vtag = ntohl(init->initiate_tag); 244 asoc->peers_rwnd = ntohl(init->a_rwnd); 245 if (TAILQ_FIRST(&asoc->nets)) { 246 /* update any ssthresh's that may have a default */ 247 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 248 lnet->ssthresh = asoc->peers_rwnd; 249 250 if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) { 251 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION); 252 } 253 } 254 } 255 SCTP_TCB_SEND_LOCK(stcb); 256 if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) { 257 unsigned int newcnt; 258 struct sctp_stream_out *outs; 259 struct sctp_stream_queue_pending *sp; 260 261 /* cut back on number of streams */ 262 newcnt = ntohs(init->num_inbound_streams); 263 /* This if is probably not needed but I am cautious */ 264 if (asoc->strmout) { 265 /* First make sure no data chunks are trapped */ 266 for (i = newcnt; i < asoc->pre_open_streams; i++) { 267 outs = &asoc->strmout[i]; 268 sp = TAILQ_FIRST(&outs->outqueue); 269 while (sp) { 270 
TAILQ_REMOVE(&outs->outqueue, sp, 271 next); 272 asoc->stream_queue_cnt--; 273 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, 274 stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, 275 sp, SCTP_SO_NOT_LOCKED); 276 if (sp->data) { 277 sctp_m_freem(sp->data); 278 sp->data = NULL; 279 } 280 sctp_free_remote_addr(sp->net); 281 sp->net = NULL; 282 /* Free the chunk */ 283 SCTP_PRINTF("sp:%p tcb:%p weird free case\n", 284 sp, stcb); 285 286 sctp_free_a_strmoq(stcb, sp); 287 /* sa_ignore FREED_MEMORY */ 288 sp = TAILQ_FIRST(&outs->outqueue); 289 } 290 } 291 } 292 /* cut back the count and abandon the upper streams */ 293 asoc->pre_open_streams = newcnt; 294 } 295 SCTP_TCB_SEND_UNLOCK(stcb); 296 asoc->streamoutcnt = asoc->pre_open_streams; 297 /* init tsn's */ 298 asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1; 299 if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) { 300 sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 301 } 302 /* This is the next one we expect */ 303 asoc->str_reset_seq_in = asoc->asconf_seq_in + 1; 304 305 asoc->mapping_array_base_tsn = ntohl(init->initial_tsn); 306 asoc->cumulative_tsn = asoc->asconf_seq_in; 307 asoc->last_echo_tsn = asoc->asconf_seq_in; 308 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 309 /* open the requested streams */ 310 311 if (asoc->strmin != NULL) { 312 /* Free the old ones */ 313 struct sctp_queued_to_read *ctl; 314 315 for (i = 0; i < asoc->streamincnt; i++) { 316 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); 317 while (ctl) { 318 TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next); 319 sctp_free_remote_addr(ctl->whoFrom); 320 ctl->whoFrom = NULL; 321 sctp_m_freem(ctl->data); 322 ctl->data = NULL; 323 sctp_free_a_readq(stcb, ctl); 324 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); 325 } 326 } 327 SCTP_FREE(asoc->strmin, SCTP_M_STRMI); 328 } 329 asoc->streamincnt = ntohs(init->num_outbound_streams); 330 if (asoc->streamincnt > MAX_SCTP_STREAMS) { 331 asoc->streamincnt = 
MAX_SCTP_STREAMS; 332 } 333 SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt * 334 sizeof(struct sctp_stream_in), SCTP_M_STRMI); 335 if (asoc->strmin == NULL) { 336 /* we didn't get memory for the streams! */ 337 SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n"); 338 return (-1); 339 } 340 for (i = 0; i < asoc->streamincnt; i++) { 341 asoc->strmin[i].stream_no = i; 342 asoc->strmin[i].last_sequence_delivered = 0xffff; 343 /* 344 * U-stream ranges will be set when the cookie is unpacked. 345 * Or for the INIT sender they are un set (if pr-sctp not 346 * supported) when the INIT-ACK arrives. 347 */ 348 TAILQ_INIT(&asoc->strmin[i].inqueue); 349 asoc->strmin[i].delivery_started = 0; 350 } 351 /* 352 * load_address_from_init will put the addresses into the 353 * association when the COOKIE is processed or the INIT-ACK is 354 * processed. Both types of COOKIE's existing and new call this 355 * routine. It will remove addresses that are no longer in the 356 * association (for the restarting case where addresses are 357 * removed). Up front when the INIT arrives we will discard it if it 358 * is a restart and new addresses have been added. 
359 */ 360 /* sa_ignore MEMLEAK */ 361 return (0); 362} 363 364/* 365 * INIT-ACK message processing/consumption returns value < 0 on error 366 */ 367static int 368sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, 369 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 370 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id) 371{ 372 struct sctp_association *asoc; 373 struct mbuf *op_err; 374 int retval, abort_flag; 375 uint32_t initack_limit; 376 377 /* First verify that we have no illegal param's */ 378 abort_flag = 0; 379 op_err = NULL; 380 381 op_err = sctp_arethere_unrecognized_parameters(m, 382 (offset + sizeof(struct sctp_init_chunk)), 383 &abort_flag, (struct sctp_chunkhdr *)cp); 384 if (abort_flag) { 385 /* Send an abort and notify peer */ 386 sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED); 387 *abort_no_unlock = 1; 388 return (-1); 389 } 390 asoc = &stcb->asoc; 391 /* process the peer's parameters in the INIT-ACK */ 392 retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net); 393 if (retval < 0) { 394 return (retval); 395 } 396 initack_limit = offset + ntohs(cp->ch.chunk_length); 397 /* load all addresses */ 398 if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen, 399 (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh, 400 NULL))) { 401 /* Huh, we should abort */ 402 SCTPDBG(SCTP_DEBUG_INPUT1, 403 "Load addresses from INIT causes an abort %d\n", 404 retval); 405 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 406 NULL, 0); 407 *abort_no_unlock = 1; 408 return (-1); 409 } 410 /* if the peer doesn't support asconf, flush the asconf queue */ 411 if (asoc->peer_supports_asconf == 0) { 412 struct sctp_asconf_addr *aparam; 413 414 while (!TAILQ_EMPTY(&asoc->asconf_queue)) { 415 /* sa_ignore FREED_MEMORY */ 416 aparam = TAILQ_FIRST(&asoc->asconf_queue); 417 TAILQ_REMOVE(&asoc->asconf_queue, aparam, next); 418 
SCTP_FREE(aparam, SCTP_M_ASC_ADDR); 419 } 420 } 421 stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs, 422 stcb->asoc.local_hmacs); 423 if (op_err) { 424 sctp_queue_op_err(stcb, op_err); 425 /* queuing will steal away the mbuf chain to the out queue */ 426 op_err = NULL; 427 } 428 /* extract the cookie and queue it to "echo" it back... */ 429 if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) { 430 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 431 stcb->asoc.overall_error_count, 432 0, 433 SCTP_FROM_SCTP_INPUT, 434 __LINE__); 435 } 436 stcb->asoc.overall_error_count = 0; 437 net->error_count = 0; 438 439 /* 440 * Cancel the INIT timer, We do this first before queueing the 441 * cookie. We always cancel at the primary to assue that we are 442 * canceling the timer started by the INIT which always goes to the 443 * primary. 444 */ 445 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb, 446 asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4); 447 448 /* calculate the RTO */ 449 net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy); 450 451 retval = sctp_send_cookie_echo(m, offset, stcb, net); 452 if (retval < 0) { 453 /* 454 * No cookie, we probably should send a op error. But in any 455 * case if there is no cookie in the INIT-ACK, we can 456 * abandon the peer, its broke. 
457 */ 458 if (retval == -3) { 459 /* We abort with an error of missing mandatory param */ 460 op_err = 461 sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM); 462 if (op_err) { 463 /* 464 * Expand beyond to include the mandatory 465 * param cookie 466 */ 467 struct sctp_inv_mandatory_param *mp; 468 469 SCTP_BUF_LEN(op_err) = 470 sizeof(struct sctp_inv_mandatory_param); 471 mp = mtod(op_err, 472 struct sctp_inv_mandatory_param *); 473 /* Subtract the reserved param */ 474 mp->length = 475 htons(sizeof(struct sctp_inv_mandatory_param) - 2); 476 mp->num_param = htonl(1); 477 mp->param = htons(SCTP_STATE_COOKIE); 478 mp->resv = 0; 479 } 480 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 481 sh, op_err, 0); 482 *abort_no_unlock = 1; 483 } 484 return (retval); 485 } 486 return (0); 487} 488 489static void 490sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp, 491 struct sctp_tcb *stcb, struct sctp_nets *net) 492{ 493 struct sockaddr_storage store; 494 struct sockaddr_in *sin; 495 struct sockaddr_in6 *sin6; 496 struct sctp_nets *r_net; 497 struct timeval tv; 498 499 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) { 500 /* Invalid length */ 501 return; 502 } 503 sin = (struct sockaddr_in *)&store; 504 sin6 = (struct sockaddr_in6 *)&store; 505 506 memset(&store, 0, sizeof(store)); 507 if (cp->heartbeat.hb_info.addr_family == AF_INET && 508 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) { 509 sin->sin_family = cp->heartbeat.hb_info.addr_family; 510 sin->sin_len = cp->heartbeat.hb_info.addr_len; 511 sin->sin_port = stcb->rport; 512 memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address, 513 sizeof(sin->sin_addr)); 514 } else if (cp->heartbeat.hb_info.addr_family == AF_INET6 && 515 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) { 516 sin6->sin6_family = cp->heartbeat.hb_info.addr_family; 517 sin6->sin6_len = cp->heartbeat.hb_info.addr_len; 518 sin6->sin6_port = stcb->rport; 519 memcpy(&sin6->sin6_addr, 
cp->heartbeat.hb_info.address, 520 sizeof(sin6->sin6_addr)); 521 } else { 522 return; 523 } 524 r_net = sctp_findnet(stcb, (struct sockaddr *)sin); 525 if (r_net == NULL) { 526 SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n"); 527 return; 528 } 529 if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) && 530 (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) && 531 (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) { 532 /* 533 * If the its a HB and it's random value is correct when can 534 * confirm the destination. 535 */ 536 r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 537 if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) { 538 stcb->asoc.primary_destination = r_net; 539 r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY; 540 r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 541 r_net = TAILQ_FIRST(&stcb->asoc.nets); 542 if (r_net != stcb->asoc.primary_destination) { 543 /* 544 * first one on the list is NOT the primary 545 * sctp_cmpaddr() is much more efficent if 546 * the primary is the first on the list, 547 * make it so. 
548 */ 549 TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); 550 TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); 551 } 552 /* Mobility adaptation */ 553 if ((sctp_is_mobility_feature_on(stcb->sctp_ep, 554 SCTP_MOBILITY_BASE) || 555 sctp_is_mobility_feature_on(stcb->sctp_ep, 556 SCTP_MOBILITY_FASTHANDOFF)) && 557 sctp_is_mobility_feature_on(stcb->sctp_ep, 558 SCTP_MOBILITY_PRIM_DELETED)) { 559 560 sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7); 561 if (sctp_is_mobility_feature_on(stcb->sctp_ep, 562 SCTP_MOBILITY_FASTHANDOFF)) { 563 sctp_assoc_immediate_retrans(stcb, 564 stcb->asoc.primary_destination); 565 } 566 if (sctp_is_mobility_feature_on(stcb->sctp_ep, 567 SCTP_MOBILITY_BASE)) { 568 sctp_move_chunks_from_deleted_prim(stcb, 569 stcb->asoc.primary_destination); 570 } 571 sctp_delete_prim_timer(stcb->sctp_ep, stcb, 572 stcb->asoc.deleted_primary); 573 } 574 } 575 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 576 stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED); 577 } 578 r_net->error_count = 0; 579 r_net->hb_responded = 1; 580 tv.tv_sec = cp->heartbeat.hb_info.time_value_1; 581 tv.tv_usec = cp->heartbeat.hb_info.time_value_2; 582 if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 583 r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE; 584 r_net->dest_state |= SCTP_ADDR_REACHABLE; 585 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 586 SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED); 587 /* now was it the primary? if so restore */ 588 if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) { 589 (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net); 590 } 591 } 592 /* 593 * JRS 5/14/07 - If CMT PF is on and the destination is in PF state, 594 * set the destination to active state and set the cwnd to one or 595 * two MTU's based on whether PF1 or PF2 is being used. 
If a T3 596 * timer is running, for the destination, stop the timer because a 597 * PF-heartbeat was received. 598 */ 599 if (sctp_cmt_on_off && sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF) == 600 SCTP_ADDR_PF) { 601 if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 602 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 603 stcb, net, 604 SCTP_FROM_SCTP_INPUT + SCTP_LOC_5); 605 } 606 net->dest_state &= ~SCTP_ADDR_PF; 607 net->cwnd = net->mtu * sctp_cmt_pf; 608 SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n", 609 net, net->cwnd); 610 } 611 /* Now lets do a RTO with this */ 612 r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy); 613} 614 615static void 616sctp_handle_abort(struct sctp_abort_chunk *cp, 617 struct sctp_tcb *stcb, struct sctp_nets *net) 618{ 619#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 620 struct socket *so; 621 622#endif 623 624 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n"); 625 if (stcb == NULL) 626 return; 627 628 /* stop any receive timers */ 629 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); 630 /* notify user of the abort and clean up... 
*/ 631 sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED); 632 /* free the tcb */ 633 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 634 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 635 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 636 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 637 } 638#ifdef SCTP_ASOCLOG_OF_TSNS 639 sctp_print_out_track_log(stcb); 640#endif 641#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 642 so = SCTP_INP_SO(stcb->sctp_ep); 643 atomic_add_int(&stcb->asoc.refcnt, 1); 644 SCTP_TCB_UNLOCK(stcb); 645 SCTP_SOCKET_LOCK(so, 1); 646 SCTP_TCB_LOCK(stcb); 647 atomic_subtract_int(&stcb->asoc.refcnt, 1); 648#endif 649 stcb->asoc.state |= SCTP_STATE_WAS_ABORTED; 650 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 651 SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); 652#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 653 SCTP_SOCKET_UNLOCK(so, 1); 654#endif 655 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n"); 656} 657 658static void 659sctp_handle_shutdown(struct sctp_shutdown_chunk *cp, 660 struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag) 661{ 662 struct sctp_association *asoc; 663 int some_on_streamwheel; 664 665#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 666 struct socket *so; 667 668#endif 669 670 SCTPDBG(SCTP_DEBUG_INPUT2, 671 "sctp_handle_shutdown: handling SHUTDOWN\n"); 672 if (stcb == NULL) 673 return; 674 asoc = &stcb->asoc; 675 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 676 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 677 return; 678 } 679 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) { 680 /* Shutdown NOT the expected size */ 681 return; 682 } else { 683 sctp_update_acked(stcb, cp, net, abort_flag); 684 } 685 if (asoc->control_pdapi) { 686 /* 687 * With a normal shutdown we assume the end of last record. 
688 */ 689 SCTP_INP_READ_LOCK(stcb->sctp_ep); 690 asoc->control_pdapi->end_added = 1; 691 asoc->control_pdapi->pdapi_aborted = 1; 692 asoc->control_pdapi = NULL; 693 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 694#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 695 so = SCTP_INP_SO(stcb->sctp_ep); 696 atomic_add_int(&stcb->asoc.refcnt, 1); 697 SCTP_TCB_UNLOCK(stcb); 698 SCTP_SOCKET_LOCK(so, 1); 699 SCTP_TCB_LOCK(stcb); 700 atomic_subtract_int(&stcb->asoc.refcnt, 1); 701 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 702 /* assoc was freed while we were unlocked */ 703 SCTP_SOCKET_UNLOCK(so, 1); 704 return; 705 } 706#endif 707 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 708#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 709 SCTP_SOCKET_UNLOCK(so, 1); 710#endif 711 } 712 /* goto SHUTDOWN_RECEIVED state to block new requests */ 713 if (stcb->sctp_socket) { 714 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 715 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) && 716 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) { 717 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED); 718 /* 719 * notify upper layer that peer has initiated a 720 * shutdown 721 */ 722 sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 723 724 /* reset time */ 725 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 726 } 727 } 728 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 729 /* 730 * stop the shutdown timer, since we WILL move to 731 * SHUTDOWN-ACK-SENT. 732 */ 733 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8); 734 } 735 /* Now is there unsent data on a stream somewhere? 
*/ 736 some_on_streamwheel = sctp_is_there_unsent_data(stcb); 737 738 if (!TAILQ_EMPTY(&asoc->send_queue) || 739 !TAILQ_EMPTY(&asoc->sent_queue) || 740 some_on_streamwheel) { 741 /* By returning we will push more data out */ 742 return; 743 } else { 744 /* no outstanding data to send, so move on... */ 745 /* send SHUTDOWN-ACK */ 746 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 747 /* move to SHUTDOWN-ACK-SENT state */ 748 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 749 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 750 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 751 } 752 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 753 754 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, 755 SCTP_FROM_SCTP_INPUT + SCTP_LOC_7); 756 /* start SHUTDOWN timer */ 757 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, 758 stcb, net); 759 } 760} 761 762static void 763sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp, 764 struct sctp_tcb *stcb, struct sctp_nets *net) 765{ 766 struct sctp_association *asoc; 767 768#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 769 struct socket *so; 770 771 so = SCTP_INP_SO(stcb->sctp_ep); 772#endif 773 SCTPDBG(SCTP_DEBUG_INPUT2, 774 "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n"); 775 if (stcb == NULL) 776 return; 777 778 asoc = &stcb->asoc; 779 /* process according to association state */ 780 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 781 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 782 /* unexpected SHUTDOWN-ACK... so ignore... */ 783 SCTP_TCB_UNLOCK(stcb); 784 return; 785 } 786 if (asoc->control_pdapi) { 787 /* 788 * With a normal shutdown we assume the end of last record. 
789 */ 790 SCTP_INP_READ_LOCK(stcb->sctp_ep); 791 asoc->control_pdapi->end_added = 1; 792 asoc->control_pdapi->pdapi_aborted = 1; 793 asoc->control_pdapi = NULL; 794 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 795#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 796 atomic_add_int(&stcb->asoc.refcnt, 1); 797 SCTP_TCB_UNLOCK(stcb); 798 SCTP_SOCKET_LOCK(so, 1); 799 SCTP_TCB_LOCK(stcb); 800 atomic_subtract_int(&stcb->asoc.refcnt, 1); 801 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 802 /* assoc was freed while we were unlocked */ 803 SCTP_SOCKET_UNLOCK(so, 1); 804 return; 805 } 806#endif 807 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 808#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 809 SCTP_SOCKET_UNLOCK(so, 1); 810#endif 811 } 812 /* are the queues empty? */ 813 if (!TAILQ_EMPTY(&asoc->send_queue) || 814 !TAILQ_EMPTY(&asoc->sent_queue) || 815 !TAILQ_EMPTY(&asoc->out_wheel)) { 816 sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED); 817 } 818 /* stop the timer */ 819 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9); 820 /* send SHUTDOWN-COMPLETE */ 821 sctp_send_shutdown_complete(stcb, net); 822 /* notify upper layer protocol */ 823 if (stcb->sctp_socket) { 824 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 825 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 826 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 827 /* Set the connected flag to disconnected */ 828 stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0; 829 } 830 } 831 SCTP_STAT_INCR_COUNTER32(sctps_shutdown); 832 /* free the TCB but first save off the ep */ 833#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 834 atomic_add_int(&stcb->asoc.refcnt, 1); 835 SCTP_TCB_UNLOCK(stcb); 836 SCTP_SOCKET_LOCK(so, 1); 837 SCTP_TCB_LOCK(stcb); 838 atomic_subtract_int(&stcb->asoc.refcnt, 1); 839#endif 840 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 841 SCTP_FROM_SCTP_INPUT + 
	    SCTP_LOC_10);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
}

/*
 * Skip past the param header and then we will find the chunk that caused the
 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
 * our peer must be broken.
 */
static void
sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
    struct sctp_nets *net)
{
	struct sctp_chunkhdr *chk;

	/*
	 * The unrecognized-chunk error cause carries the offending chunk
	 * header immediately after the param header.
	 */
	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
	switch (chk->chunk_type) {
	case SCTP_ASCONF_ACK:
	case SCTP_ASCONF:
		/* Peer can't do ASCONF; tear down any pending ASCONF state. */
		sctp_asconf_cleanup(stcb, net);
		break;
	case SCTP_FORWARD_CUM_TSN:
		/* Peer can't do FWD-TSN; stop using PR-SCTP with it. */
		stcb->asoc.peer_supports_prsctp = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support chunk type %d(%x)??\n",
		    chk->chunk_type, (uint32_t) chk->chunk_type);
		break;
	}
}

/*
 * Skip past the param header and then we will find the param that caused the
 * problem. There are a number of param's in a ASCONF OR the prsctp param
 * these will turn of specific features.
 */
static void
sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
{
	struct sctp_paramhdr *pbad;

	/* The rejected parameter header follows the error-cause header. */
	pbad = phdr + 1;
	switch (ntohs(pbad->param_type)) {
		/* pr-sctp draft */
	case SCTP_PRSCTP_SUPPORTED:
		stcb->asoc.peer_supports_prsctp = 0;
		break;
	case SCTP_SUPPORTED_CHUNK_EXT:
		break;
		/* draft-ietf-tsvwg-addip-sctp */
	case SCTP_ECN_NONCE_SUPPORTED:
		/* Peer rejected ECN nonce; disable all ECN use. */
		stcb->asoc.peer_supports_ecn_nonce = 0;
		stcb->asoc.ecn_nonce_allowed = 0;
		stcb->asoc.ecn_allowed = 0;
		break;
	case SCTP_ADD_IP_ADDRESS:
	case SCTP_DEL_IP_ADDRESS:
	case SCTP_SET_PRIM_ADDR:
		stcb->asoc.peer_supports_asconf = 0;
		break;
	case SCTP_SUCCESS_REPORT:
	case SCTP_ERROR_CAUSE_IND:
		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Turning off ASCONF to this strange peer\n");
		stcb->asoc.peer_supports_asconf = 0;
		break;
	default:
		/*
		 * NOTE(review): this prints param_type in network byte
		 * order (no ntohs) unlike the switch above -- confirm
		 * intended.
		 */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support param type %d(%x)??\n",
		    pbad->param_type, (uint32_t) pbad->param_type);
		break;
	}
}

/*
 * Walk the error causes inside an OPERATION-ERROR chunk and react to each.
 * Returns 0 normally, -1 if the association was freed (stale cookie limit).
 */
static int
sctp_handle_error(struct sctp_chunkhdr *ch,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int chklen;
	struct sctp_paramhdr *phdr;
	uint16_t error_type;
	uint16_t error_len;
	struct sctp_association *asoc;
	int adjust;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/* parse through all of the errors and process */
	asoc = &stcb->asoc;
	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
	    sizeof(struct sctp_chunkhdr));
	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
		/* Process an Error Cause */
		error_type = ntohs(phdr->param_type);
		error_len = ntohs(phdr->param_length);
		if ((error_len > chklen) || (error_len == 0)) {
			/* invalid param length for this param */
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
			    chklen, error_len);
			return (0);
		}
		switch (error_type) {
		case SCTP_CAUSE_INVALID_STREAM:
		case SCTP_CAUSE_MISSING_PARAM:
		case SCTP_CAUSE_INVALID_PARAM:
		case SCTP_CAUSE_NO_USER_DATA:
			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
			    error_type);
			break;
		case SCTP_CAUSE_STALE_COOKIE:
			/*
			 * We only act if we have echoed a cookie and are
			 * waiting.
			 */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
				int *p;

				/*
				 * NOTE(review): this reads 4 bytes past the
				 * param header without checking that
				 * error_len covers them -- verify the
				 * measure-of-staleness field is guaranteed
				 * present here.
				 */
				p = (int *)((caddr_t)phdr + sizeof(*phdr));
				/* Save the time doubled */
				asoc->cookie_preserve_req = ntohl(*p) << 1;
				asoc->stale_cookie_count++;
				if (asoc->stale_cookie_count >
				    asoc->max_init_times) {
					sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
					/* now free the asoc */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					/*
					 * Socket lock must be taken before
					 * the TCB lock; hold a ref across
					 * the unlock/relock window.
					 */
					so = SCTP_INP_SO(stcb->sctp_ep);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					return (-1);
				}
				/* blast back to INIT state */
				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
				asoc->state |= SCTP_STATE_COOKIE_WAIT;

				sctp_stop_all_cookie_timers(stcb);
				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
			}
			break;
		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
			/*
			 * Nothing we can do here, we don't do hostname
			 * addresses so if the peer does not like my IPv6
			 * (or IPv4 for that matter) it does not matter. If
			 * they don't support that type of address, they can
			 * NOT possibly get that packet type... i.e. with no
			 * IPv6 you can't recieve a IPv6 packet. so we can
			 * safely ignore this one. If we ever added support
			 * for HOSTNAME Addresses, then we would need to do
			 * something here.
			 */
			break;
		case SCTP_CAUSE_UNRECOG_CHUNK:
			sctp_process_unrecog_chunk(stcb, phdr, net);
			break;
		case SCTP_CAUSE_UNRECOG_PARAM:
			sctp_process_unrecog_param(stcb, phdr);
			break;
		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
			/*
			 * We ignore this since the timer will drive out a
			 * new cookie anyway and there timer will drive us
			 * to send a SHUTDOWN_COMPLETE. We can't send one
			 * here since we don't have their tag.
			 */
			break;
		case SCTP_CAUSE_DELETING_LAST_ADDR:
		case SCTP_CAUSE_RESOURCE_SHORTAGE:
		case SCTP_CAUSE_DELETING_SRC_ADDR:
			/*
			 * We should NOT get these here, but in a
			 * ASCONF-ACK.
			 */
			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
			    error_type);
			break;
		case SCTP_CAUSE_OUT_OF_RESC:
			/*
			 * And what, pray tell do we do with the fact that
			 * the peer is out of resources? Not really sure we
			 * could do anything but abort. I suspect this
			 * should have came WITH an abort instead of in a
			 * OP-ERROR.
			 */
			break;
		default:
			SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
			    error_type);
			break;
		}
		/*
		 * Advance to the next cause, padded to a 4-byte boundary.
		 * NOTE(review): SCTP_SIZE32 rounds up, so adjust can exceed
		 * chklen and drive it negative; the (size_t) cast in the
		 * loop condition would then keep iterating -- verify the
		 * length check above always prevents this.
		 */
		adjust = SCTP_SIZE32(error_len);
		chklen -= adjust;
		phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
	}
	return (0);
}

/*
 * Validate and process an INIT-ACK chunk: sanity-check the mandatory
 * fields, then act on it only in COOKIE-WAIT state (move to
 * COOKIE-ECHOED). Returns 0 on success, -1 on error/discard; sets
 * *abort_no_unlock when an ABORT was sent and the caller must not unlock.
 */
static int
sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_init_ack *init_ack;
	struct mbuf *op_err;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_init_ack: handling INIT-ACK\n");

	if (stcb == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_init_ack: TCB is null\n");
		return (-1);
	}
	/*
	 * NOTE(review): the abort calls below pass 0 as the last argument
	 * where other callers pass vrf_id -- confirm intended.
	 */
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
		/* Invalid length */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	init_ack = &cp->init;
	/* validate parameters */
	if (init_ack->initiate_tag == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_inbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_outbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* process according to association state... */
	switch (stcb->asoc.state & SCTP_STATE_MASK) {
	case SCTP_STATE_COOKIE_WAIT:
		/* this is the expected state for this chunk */
		/* process the INIT-ACK parameters */
		if (stcb->asoc.primary_destination->dest_state &
		    SCTP_ADDR_UNCONFIRMED) {
			/*
			 * The primary is where we sent the INIT, we can
			 * always consider it confirmed when the INIT-ACK is
			 * returned. Do this before we load addresses
			 * though.
			 */
			stcb->asoc.primary_destination->dest_state &=
			    ~SCTP_ADDR_UNCONFIRMED;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
		}
		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
		    net, abort_no_unlock, vrf_id) < 0) {
			/* error in parsing parameters */
			return (-1);
		}
		/* update our state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);

		/* reset the RTO calc */
		if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		/*
		 * collapse the init timer back in case of a exponential
		 * backoff
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
		    stcb, net);
		/*
		 * the send at the end of the inbound data processing will
		 * cause the cookie to be sent
		 */
		break;
	case SCTP_STATE_SHUTDOWN_SENT:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_COOKIE_ECHOED:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_OPEN:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_EMPTY:
	case SCTP_STATE_INUSE:
	default:
		/* incorrect state... discard */
		return (-1);
		break;		/* NOTREACHED */
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
	return (0);
}


/*
 * handle a state cookie for an existing association m: input packet mbuf
 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
 * "split" mbuf and the cookie signature does not exist offset: offset into
 * mbuf to the cookie-echo chunk
 *
 * Implements the RFC 4960 section 5.2.4 collision table (cases A-D) by
 * comparing the vtags/tie-tags in the cookie against the existing
 * association. asoc->cookie_how[] records which path was taken (debug aid).
 * Returns the (possibly updated) stcb, or NULL when the cookie is discarded.
 */
static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	int chk_length;
	int init_offset, initack_offset, i;
	int retval;
	int spec_flag = 0;
	uint32_t how_indx;

	/* I know that the TCB is non-NULL from the caller */
	asoc = &stcb->asoc;
	/* find the first free cookie_how slot for this association */
	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
		if (asoc->cookie_how[how_indx] == 0)
			break;
	}
	if (how_indx < sizeof(asoc->cookie_how)) {
		asoc->cookie_how[how_indx] = 1;
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* SHUTDOWN came in after sending INIT-ACK */
		struct mbuf *op_err;
		struct sctp_paramhdr *ph;

		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (op_err == NULL) {
			/* FOOBAR */
			return (NULL);
		}
		/* pre-reserve some space */
		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr));
		/* Set the len */
		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
		ph = mtod(op_err, struct sctp_paramhdr *);
		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
		ph->param_length = htons(sizeof(struct sctp_paramhdr));
		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
		    vrf_id);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 2;
		return (NULL);
	}
	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);

	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		return (NULL);
	}
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
		/*
		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
		 * to get into the OPEN state
		 */
		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
			/*-
			 * Opps, this means that we somehow generated two vtag's
			 * the same. I.e. we did:
			 *  Us               Peer
			 *   <---INIT(tag=a)------
			 *   ----INIT-ACK(tag=t)-->
			 *   ----INIT(tag=t)------> *1
			 *   <---INIT-ACK(tag=a)---
			 *   <----CE(tag=t)------------- *2
			 *
			 * At point *1 we should be generating a different
			 * tag t'. Which means we would throw away the CE and send
			 * ours instead. Basically this is case C (throw away side).
			 */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 17;
			return (NULL);

		}
		switch SCTP_GET_STATE
			(asoc) {
		case SCTP_STATE_COOKIE_WAIT:
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * INIT was sent but got a COOKIE_ECHO with the
			 * correct tags... just accept it...but we must
			 * process the init so that we can make sure we have
			 * the right seq no's.
			 */
			/* First we must process the INIT !! */
			retval = sctp_process_init(init_cp, stcb, net);
			if (retval < 0) {
				if (how_indx < sizeof(asoc->cookie_how))
					asoc->cookie_how[how_indx] = 3;
				return (NULL);
			}
			/* we have already processed the INIT so no problem */
			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
			/* update current state */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);

			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
			}
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
			sctp_stop_all_cookie_timers(stcb);
			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)
			    ) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				struct socket *so;

#endif
				/*
				 * Here is where collision would go if we
				 * did a connect() and instead got a
				 * init/init-ack/cookie done before the
				 * init-ack came back..
				 */
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				/*
				 * Socket lock before TCB lock; keep a ref
				 * so the assoc can't vanish while unlocked.
				 */
				so = SCTP_INP_SO(stcb->sctp_ep);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_add_int(&stcb->asoc.refcnt, -1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return (NULL);
				}
#endif
				soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
			}
			/* notify upper layer */
			*notification = SCTP_NOTIFY_ASSOC_UP;
			/*
			 * since we did not send a HB make sure we don't
			 * double things
			 */
			net->hb_responded = 1;
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &cookie->time_entered, sctp_align_unsafe_makecopy);

			if (stcb->asoc.sctp_autoclose_ticks &&
			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
				    inp, stcb, NULL);
			}
			break;
		default:
			/*
			 * we're in the OPEN state (or beyond), so peer must
			 * have simply lost the COOKIE-ACK
			 */
			break;
		}		/* end switch */
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * We ignore the return code here.. not sure if we should
		 * somehow abort.. but we do have an existing asoc. This
		 * really should not fail.
		 */
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 4;
			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 5;
		return (stcb);
	}
	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
	    cookie->tie_tag_my_vtag == 0 &&
	    cookie->tie_tag_peer_vtag == 0) {
		/*
		 * case C in Section 5.2.4 Table 2: XMOO silently discard
		 */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 6;
		return (NULL);
	}
	if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
	    (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
	    init_cp->init.initiate_tag == 0)) {
		/*
		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
		 * should be ok, re-accept peer info
		 */
		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
			/*
			 * Extension of case C. If we hit this, then the
			 * random number generator returned the same vtag
			 * when we first sent our INIT-ACK and when we later
			 * sent our INIT. The side with the seq numbers that
			 * are different will be the one that normnally
			 * would have hit case C. This in effect "extends"
			 * our vtags in this collision case to be 64 bits.
			 * The same collision could occur aka you get both
			 * vtag and seq number the same twice in a row.. but
			 * is much less likely. If it did happen then we
			 * would proceed through and bring up the assoc.. we
			 * may end up with the wrong stream setup however..
			 * which would be bad.. but there is no way to
			 * tell.. until we send on a stream that does not
			 * exist :-)
			 */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 7;

			return (NULL);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 8;
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;
		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    NULL);
		}
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);

		/* Note last_cwr_tsn? where is this used? */
		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
			/*
			 * Ok the peer probably discarded our data (if we
			 * echoed a cookie+data). So anything on the
			 * sent_queue should be marked for retransmit, we
			 * may not get something to kick us so it COULD
			 * still take a timeout to move these.. but it can't
			 * hurt to mark them.
			 */
			struct sctp_tmit_chunk *chk;

			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if (chk->sent < SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_flight_size_decrease(chk);
					sctp_total_flight_decrease(stcb, chk);
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
					spec_flag++;
				}
			}

		}
		/* process the INIT info (peer's info) */
		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 9;
			return (NULL);
		}
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 10;
			return (NULL);
		}
		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
			*notification = SCTP_NOTIFY_ASSOC_UP;

			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				struct socket *so;

#endif
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				so = SCTP_INP_SO(stcb->sctp_ep);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_add_int(&stcb->asoc.refcnt, -1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return (NULL);
				}
#endif
				soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
			}
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
			/*
			 * NOTE(review): sctps_activeestab appears to be
			 * incremented a second time here regardless of the
			 * branch above -- looks like a double count;
			 * confirm.
			 */
			SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
		} else {
			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		}
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (spec_flag) {
			/*
			 * only if we have retrans set do we do this. What
			 * this call does is get only the COOKIE-ACK out and
			 * then when we return the normal call to
			 * sctp_chunk_output will get the retrans out behind
			 * this.
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 11;

		return (stcb);
	}
	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
	    cookie->tie_tag_peer_vtag != 0) {
		struct sctpasochead *head;

		/*
		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
		 */
		/* temp code */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 12;
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);

		*sac_assoc_id = sctp_get_associd(stcb);
		/* notify upper layer */
		*notification = SCTP_NOTIFY_ASSOC_RESTART;
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		}
		/*
		 * NOTE(review): restartestab/collisionestab are bumped with
		 * the GAUGE32 macro here but with COUNTER32 elsewhere in
		 * this file -- confirm which is intended.
		 */
		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
		}
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
			/* move to OPEN state, if not in SHUTDOWN_SENT */
			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		}
		/* reset local sequencing state for the restarted peer */
		asoc->pre_open_streams =
		    ntohs(initack_cp->init.num_outbound_streams);
		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;

		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;

		asoc->str_reset_seq_in = asoc->init_seq_number;

		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
		if (asoc->mapping_array) {
			memset(asoc->mapping_array, 0,
			    asoc->mapping_array_size);
		}
		/*
		 * Re-hashing under new vtag requires INP_INFO write lock;
		 * drop the TCB lock first to keep lock ordering (ref held
		 * above keeps the assoc alive).
		 */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_INFO_WLOCK();
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* send up all the data */
		SCTP_TCB_SEND_LOCK(stcb);

		sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].stream_no = i;
			stcb->asoc.strmout[i].next_sequence_sent = 0;
			stcb->asoc.strmout[i].last_msg_incomplete = 0;
		}
		/* process the INIT-ACK info (my info) */
		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);

		/* pull from vtag hash */
		LIST_REMOVE(stcb, sctp_asocs);
		/* re-insert to new vtag position */
		head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
		    sctppcbinfo.hashasocmark)];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);

		/* Is this the first restart? */
		if (stcb->asoc.in_restart_hash == 0) {
			/* Ok add it to assoc_id vtag hash */
			head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
			    sctppcbinfo.hashrestartmark)];
			LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
			stcb->asoc.in_restart_hash = 1;
		}
		/* process the INIT info (peer's info) */
		SCTP_TCB_SEND_UNLOCK(stcb);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		SCTP_INP_INFO_WUNLOCK();

		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 13;

			return (NULL);
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 14;

			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 15;

		return (stcb);
	}
	if (how_indx < sizeof(asoc->cookie_how))
		asoc->cookie_how[how_indx] = 16;
	/* all other cases... */
	return (NULL);
}


/*
 * handle a state cookie for a new association m: input packet mbuf chain--
 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
 * and the cookie signature does not exist offset: offset into mbuf to the
 * cookie-echo chunk length: length of the cookie chunk to: where the init
 * was from returns a new TCB
 */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint32_t vrf_id)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sockaddr_storage sa_store;
	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_association *asoc;
	int chk_length;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;
	uint32_t old_tag;
	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(inp);
#endif

	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value. This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and popluate
	 */

	/*
	 * Here we do a trick, we set in NULL for the proc/thread argument.
	 * We do this since in effect we only use the p argument when the
	 * socket is unbound and we must do an implicit bind. Since we are
	 * getting a cookie, we cannot be unbound.
	 */
	stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id,
	    (struct thread *)NULL
	    );
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);

		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id);
		return (NULL);
	}
	/* get the correct sctp_nets */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->ipv4_local_scope = cookie->ipv4_scope;
	asoc->site_scope = cookie->site_scope;
	asoc->local_scope = cookie->local_scope;
	asoc->loopback_scope = cookie->loopback_scope;

	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
		struct mbuf *op_err;

		/*
		 * Houston we have a problem. The EP changed while the
		 * cookie was in flight. Only recourse is to abort the
		 * association.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		/*
		 * NOTE(review): SCTP_LOC_16 is also used by other
		 * free_assoc call sites in this file -- duplicate location
		 * code; confirm.
		 */
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* process the INIT-ACK info (my info) */
	old_tag = asoc->my_vtag;
	asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/* process the INIT info (peer's info) */
	if (netp)
		retval = sctp_process_init(init_cp, stcb, *netp);
	else
		retval = 0;
	if (retval < 0) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* load all addresses */
	if (sctp_load_addresses_from_init(stcb, m, iphlen,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
	    init_src)) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		auth = (struct
sctp_auth_chunk *) 1909 sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf); 1910 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) { 1911 /* auth HMAC failed, dump the assoc and packet */ 1912 SCTPDBG(SCTP_DEBUG_AUTH1, 1913 "COOKIE-ECHO: AUTH failed\n"); 1914 atomic_add_int(&stcb->asoc.refcnt, 1); 1915#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1916 SCTP_TCB_UNLOCK(stcb); 1917 SCTP_SOCKET_LOCK(so, 1); 1918 SCTP_TCB_LOCK(stcb); 1919#endif 1920 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18); 1921#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1922 SCTP_SOCKET_UNLOCK(so, 1); 1923#endif 1924 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1925 return (NULL); 1926 } else { 1927 /* remaining chunks checked... good to go */ 1928 stcb->asoc.authenticated = 1; 1929 } 1930 } 1931 /* update current state */ 1932 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 1933 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1934 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1935 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1936 stcb->sctp_ep, stcb, asoc->primary_destination); 1937 } 1938 sctp_stop_all_cookie_timers(stcb); 1939 SCTP_STAT_INCR_COUNTER32(sctps_passiveestab); 1940 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1941 1942 /* 1943 * if we're doing ASCONFs, check to see if we have any new local 1944 * addresses that need to get added to the peer (eg. addresses 1945 * changed while cookie echo in flight). This needs to be done 1946 * after we go to the OPEN state to do the correct asconf 1947 * processing. else, make sure we have the correct addresses in our 1948 * lists 1949 */ 1950 1951 /* warning, we re-use sin, sin6, sa_store here! 
*/ 1952 /* pull in local_address (our "from" address) */ 1953 if (cookie->laddr_type == SCTP_IPV4_ADDRESS) { 1954 /* source addr is IPv4 */ 1955 sin = (struct sockaddr_in *)initack_src; 1956 memset(sin, 0, sizeof(*sin)); 1957 sin->sin_family = AF_INET; 1958 sin->sin_len = sizeof(struct sockaddr_in); 1959 sin->sin_addr.s_addr = cookie->laddress[0]; 1960 } else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) { 1961 /* source addr is IPv6 */ 1962 sin6 = (struct sockaddr_in6 *)initack_src; 1963 memset(sin6, 0, sizeof(*sin6)); 1964 sin6->sin6_family = AF_INET6; 1965 sin6->sin6_len = sizeof(struct sockaddr_in6); 1966 sin6->sin6_scope_id = cookie->scope_id; 1967 memcpy(&sin6->sin6_addr, cookie->laddress, 1968 sizeof(sin6->sin6_addr)); 1969 } else { 1970 atomic_add_int(&stcb->asoc.refcnt, 1); 1971#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1972 SCTP_TCB_UNLOCK(stcb); 1973 SCTP_SOCKET_LOCK(so, 1); 1974 SCTP_TCB_LOCK(stcb); 1975#endif 1976 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19); 1977#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1978 SCTP_SOCKET_UNLOCK(so, 1); 1979#endif 1980 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1981 return (NULL); 1982 } 1983 1984 sctp_check_address_list(stcb, m, 1985 initack_offset + sizeof(struct sctp_init_ack_chunk), 1986 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)), 1987 initack_src, cookie->local_scope, cookie->site_scope, 1988 cookie->ipv4_scope, cookie->loopback_scope); 1989 1990 1991 /* set up to notify upper layer */ 1992 *notification = SCTP_NOTIFY_ASSOC_UP; 1993 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1994 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1995 (inp->sctp_socket->so_qlimit == 0)) { 1996 /* 1997 * This is an endpoint that called connect() how it got a 1998 * cookie that is NEW is a bit of a mystery. It must be that 1999 * the INIT was sent, but before it got there.. 
a complete 2000 * INIT/INIT-ACK/COOKIE arrived. But of course then it 2001 * should have went to the other code.. not here.. oh well.. 2002 * a bit of protection is worth having.. 2003 */ 2004 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 2005#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2006 atomic_add_int(&stcb->asoc.refcnt, 1); 2007 SCTP_TCB_UNLOCK(stcb); 2008 SCTP_SOCKET_LOCK(so, 1); 2009 SCTP_TCB_LOCK(stcb); 2010 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2011 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2012 SCTP_SOCKET_UNLOCK(so, 1); 2013 return (NULL); 2014 } 2015#endif 2016 soisconnected(stcb->sctp_socket); 2017#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2018 SCTP_SOCKET_UNLOCK(so, 1); 2019#endif 2020 } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 2021 (inp->sctp_socket->so_qlimit)) { 2022 /* 2023 * We don't want to do anything with this one. Since it is 2024 * the listening guy. The timer will get started for 2025 * accepted connections in the caller. 
2026 */ 2027 ; 2028 } 2029 /* since we did not send a HB make sure we don't double things */ 2030 if ((netp) && (*netp)) 2031 (*netp)->hb_responded = 1; 2032 2033 if (stcb->asoc.sctp_autoclose_ticks && 2034 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 2035 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL); 2036 } 2037 /* respond with a COOKIE-ACK */ 2038 /* calculate the RTT */ 2039 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 2040 if ((netp) && (*netp)) { 2041 (*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp, 2042 &cookie->time_entered, sctp_align_unsafe_makecopy); 2043 } 2044 sctp_send_cookie_ack(stcb); 2045 return (stcb); 2046} 2047 2048 2049/* 2050 * handles a COOKIE-ECHO message stcb: modified to either a new or left as 2051 * existing (non-NULL) TCB 2052 */ 2053static struct mbuf * 2054sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 2055 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 2056 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 2057 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2058 struct sctp_tcb **locked_tcb, uint32_t vrf_id) 2059{ 2060 struct sctp_state_cookie *cookie; 2061 struct sockaddr_in6 sin6; 2062 struct sockaddr_in sin; 2063 struct sctp_tcb *l_stcb = *stcb; 2064 struct sctp_inpcb *l_inp; 2065 struct sockaddr *to; 2066 sctp_assoc_t sac_restart_id; 2067 struct sctp_pcb *ep; 2068 struct mbuf *m_sig; 2069 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 2070 uint8_t *sig; 2071 uint8_t cookie_ok = 0; 2072 unsigned int size_of_pkt, sig_offset, cookie_offset; 2073 unsigned int cookie_len; 2074 struct timeval now; 2075 struct timeval time_expires; 2076 struct sockaddr_storage dest_store; 2077 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store; 2078 struct ip *iph; 2079 int notification = 0; 2080 struct sctp_nets *netl; 2081 int had_a_existing_tcb = 0; 2082 2083 SCTPDBG(SCTP_DEBUG_INPUT2, 2084 "sctp_handle_cookie: handling 
COOKIE-ECHO\n"); 2085 2086 if (inp_p == NULL) { 2087 return (NULL); 2088 } 2089 /* First get the destination address setup too. */ 2090 iph = mtod(m, struct ip *); 2091 if (iph->ip_v == IPVERSION) { 2092 /* its IPv4 */ 2093 struct sockaddr_in *lsin; 2094 2095 lsin = (struct sockaddr_in *)(localep_sa); 2096 memset(lsin, 0, sizeof(*lsin)); 2097 lsin->sin_family = AF_INET; 2098 lsin->sin_len = sizeof(*lsin); 2099 lsin->sin_port = sh->dest_port; 2100 lsin->sin_addr.s_addr = iph->ip_dst.s_addr; 2101 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph); 2102 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 2103 /* its IPv6 */ 2104 struct ip6_hdr *ip6; 2105 struct sockaddr_in6 *lsin6; 2106 2107 lsin6 = (struct sockaddr_in6 *)(localep_sa); 2108 memset(lsin6, 0, sizeof(*lsin6)); 2109 lsin6->sin6_family = AF_INET6; 2110 lsin6->sin6_len = sizeof(struct sockaddr_in6); 2111 ip6 = mtod(m, struct ip6_hdr *); 2112 lsin6->sin6_port = sh->dest_port; 2113 lsin6->sin6_addr = ip6->ip6_dst; 2114 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen; 2115 } else { 2116 return (NULL); 2117 } 2118 2119 cookie = &cp->cookie; 2120 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 2121 cookie_len = ntohs(cp->ch.chunk_length); 2122 2123 if ((cookie->peerport != sh->src_port) && 2124 (cookie->myport != sh->dest_port) && 2125 (cookie->my_vtag != sh->v_tag)) { 2126 /* 2127 * invalid ports or bad tag. Note that we always leave the 2128 * v_tag in the header in network order and when we stored 2129 * it in the my_vtag slot we also left it in network order. 2130 * This maintains the match even though it may be in the 2131 * opposite byte order of the machine :-> 2132 */ 2133 return (NULL); 2134 } 2135 if (cookie_len > size_of_pkt || 2136 cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 2137 sizeof(struct sctp_init_chunk) + 2138 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 2139 /* cookie too long! 
or too small */ 2140 return (NULL); 2141 } 2142 /* 2143 * split off the signature into its own mbuf (since it should not be 2144 * calculated in the sctp_hmac_m() call). 2145 */ 2146 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 2147 if (sig_offset > size_of_pkt) { 2148 /* packet not correct size! */ 2149 /* XXX this may already be accounted for earlier... */ 2150 return (NULL); 2151 } 2152 m_sig = m_split(m, sig_offset, M_DONTWAIT); 2153 if (m_sig == NULL) { 2154 /* out of memory or ?? */ 2155 return (NULL); 2156 } 2157 /* 2158 * compute the signature/digest for the cookie 2159 */ 2160 ep = &(*inp_p)->sctp_ep; 2161 l_inp = *inp_p; 2162 if (l_stcb) { 2163 SCTP_TCB_UNLOCK(l_stcb); 2164 } 2165 SCTP_INP_RLOCK(l_inp); 2166 if (l_stcb) { 2167 SCTP_TCB_LOCK(l_stcb); 2168 } 2169 /* which cookie is it? */ 2170 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 2171 (ep->current_secret_number != ep->last_secret_number)) { 2172 /* it's the old cookie */ 2173 (void)sctp_hmac_m(SCTP_HMAC, 2174 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2175 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2176 } else { 2177 /* it's the current cookie */ 2178 (void)sctp_hmac_m(SCTP_HMAC, 2179 (uint8_t *) ep->secret_key[(int)ep->current_secret_number], 2180 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2181 } 2182 /* get the signature */ 2183 SCTP_INP_RUNLOCK(l_inp); 2184 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 2185 if (sig == NULL) { 2186 /* couldn't find signature */ 2187 sctp_m_freem(m_sig); 2188 return (NULL); 2189 } 2190 /* compare the received digest with the computed digest */ 2191 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 2192 /* try the old cookie? 
*/ 2193 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 2194 (ep->current_secret_number != ep->last_secret_number)) { 2195 /* compute digest with old */ 2196 (void)sctp_hmac_m(SCTP_HMAC, 2197 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2198 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2199 /* compare */ 2200 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 2201 cookie_ok = 1; 2202 } 2203 } else { 2204 cookie_ok = 1; 2205 } 2206 2207 /* 2208 * Now before we continue we must reconstruct our mbuf so that 2209 * normal processing of any other chunks will work. 2210 */ 2211 { 2212 struct mbuf *m_at; 2213 2214 m_at = m; 2215 while (SCTP_BUF_NEXT(m_at) != NULL) { 2216 m_at = SCTP_BUF_NEXT(m_at); 2217 } 2218 SCTP_BUF_NEXT(m_at) = m_sig; 2219 } 2220 2221 if (cookie_ok == 0) { 2222 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 2223 SCTPDBG(SCTP_DEBUG_INPUT2, 2224 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 2225 (uint32_t) offset, cookie_offset, sig_offset); 2226 return (NULL); 2227 } 2228 /* 2229 * check the cookie timestamps to be sure it's not stale 2230 */ 2231 (void)SCTP_GETTIME_TIMEVAL(&now); 2232 /* Expire time is in Ticks, so we convert to seconds */ 2233 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); 2234 time_expires.tv_usec = cookie->time_entered.tv_usec; 2235 if (timevalcmp(&now, &time_expires, >)) { 2236 /* cookie is stale! 
*/ 2237 struct mbuf *op_err; 2238 struct sctp_stale_cookie_msg *scm; 2239 uint32_t tim; 2240 2241 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg), 2242 0, M_DONTWAIT, 1, MT_DATA); 2243 if (op_err == NULL) { 2244 /* FOOBAR */ 2245 return (NULL); 2246 } 2247 /* pre-reserve some space */ 2248 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 2249 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 2250 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 2251 2252 /* Set the len */ 2253 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg); 2254 scm = mtod(op_err, struct sctp_stale_cookie_msg *); 2255 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE); 2256 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) + 2257 (sizeof(uint32_t)))); 2258 /* seconds to usec */ 2259 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 2260 /* add in usec */ 2261 if (tim == 0) 2262 tim = now.tv_usec - cookie->time_entered.tv_usec; 2263 scm->time_usec = htonl(tim); 2264 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag, 2265 vrf_id); 2266 return (NULL); 2267 } 2268 /* 2269 * Now we must see with the lookup address if we have an existing 2270 * asoc. This will only happen if we were in the COOKIE-WAIT state 2271 * and a INIT collided with us and somewhere the peer sent the 2272 * cookie on another address besides the single address our assoc 2273 * had for him. In this case we will have one of the tie-tags set at 2274 * least AND the address field in the cookie can be used to look it 2275 * up. 
2276 */ 2277 to = NULL; 2278 if (cookie->addr_type == SCTP_IPV6_ADDRESS) { 2279 memset(&sin6, 0, sizeof(sin6)); 2280 sin6.sin6_family = AF_INET6; 2281 sin6.sin6_len = sizeof(sin6); 2282 sin6.sin6_port = sh->src_port; 2283 sin6.sin6_scope_id = cookie->scope_id; 2284 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 2285 sizeof(sin6.sin6_addr.s6_addr)); 2286 to = (struct sockaddr *)&sin6; 2287 } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) { 2288 memset(&sin, 0, sizeof(sin)); 2289 sin.sin_family = AF_INET; 2290 sin.sin_len = sizeof(sin); 2291 sin.sin_port = sh->src_port; 2292 sin.sin_addr.s_addr = cookie->address[0]; 2293 to = (struct sockaddr *)&sin; 2294 } else { 2295 /* This should not happen */ 2296 return (NULL); 2297 } 2298 if ((*stcb == NULL) && to) { 2299 /* Yep, lets check */ 2300 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL); 2301 if (*stcb == NULL) { 2302 /* 2303 * We should have only got back the same inp. If we 2304 * got back a different ep we have a problem. The 2305 * original findep got back l_inp and now 2306 */ 2307 if (l_inp != *inp_p) { 2308 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2309 } 2310 } else { 2311 if (*locked_tcb == NULL) { 2312 /* 2313 * In this case we found the assoc only 2314 * after we locked the create lock. This 2315 * means we are in a colliding case and we 2316 * must make sure that we unlock the tcb if 2317 * its one of the cases where we throw away 2318 * the incoming packets. 2319 */ 2320 *locked_tcb = *stcb; 2321 2322 /* 2323 * We must also increment the inp ref count 2324 * since the ref_count flags was set when we 2325 * did not find the TCB, now we found it 2326 * which reduces the refcount.. we must 2327 * raise it back out to balance it all :-) 2328 */ 2329 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2330 if ((*stcb)->sctp_ep != l_inp) { 2331 SCTP_PRINTF("Huh? 
ep:%p diff then l_inp:%p?\n", 2332 (*stcb)->sctp_ep, l_inp); 2333 } 2334 } 2335 } 2336 } 2337 if (to == NULL) 2338 return (NULL); 2339 2340 cookie_len -= SCTP_SIGNATURE_SIZE; 2341 if (*stcb == NULL) { 2342 /* this is the "normal" case... get a new TCB */ 2343 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie, 2344 cookie_len, *inp_p, netp, to, ¬ification, 2345 auth_skipped, auth_offset, auth_len, vrf_id); 2346 } else { 2347 /* this is abnormal... cookie-echo on existing TCB */ 2348 had_a_existing_tcb = 1; 2349 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh, 2350 cookie, cookie_len, *inp_p, *stcb, *netp, to, 2351 ¬ification, &sac_restart_id, vrf_id); 2352 } 2353 2354 if (*stcb == NULL) { 2355 /* still no TCB... must be bad cookie-echo */ 2356 return (NULL); 2357 } 2358 /* 2359 * Ok, we built an association so confirm the address we sent the 2360 * INIT-ACK to. 2361 */ 2362 netl = sctp_findnet(*stcb, to); 2363 /* 2364 * This code should in theory NOT run but 2365 */ 2366 if (netl == NULL) { 2367 /* TSNH! Huh, why do I need to add this address here? */ 2368 int ret; 2369 2370 ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE, 2371 SCTP_IN_COOKIE_PROC); 2372 netl = sctp_findnet(*stcb, to); 2373 } 2374 if (netl) { 2375 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2376 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2377 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2378 netl); 2379 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2380 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2381 } 2382 } 2383 if (*stcb) { 2384 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p, 2385 *stcb, NULL); 2386 } 2387 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2388 if (!had_a_existing_tcb || 2389 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2390 /* 2391 * If we have a NEW cookie or the connect never 2392 * reached the connected state during collision we 2393 * must do the TCP accept thing. 
2394 */ 2395 struct socket *so, *oso; 2396 struct sctp_inpcb *inp; 2397 2398 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2399 /* 2400 * For a restart we will keep the same 2401 * socket, no need to do anything. I THINK!! 2402 */ 2403 sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED); 2404 return (m); 2405 } 2406 oso = (*inp_p)->sctp_socket; 2407 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2408 SCTP_TCB_UNLOCK((*stcb)); 2409 so = sonewconn(oso, 0 2410 ); 2411 SCTP_TCB_LOCK((*stcb)); 2412 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2413 2414 if (so == NULL) { 2415 struct mbuf *op_err; 2416 2417#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2418 struct socket *pcb_so; 2419 2420#endif 2421 /* Too many sockets */ 2422 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2423 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2424 sctp_abort_association(*inp_p, NULL, m, iphlen, 2425 sh, op_err, vrf_id); 2426#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2427 pcb_so = SCTP_INP_SO(*inp_p); 2428 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2429 SCTP_TCB_UNLOCK((*stcb)); 2430 SCTP_SOCKET_LOCK(pcb_so, 1); 2431 SCTP_TCB_LOCK((*stcb)); 2432 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2433#endif 2434 (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); 2435#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2436 SCTP_SOCKET_UNLOCK(pcb_so, 1); 2437#endif 2438 return (NULL); 2439 } 2440 inp = (struct sctp_inpcb *)so->so_pcb; 2441 SCTP_INP_INCR_REF(inp); 2442 /* 2443 * We add the unbound flag here so that if we get an 2444 * soabort() before we get the move_pcb done, we 2445 * will properly cleanup. 
2446 */ 2447 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2448 SCTP_PCB_FLAGS_CONNECTED | 2449 SCTP_PCB_FLAGS_IN_TCPPOOL | 2450 SCTP_PCB_FLAGS_UNBOUND | 2451 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 2452 SCTP_PCB_FLAGS_DONT_WAKE); 2453 inp->sctp_features = (*inp_p)->sctp_features; 2454 inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features; 2455 inp->sctp_socket = so; 2456 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 2457 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 2458 inp->sctp_context = (*inp_p)->sctp_context; 2459 inp->inp_starting_point_for_iterator = NULL; 2460 /* 2461 * copy in the authentication parameters from the 2462 * original endpoint 2463 */ 2464 if (inp->sctp_ep.local_hmacs) 2465 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2466 inp->sctp_ep.local_hmacs = 2467 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 2468 if (inp->sctp_ep.local_auth_chunks) 2469 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2470 inp->sctp_ep.local_auth_chunks = 2471 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 2472 (void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys, 2473 &inp->sctp_ep.shared_keys); 2474 2475 /* 2476 * Now we must move it from one hash table to 2477 * another and get the tcb in the right place. 2478 */ 2479 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 2480 2481 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2482 SCTP_TCB_UNLOCK((*stcb)); 2483 2484 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT); 2485 SCTP_TCB_LOCK((*stcb)); 2486 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2487 2488 2489 /* 2490 * now we must check to see if we were aborted while 2491 * the move was going on and the lock/unlock 2492 * happened. 2493 */ 2494 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 2495 /* 2496 * yep it was, we leave the assoc attached 2497 * to the socket since the sctp_inpcb_free() 2498 * call will send an abort for us. 
2499 */ 2500 SCTP_INP_DECR_REF(inp); 2501 return (NULL); 2502 } 2503 SCTP_INP_DECR_REF(inp); 2504 /* Switch over to the new guy */ 2505 *inp_p = inp; 2506 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2507 2508 /* 2509 * Pull it from the incomplete queue and wake the 2510 * guy 2511 */ 2512#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2513 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2514 SCTP_TCB_UNLOCK((*stcb)); 2515 SCTP_SOCKET_LOCK(so, 1); 2516#endif 2517 soisconnected(so); 2518#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2519 SCTP_TCB_LOCK((*stcb)); 2520 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2521 SCTP_SOCKET_UNLOCK(so, 1); 2522#endif 2523 return (m); 2524 } 2525 } 2526 if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 2527 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2528 } 2529 return (m); 2530} 2531 2532static void 2533sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp, 2534 struct sctp_tcb *stcb, struct sctp_nets *net) 2535{ 2536 /* cp must not be used, others call this without a c-ack :-) */ 2537 struct sctp_association *asoc; 2538 2539 SCTPDBG(SCTP_DEBUG_INPUT2, 2540 "sctp_handle_cookie_ack: handling COOKIE-ACK\n"); 2541 if (stcb == NULL) 2542 return; 2543 2544 asoc = &stcb->asoc; 2545 2546 sctp_stop_all_cookie_timers(stcb); 2547 /* process according to association state */ 2548 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 2549 /* state change only needed when I am in right state */ 2550 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 2551 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 2552 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 2553 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 2554 stcb->sctp_ep, stcb, asoc->primary_destination); 2555 2556 } 2557 /* update RTO */ 2558 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 2559 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 2560 if (asoc->overall_error_count == 0) { 2561 net->RTO = 
sctp_calculate_rto(stcb, asoc, net, 2562 &asoc->time_entered, sctp_align_safe_nocopy); 2563 } 2564 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 2565 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2566 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2567 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2568#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2569 struct socket *so; 2570 2571#endif 2572 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 2573#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2574 so = SCTP_INP_SO(stcb->sctp_ep); 2575 atomic_add_int(&stcb->asoc.refcnt, 1); 2576 SCTP_TCB_UNLOCK(stcb); 2577 SCTP_SOCKET_LOCK(so, 1); 2578 SCTP_TCB_LOCK(stcb); 2579 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2580 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2581 SCTP_SOCKET_UNLOCK(so, 1); 2582 return; 2583 } 2584#endif 2585 soisconnected(stcb->sctp_socket); 2586#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2587 SCTP_SOCKET_UNLOCK(so, 1); 2588#endif 2589 } 2590 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, 2591 stcb, net); 2592 /* 2593 * since we did not send a HB make sure we don't double 2594 * things 2595 */ 2596 net->hb_responded = 1; 2597 2598 if (stcb->asoc.sctp_autoclose_ticks && 2599 sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) { 2600 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 2601 stcb->sctp_ep, stcb, NULL); 2602 } 2603 /* 2604 * send ASCONF if parameters are pending and ASCONFs are 2605 * allowed (eg. 
addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination);
#endif
		}
	}
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		if (chk) {
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, chk->whoTo);
		}
	}
}

/*
 * ECN-Echo (ECNE) chunk handler.  Validates the chunk length, resyncs the
 * ECN nonce state, locates the net the echoed TSN was sent on (falling back
 * to the primary destination), lets the pluggable CC module react at most
 * once per RTT window (gated by last_cwr_tsn), and always replies with a
 * CWR so a lost prior CWR gets refreshed.
 */
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	uint32_t tsn;

	/* sanity: an ECNE chunk has a fixed size; drop anything else */
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
		return;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	/* ECN Nonce stuff: need a resync and disable the nonce sum check */
	/* Also we make sure we disable the nonce_wait */
	lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (lchk == NULL) {
		/* nothing queued: resync at the next sequence to be sent */
		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
	} else {
		stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
	}
	stcb->asoc.nonce_wait_for_ecne = 0;
	stcb->asoc.nonce_sum_check = 0;

	/* Find where it was sent, if possible (sent_queue is TSN ordered) */
	net = NULL;
	lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
	while (lchk) {
		if (lchk->rec.data.TSN_seq == tsn) {
			net = lchk->whoTo;
			break;
		}
		/* passed the spot where tsn would sit: it is not queued */
		if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
			break;
		lchk = TAILQ_NEXT(lchk, sctp_next);
	}
	if (net == NULL)
		/* default is we use the primary */
		net = stcb->asoc.primary_destination;

	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
		/*
		 * JRS - Use the congestion control given in the pluggable
		 * CC module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net);
		/*
		 * we reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the resync_tsn.
		 */
		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer.
	 */
	sctp_send_cwr(stcb, net, tsn);
}

/*
 * CWR chunk handler: the peer acknowledged our ECN-Echo, so remove the
 * covered ECNE (if any) from the control send queue.
 */
static void
sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
{
	/*
	 * Here we get a CWR from the peer. We must look in the outqueue and
	 * make sure that we have a covered ECNE in the control chunk part.
	 * If so remove it.
	 */
	struct sctp_tmit_chunk *chk;
	struct sctp_ecne_chunk *ecne;

	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
			continue;
		}
		/*
		 * Look for and remove if it is the right TSN. Since there
		 * is only ONE ECNE on the control queue at any one time we
		 * don't need to worry about more than one!
		 */
		ecne = mtod(chk->data, struct sctp_ecne_chunk *);
		/*
		 * Note: the equality arm compares the TSNs still in network
		 * byte order, which is fine for equality only.
		 */
		if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn),
		    MAX_TSN) || (cp->tsn == ecne->tsn)) {
			/* this covers this ECNE, we can remove it */
			stcb->asoc.ecn_echo_cnt_onq--;
			TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk,
			    sctp_next);
			if (chk->data) {
				sctp_m_freem(chk->data);
				chk->data = NULL;
			}
			stcb->asoc.ctrl_queue_cnt--;
			sctp_free_a_chunk(stcb, chk);
			break;
		}
	}
}

/*
 * SHUTDOWN-COMPLETE chunk handler.  Called with the TCB lock held.  In
 * SHUTDOWN_ACK_SENT state this notifies the ULP, flushes any outbound
 * queues, stops the shutdown timer and frees the association; otherwise the
 * chunk is ignored.  Either way the TCB lock is released before returning
 * (explicitly on the ignore path, via sctp_free_assoc() on the normal path).
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* are the queues empty? they should be */
		if (!TAILQ_EMPTY(&asoc->send_queue) ||
		    !TAILQ_EMPTY(&asoc->sent_queue) ||
		    !TAILQ_EMPTY(&asoc->out_wheel)) {
			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
		}
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* socket lock must be taken before the TCB lock; juggle via refcnt */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}

/*
 * Process one chunk descriptor from a packet-drop report.  For DATA the
 * reported TSN is searched on the sent queue (first in TSN order, then
 * linearly) and, if still unacked, marked for retransmission.
 */
static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			while (tp1) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
				    MAX_TSN)) {
					/* not found */
					tp1 = NULL;
					break;
				}
				tp1 = TAILQ_NEXT(tp1, sctp_next);
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way, aka without paying
				 * attention to queue seq order.
2818 */ 2819 SCTP_STAT_INCR(sctps_pdrpdnfnd); 2820 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2821 while (tp1) { 2822 if (tp1->rec.data.TSN_seq == tsn) { 2823 /* found it */ 2824 break; 2825 } 2826 tp1 = TAILQ_NEXT(tp1, sctp_next); 2827 } 2828 } 2829 if (tp1 == NULL) { 2830 SCTP_STAT_INCR(sctps_pdrptsnnf); 2831 } 2832 if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) { 2833 uint8_t *ddp; 2834 2835 if ((stcb->asoc.peers_rwnd == 0) && 2836 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) { 2837 SCTP_STAT_INCR(sctps_pdrpdiwnp); 2838 return (0); 2839 } 2840 if (stcb->asoc.peers_rwnd == 0 && 2841 (flg & SCTP_FROM_MIDDLE_BOX)) { 2842 SCTP_STAT_INCR(sctps_pdrpdizrw); 2843 return (0); 2844 } 2845 ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+ 2846 sizeof(struct sctp_data_chunk)); 2847 { 2848 unsigned int iii; 2849 2850 for (iii = 0; iii < sizeof(desc->data_bytes); 2851 iii++) { 2852 if (ddp[iii] != desc->data_bytes[iii]) { 2853 SCTP_STAT_INCR(sctps_pdrpbadd); 2854 return (-1); 2855 } 2856 } 2857 } 2858 /* 2859 * We zero out the nonce so resync not 2860 * needed 2861 */ 2862 tp1->rec.data.ect_nonce = 0; 2863 2864 if (tp1->do_rtt) { 2865 /* 2866 * this guy had a RTO calculation 2867 * pending on it, cancel it 2868 */ 2869 tp1->do_rtt = 0; 2870 } 2871 SCTP_STAT_INCR(sctps_pdrpmark); 2872 if (tp1->sent != SCTP_DATAGRAM_RESEND) 2873 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2874 tp1->sent = SCTP_DATAGRAM_RESEND; 2875 /* 2876 * mark it as if we were doing a FR, since 2877 * we will be getting gap ack reports behind 2878 * the info from the router. 2879 */ 2880 tp1->rec.data.doing_fast_retransmit = 1; 2881 /* 2882 * mark the tsn with what sequences can 2883 * cause a new FR. 
2884 */ 2885 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 2886 tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 2887 } else { 2888 tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 2889 } 2890 2891 /* restart the timer */ 2892 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2893 stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24); 2894 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2895 stcb, tp1->whoTo); 2896 2897 /* fix counts and things */ 2898 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 2899 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP, 2900 tp1->whoTo->flight_size, 2901 tp1->book_size, 2902 (uintptr_t) stcb, 2903 tp1->rec.data.TSN_seq); 2904 } 2905 sctp_flight_size_decrease(tp1); 2906 sctp_total_flight_decrease(stcb, tp1); 2907 } { 2908 /* audit code */ 2909 unsigned int audit; 2910 2911 audit = 0; 2912 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 2913 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2914 audit++; 2915 } 2916 TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue, 2917 sctp_next) { 2918 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2919 audit++; 2920 } 2921 if (audit != stcb->asoc.sent_queue_retran_cnt) { 2922 SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n", 2923 audit, stcb->asoc.sent_queue_retran_cnt); 2924#ifndef SCTP_AUDITING_ENABLED 2925 stcb->asoc.sent_queue_retran_cnt = audit; 2926#endif 2927 } 2928 } 2929 } 2930 break; 2931 case SCTP_ASCONF: 2932 { 2933 struct sctp_tmit_chunk *asconf; 2934 2935 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue, 2936 sctp_next) { 2937 if (asconf->rec.chunk_id.id == SCTP_ASCONF) { 2938 break; 2939 } 2940 } 2941 if (asconf) { 2942 if (asconf->sent != SCTP_DATAGRAM_RESEND) 2943 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2944 asconf->sent = SCTP_DATAGRAM_RESEND; 2945 asconf->snd_count--; 2946 } 2947 } 2948 break; 2949 case SCTP_INITIATION: 2950 /* resend the INIT */ 2951 stcb->asoc.dropped_special_cnt++; 2952 if 
(stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) { 2953 /* 2954 * If we can get it in, in a few attempts we do 2955 * this, otherwise we let the timer fire. 2956 */ 2957 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, 2958 stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25); 2959 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 2960 } 2961 break; 2962 case SCTP_SELECTIVE_ACK: 2963 /* resend the sack */ 2964 sctp_send_sack(stcb); 2965 break; 2966 case SCTP_HEARTBEAT_REQUEST: 2967 /* resend a demand HB */ 2968 if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) { 2969 /* 2970 * Only retransmit if we KNOW we wont destroy the 2971 * tcb 2972 */ 2973 (void)sctp_send_hb(stcb, 1, net); 2974 } 2975 break; 2976 case SCTP_SHUTDOWN: 2977 sctp_send_shutdown(stcb, net); 2978 break; 2979 case SCTP_SHUTDOWN_ACK: 2980 sctp_send_shutdown_ack(stcb, net); 2981 break; 2982 case SCTP_COOKIE_ECHO: 2983 { 2984 struct sctp_tmit_chunk *cookie; 2985 2986 cookie = NULL; 2987 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, 2988 sctp_next) { 2989 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 2990 break; 2991 } 2992 } 2993 if (cookie) { 2994 if (cookie->sent != SCTP_DATAGRAM_RESEND) 2995 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2996 cookie->sent = SCTP_DATAGRAM_RESEND; 2997 sctp_stop_all_cookie_timers(stcb); 2998 } 2999 } 3000 break; 3001 case SCTP_COOKIE_ACK: 3002 sctp_send_cookie_ack(stcb); 3003 break; 3004 case SCTP_ASCONF_ACK: 3005 /* resend last asconf ack */ 3006 sctp_send_asconf_ack(stcb); 3007 break; 3008 case SCTP_FORWARD_CUM_TSN: 3009 send_forward_tsn(stcb, &stcb->asoc); 3010 break; 3011 /* can't do anything with these */ 3012 case SCTP_PACKET_DROPPED: 3013 case SCTP_INITIATION_ACK: /* this should not happen */ 3014 case SCTP_HEARTBEAT_ACK: 3015 case SCTP_ABORT_ASSOCIATION: 3016 case SCTP_OPERATION_ERROR: 3017 case SCTP_SHUTDOWN_COMPLETE: 3018 case SCTP_ECN_ECHO: 3019 case SCTP_ECN_CWR: 3020 default: 3021 break; 3022 } 
3023 return (0); 3024} 3025 3026void 3027sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 3028{ 3029 int i; 3030 uint16_t temp; 3031 3032 /* 3033 * We set things to 0xffff since this is the last delivered sequence 3034 * and we will be sending in 0 after the reset. 3035 */ 3036 3037 if (number_entries) { 3038 for (i = 0; i < number_entries; i++) { 3039 temp = ntohs(list[i]); 3040 if (temp >= stcb->asoc.streamincnt) { 3041 continue; 3042 } 3043 stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff; 3044 } 3045 } else { 3046 list = NULL; 3047 for (i = 0; i < stcb->asoc.streamincnt; i++) { 3048 stcb->asoc.strmin[i].last_sequence_delivered = 0xffff; 3049 } 3050 } 3051 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3052} 3053 3054static void 3055sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 3056{ 3057 int i; 3058 3059 if (number_entries == 0) { 3060 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3061 stcb->asoc.strmout[i].next_sequence_sent = 0; 3062 } 3063 } else if (number_entries) { 3064 for (i = 0; i < number_entries; i++) { 3065 uint16_t temp; 3066 3067 temp = ntohs(list[i]); 3068 if (temp >= stcb->asoc.streamoutcnt) { 3069 /* no such stream */ 3070 continue; 3071 } 3072 stcb->asoc.strmout[temp].next_sequence_sent = 0; 3073 } 3074 } 3075 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3076} 3077 3078 3079struct sctp_stream_reset_out_request * 3080sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk) 3081{ 3082 struct sctp_association *asoc; 3083 struct sctp_stream_reset_out_req *req; 3084 struct sctp_stream_reset_out_request *r; 3085 struct sctp_tmit_chunk *chk; 3086 int len, clen; 3087 3088 asoc = &stcb->asoc; 3089 if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 3090 asoc->stream_reset_outstanding = 0; 3091 return (NULL); 3092 } 3093 if 
(stcb->asoc.str_reset == NULL) { 3094 asoc->stream_reset_outstanding = 0; 3095 return (NULL); 3096 } 3097 chk = stcb->asoc.str_reset; 3098 if (chk->data == NULL) { 3099 return (NULL); 3100 } 3101 if (bchk) { 3102 /* he wants a copy of the chk pointer */ 3103 *bchk = chk; 3104 } 3105 clen = chk->send_size; 3106 req = mtod(chk->data, struct sctp_stream_reset_out_req *); 3107 r = &req->sr_req; 3108 if (ntohl(r->request_seq) == seq) { 3109 /* found it */ 3110 return (r); 3111 } 3112 len = SCTP_SIZE32(ntohs(r->ph.param_length)); 3113 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) { 3114 /* move to the next one, there can only be a max of two */ 3115 r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len); 3116 if (ntohl(r->request_seq) == seq) { 3117 return (r); 3118 } 3119 } 3120 /* that seq is not here */ 3121 return (NULL); 3122} 3123 3124static void 3125sctp_clean_up_stream_reset(struct sctp_tcb *stcb) 3126{ 3127 struct sctp_association *asoc; 3128 struct sctp_tmit_chunk *chk = stcb->asoc.str_reset; 3129 3130 if (stcb->asoc.str_reset == NULL) { 3131 return; 3132 } 3133 asoc = &stcb->asoc; 3134 3135 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26); 3136 TAILQ_REMOVE(&asoc->control_send_queue, 3137 chk, 3138 sctp_next); 3139 if (chk->data) { 3140 sctp_m_freem(chk->data); 3141 chk->data = NULL; 3142 } 3143 asoc->ctrl_queue_cnt--; 3144 sctp_free_a_chunk(stcb, chk); 3145 stcb->asoc.str_reset = NULL; 3146} 3147 3148 3149static int 3150sctp_handle_stream_reset_response(struct sctp_tcb *stcb, 3151 uint32_t seq, uint32_t action, 3152 struct sctp_stream_reset_response *respin) 3153{ 3154 uint16_t type; 3155 int lparm_len; 3156 struct sctp_association *asoc = &stcb->asoc; 3157 struct sctp_tmit_chunk *chk; 3158 struct sctp_stream_reset_out_request *srparam; 3159 int number_entries; 3160 3161 if (asoc->stream_reset_outstanding == 0) { 3162 /* duplicate */ 3163 return (0); 3164 } 3165 if (seq == 
stcb->asoc.str_reset_seq_out) { 3166 srparam = sctp_find_stream_reset(stcb, seq, &chk); 3167 if (srparam) { 3168 stcb->asoc.str_reset_seq_out++; 3169 type = ntohs(srparam->ph.param_type); 3170 lparm_len = ntohs(srparam->ph.param_length); 3171 if (type == SCTP_STR_RESET_OUT_REQUEST) { 3172 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t); 3173 asoc->stream_reset_out_is_outstanding = 0; 3174 if (asoc->stream_reset_outstanding) 3175 asoc->stream_reset_outstanding--; 3176 if (action == SCTP_STREAM_RESET_PERFORMED) { 3177 /* do it */ 3178 sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams); 3179 } else { 3180 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3181 } 3182 } else if (type == SCTP_STR_RESET_IN_REQUEST) { 3183 /* Answered my request */ 3184 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t); 3185 if (asoc->stream_reset_outstanding) 3186 asoc->stream_reset_outstanding--; 3187 if (action != SCTP_STREAM_RESET_PERFORMED) { 3188 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3189 } 3190 } else if (type == SCTP_STR_RESET_TSN_REQUEST) { 3191 /** 3192 * a) Adopt the new in tsn. 3193 * b) reset the map 3194 * c) Adopt the new out-tsn 3195 */ 3196 struct sctp_stream_reset_response_tsn *resp; 3197 struct sctp_forward_tsn_chunk fwdtsn; 3198 int abort_flag = 0; 3199 3200 if (respin == NULL) { 3201 /* huh ? 
*/ 3202 return (0); 3203 } 3204 if (action == SCTP_STREAM_RESET_PERFORMED) { 3205 resp = (struct sctp_stream_reset_response_tsn *)respin; 3206 asoc->stream_reset_outstanding--; 3207 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3208 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3209 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1); 3210 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3211 if (abort_flag) { 3212 return (1); 3213 } 3214 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1); 3215 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 3216 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn); 3217 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 3218 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn); 3219 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn; 3220 3221 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 3222 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 3223 3224 } 3225 } 3226 /* get rid of the request and get the request flags */ 3227 if (asoc->stream_reset_outstanding == 0) { 3228 sctp_clean_up_stream_reset(stcb); 3229 } 3230 } 3231 } 3232 return (0); 3233} 3234 3235static void 3236sctp_handle_str_reset_request_in(struct sctp_tcb *stcb, 3237 struct sctp_tmit_chunk *chk, 3238 struct sctp_stream_reset_in_request *req, int trunc) 3239{ 3240 uint32_t seq; 3241 int len, i; 3242 int number_entries; 3243 uint16_t temp; 3244 3245 /* 3246 * peer wants me to send a str-reset to him for my outgoing seq's if 3247 * seq_in is right. 
3248 */ 3249 struct sctp_association *asoc = &stcb->asoc; 3250 3251 seq = ntohl(req->request_seq); 3252 if (asoc->str_reset_seq_in == seq) { 3253 if (trunc) { 3254 /* Can't do it, since they exceeded our buffer size */ 3255 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3256 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3257 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3258 } else if (stcb->asoc.stream_reset_out_is_outstanding == 0) { 3259 len = ntohs(req->ph.param_length); 3260 number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t)); 3261 for (i = 0; i < number_entries; i++) { 3262 temp = ntohs(req->list_of_streams[i]); 3263 req->list_of_streams[i] = temp; 3264 } 3265 /* move the reset action back one */ 3266 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3267 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3268 sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams, 3269 asoc->str_reset_seq_out, 3270 seq, (asoc->sending_seq - 1)); 3271 asoc->stream_reset_out_is_outstanding = 1; 3272 asoc->str_reset = chk; 3273 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); 3274 stcb->asoc.stream_reset_outstanding++; 3275 } else { 3276 /* Can't do it, since we have sent one out */ 3277 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3278 asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER; 3279 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3280 } 3281 asoc->str_reset_seq_in++; 3282 } else if (asoc->str_reset_seq_in - 1 == seq) { 3283 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3284 } else if (asoc->str_reset_seq_in - 2 == seq) { 3285 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3286 } else { 3287 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3288 } 3289} 3290 3291static int 3292sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb, 3293 
struct sctp_tmit_chunk *chk, 3294 struct sctp_stream_reset_tsn_request *req) 3295{ 3296 /* reset all in and out and update the tsn */ 3297 /* 3298 * A) reset my str-seq's on in and out. B) Select a receive next, 3299 * and set cum-ack to it. Also process this selected number as a 3300 * fwd-tsn as well. C) set in the response my next sending seq. 3301 */ 3302 struct sctp_forward_tsn_chunk fwdtsn; 3303 struct sctp_association *asoc = &stcb->asoc; 3304 int abort_flag = 0; 3305 uint32_t seq; 3306 3307 seq = ntohl(req->request_seq); 3308 if (asoc->str_reset_seq_in == seq) { 3309 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3310 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3311 fwdtsn.ch.chunk_flags = 0; 3312 fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1); 3313 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3314 if (abort_flag) { 3315 return (1); 3316 } 3317 stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA; 3318 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 3319 stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1; 3320 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 3321 atomic_add_int(&stcb->asoc.sending_seq, 1); 3322 /* save off historical data for retrans */ 3323 stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0]; 3324 stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq; 3325 stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0]; 3326 stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn; 3327 3328 sctp_add_stream_reset_result_tsn(chk, 3329 ntohl(req->request_seq), 3330 SCTP_STREAM_RESET_PERFORMED, 3331 stcb->asoc.sending_seq, 3332 stcb->asoc.mapping_array_base_tsn); 3333 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 3334 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 3335 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0]; 3336 stcb->asoc.last_reset_action[0] = 
SCTP_STREAM_RESET_PERFORMED; 3337 3338 asoc->str_reset_seq_in++; 3339 } else if (asoc->str_reset_seq_in - 1 == seq) { 3340 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0], 3341 stcb->asoc.last_sending_seq[0], 3342 stcb->asoc.last_base_tsnsent[0] 3343 ); 3344 } else if (asoc->str_reset_seq_in - 2 == seq) { 3345 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1], 3346 stcb->asoc.last_sending_seq[1], 3347 stcb->asoc.last_base_tsnsent[1] 3348 ); 3349 } else { 3350 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3351 } 3352 return (0); 3353} 3354 3355static void 3356sctp_handle_str_reset_request_out(struct sctp_tcb *stcb, 3357 struct sctp_tmit_chunk *chk, 3358 struct sctp_stream_reset_out_request *req, int trunc) 3359{ 3360 uint32_t seq, tsn; 3361 int number_entries, len; 3362 struct sctp_association *asoc = &stcb->asoc; 3363 3364 seq = ntohl(req->request_seq); 3365 3366 /* now if its not a duplicate we process it */ 3367 if (asoc->str_reset_seq_in == seq) { 3368 len = ntohs(req->ph.param_length); 3369 number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t)); 3370 /* 3371 * the sender is resetting, handle the list issue.. we must 3372 * a) verify if we can do the reset, if so no problem b) If 3373 * we can't do the reset we must copy the request. c) queue 3374 * it, and setup the data in processor to trigger it off 3375 * when needed and dequeue all the queued data. 
3376 */ 3377 tsn = ntohl(req->send_reset_at_tsn); 3378 3379 /* move the reset action back one */ 3380 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3381 if (trunc) { 3382 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED); 3383 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3384 } else if ((tsn == asoc->cumulative_tsn) || 3385 (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) { 3386 /* we can do it now */ 3387 sctp_reset_in_stream(stcb, number_entries, req->list_of_streams); 3388 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3389 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3390 } else { 3391 /* 3392 * we must queue it up and thus wait for the TSN's 3393 * to arrive that are at or before tsn 3394 */ 3395 struct sctp_stream_reset_list *liste; 3396 int siz; 3397 3398 siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t)); 3399 SCTP_MALLOC(liste, struct sctp_stream_reset_list *, 3400 siz, SCTP_M_STRESET); 3401 if (liste == NULL) { 3402 /* gak out of memory */ 3403 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED); 3404 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3405 return; 3406 } 3407 liste->tsn = tsn; 3408 liste->number_entries = number_entries; 3409 memcpy(&liste->req, req, 3410 (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t)))); 3411 TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp); 3412 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3413 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3414 } 3415 asoc->str_reset_seq_in++; 3416 } else if ((asoc->str_reset_seq_in - 1) == seq) { 3417 /* 3418 * one seq back, just echo back last action since my 3419 * response was lost. 
3420 */ 3421 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3422 } else if ((asoc->str_reset_seq_in - 2) == seq) { 3423 /* 3424 * two seq back, just echo back last action since my 3425 * response was lost. 3426 */ 3427 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3428 } else { 3429 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3430 } 3431} 3432 3433#ifdef __GNUC__ 3434__attribute__((noinline)) 3435#endif 3436 static int 3437 sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset, 3438 struct sctp_stream_reset_out_req *sr_req) 3439{ 3440 int chk_length, param_len, ptype; 3441 struct sctp_paramhdr pstore; 3442 uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE]; 3443 3444 uint32_t seq; 3445 int num_req = 0; 3446 int trunc = 0; 3447 struct sctp_tmit_chunk *chk; 3448 struct sctp_chunkhdr *ch; 3449 struct sctp_paramhdr *ph; 3450 int ret_code = 0; 3451 int num_param = 0; 3452 3453 /* now it may be a reset or a reset-response */ 3454 chk_length = ntohs(sr_req->ch.chunk_length); 3455 3456 /* setup for adding the response */ 3457 sctp_alloc_a_chunk(stcb, chk); 3458 if (chk == NULL) { 3459 return (ret_code); 3460 } 3461 chk->rec.chunk_id.id = SCTP_STREAM_RESET; 3462 chk->rec.chunk_id.can_take_data = 0; 3463 chk->asoc = &stcb->asoc; 3464 chk->no_fr_allowed = 0; 3465 chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr); 3466 chk->book_size_scale = 0; 3467 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 3468 if (chk->data == NULL) { 3469strres_nochunk: 3470 if (chk->data) { 3471 sctp_m_freem(chk->data); 3472 chk->data = NULL; 3473 } 3474 sctp_free_a_chunk(stcb, chk); 3475 return (ret_code); 3476 } 3477 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 3478 3479 /* setup chunk parameters */ 3480 chk->sent = SCTP_DATAGRAM_UNSENT; 3481 chk->snd_count = 0; 3482 chk->whoTo = stcb->asoc.primary_destination; 3483 atomic_add_int(&chk->whoTo->ref_count, 1); 3484 3485 ch = 
mtod(chk->data, struct sctp_chunkhdr *); 3486 ch->chunk_type = SCTP_STREAM_RESET; 3487 ch->chunk_flags = 0; 3488 ch->chunk_length = htons(chk->send_size); 3489 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 3490 offset += sizeof(struct sctp_chunkhdr); 3491 while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) { 3492 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore); 3493 if (ph == NULL) 3494 break; 3495 param_len = ntohs(ph->param_length); 3496 if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) { 3497 /* bad param */ 3498 break; 3499 } 3500 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)), 3501 (uint8_t *) & cstore); 3502 ptype = ntohs(ph->param_type); 3503 num_param++; 3504 if (param_len > (int)sizeof(cstore)) { 3505 trunc = 1; 3506 } else { 3507 trunc = 0; 3508 } 3509 3510 if (num_param > SCTP_MAX_RESET_PARAMS) { 3511 /* hit the max of parameters already sorry.. 
*/ 3512 break; 3513 } 3514 if (ptype == SCTP_STR_RESET_OUT_REQUEST) { 3515 struct sctp_stream_reset_out_request *req_out; 3516 3517 req_out = (struct sctp_stream_reset_out_request *)ph; 3518 num_req++; 3519 if (stcb->asoc.stream_reset_outstanding) { 3520 seq = ntohl(req_out->response_seq); 3521 if (seq == stcb->asoc.str_reset_seq_out) { 3522 /* implicit ack */ 3523 (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL); 3524 } 3525 } 3526 sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc); 3527 } else if (ptype == SCTP_STR_RESET_IN_REQUEST) { 3528 struct sctp_stream_reset_in_request *req_in; 3529 3530 num_req++; 3531 3532 req_in = (struct sctp_stream_reset_in_request *)ph; 3533 3534 sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc); 3535 } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) { 3536 struct sctp_stream_reset_tsn_request *req_tsn; 3537 3538 num_req++; 3539 req_tsn = (struct sctp_stream_reset_tsn_request *)ph; 3540 3541 if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) { 3542 ret_code = 1; 3543 goto strres_nochunk; 3544 } 3545 /* no more */ 3546 break; 3547 } else if (ptype == SCTP_STR_RESET_RESPONSE) { 3548 struct sctp_stream_reset_response *resp; 3549 uint32_t result; 3550 3551 resp = (struct sctp_stream_reset_response *)ph; 3552 seq = ntohl(resp->response_seq); 3553 result = ntohl(resp->result); 3554 if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) { 3555 ret_code = 1; 3556 goto strres_nochunk; 3557 } 3558 } else { 3559 break; 3560 } 3561 offset += SCTP_SIZE32(param_len); 3562 chk_length -= SCTP_SIZE32(param_len); 3563 } 3564 if (num_req == 0) { 3565 /* we have no response free the stuff */ 3566 goto strres_nochunk; 3567 } 3568 /* ok we have a chunk to link in */ 3569 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, 3570 chk, 3571 sctp_next); 3572 stcb->asoc.ctrl_queue_cnt++; 3573 return (ret_code); 3574} 3575 3576/* 3577 * Handle a router or endpoints report of a packet loss, there 
are two ways 3578 * to handle this, either we get the whole packet and must disect it 3579 * ourselves (possibly with truncation and or corruption) or it is a summary 3580 * from a middle box that did the disectting for us. 3581 */ 3582static void 3583sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp, 3584 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit) 3585{ 3586 uint32_t bottle_bw, on_queue; 3587 uint16_t trunc_len; 3588 unsigned int chlen; 3589 unsigned int at; 3590 struct sctp_chunk_desc desc; 3591 struct sctp_chunkhdr *ch; 3592 3593 chlen = ntohs(cp->ch.chunk_length); 3594 chlen -= sizeof(struct sctp_pktdrop_chunk); 3595 /* XXX possible chlen underflow */ 3596 if (chlen == 0) { 3597 ch = NULL; 3598 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) 3599 SCTP_STAT_INCR(sctps_pdrpbwrpt); 3600 } else { 3601 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr)); 3602 chlen -= sizeof(struct sctphdr); 3603 /* XXX possible chlen underflow */ 3604 memset(&desc, 0, sizeof(desc)); 3605 } 3606 trunc_len = (uint16_t) ntohs(cp->trunc_len); 3607 if (trunc_len > limit) { 3608 trunc_len = limit; 3609 } 3610 /* now the chunks themselves */ 3611 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) { 3612 desc.chunk_type = ch->chunk_type; 3613 /* get amount we need to move */ 3614 at = ntohs(ch->chunk_length); 3615 if (at < sizeof(struct sctp_chunkhdr)) { 3616 /* corrupt chunk, maybe at the end? */ 3617 SCTP_STAT_INCR(sctps_pdrpcrupt); 3618 break; 3619 } 3620 if (trunc_len == 0) { 3621 /* we are supposed to have all of it */ 3622 if (at > chlen) { 3623 /* corrupt skip it */ 3624 SCTP_STAT_INCR(sctps_pdrpcrupt); 3625 break; 3626 } 3627 } else { 3628 /* is there enough of it left ? 
*/ 3629 if (desc.chunk_type == SCTP_DATA) { 3630 if (chlen < (sizeof(struct sctp_data_chunk) + 3631 sizeof(desc.data_bytes))) { 3632 break; 3633 } 3634 } else { 3635 if (chlen < sizeof(struct sctp_chunkhdr)) { 3636 break; 3637 } 3638 } 3639 } 3640 if (desc.chunk_type == SCTP_DATA) { 3641 /* can we get out the tsn? */ 3642 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 3643 SCTP_STAT_INCR(sctps_pdrpmbda); 3644 3645 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) { 3646 /* yep */ 3647 struct sctp_data_chunk *dcp; 3648 uint8_t *ddp; 3649 unsigned int iii; 3650 3651 dcp = (struct sctp_data_chunk *)ch; 3652 ddp = (uint8_t *) (dcp + 1); 3653 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) { 3654 desc.data_bytes[iii] = ddp[iii]; 3655 } 3656 desc.tsn_ifany = dcp->dp.tsn; 3657 } else { 3658 /* nope we are done. */ 3659 SCTP_STAT_INCR(sctps_pdrpnedat); 3660 break; 3661 } 3662 } else { 3663 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 3664 SCTP_STAT_INCR(sctps_pdrpmbct); 3665 } 3666 3667 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) { 3668 SCTP_STAT_INCR(sctps_pdrppdbrk); 3669 break; 3670 } 3671 if (SCTP_SIZE32(at) > chlen) { 3672 break; 3673 } 3674 chlen -= SCTP_SIZE32(at); 3675 if (chlen < sizeof(struct sctp_chunkhdr)) { 3676 /* done, none left */ 3677 break; 3678 } 3679 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at)); 3680 } 3681 /* Now update any rwnd --- possibly */ 3682 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) { 3683 /* From a peer, we get a rwnd report */ 3684 uint32_t a_rwnd; 3685 3686 SCTP_STAT_INCR(sctps_pdrpfehos); 3687 3688 bottle_bw = ntohl(cp->bottle_bw); 3689 on_queue = ntohl(cp->current_onq); 3690 if (bottle_bw && on_queue) { 3691 /* a rwnd report is in here */ 3692 if (bottle_bw > on_queue) 3693 a_rwnd = bottle_bw - on_queue; 3694 else 3695 a_rwnd = 0; 3696 3697 if (a_rwnd == 0) 3698 stcb->asoc.peers_rwnd = 0; 3699 else { 3700 if (a_rwnd > stcb->asoc.total_flight) { 3701 
stcb->asoc.peers_rwnd = 3702 a_rwnd - stcb->asoc.total_flight; 3703 } else { 3704 stcb->asoc.peers_rwnd = 0; 3705 } 3706 if (stcb->asoc.peers_rwnd < 3707 stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3708 /* SWS sender side engages */ 3709 stcb->asoc.peers_rwnd = 0; 3710 } 3711 } 3712 } 3713 } else { 3714 SCTP_STAT_INCR(sctps_pdrpfmbox); 3715 } 3716 3717 /* now middle boxes in sat networks get a cwnd bump */ 3718 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) && 3719 (stcb->asoc.sat_t3_loss_recovery == 0) && 3720 (stcb->asoc.sat_network)) { 3721 /* 3722 * This is debateable but for sat networks it makes sense 3723 * Note if a T3 timer has went off, we will prohibit any 3724 * changes to cwnd until we exit the t3 loss recovery. 3725 */ 3726 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb, 3727 net, cp, &bottle_bw, &on_queue); 3728 } 3729} 3730 3731/* 3732 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to 3733 * still contain IP/SCTP header - stcb: is the tcb found for this packet - 3734 * offset: offset into the mbuf chain to first chunkhdr - length: is the 3735 * length of the complete packet outputs: - length: modified to remaining 3736 * length after control processing - netp: modified to new sctp_nets after 3737 * cookie-echo processing - return NULL to discard the packet (ie. no asoc, 3738 * bad packet,...) 
 * otherwise return the tcb for this packet
 */
/*
 * NOTE(review): when stcb != NULL the caller already holds its TCB lock
 * (asserted below).  "locked_tcb" tracks whichever TCB must be unlocked on
 * the early NULL-return paths; a non-NULL return hands the still-locked
 * TCB back to the caller.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
static struct sctp_tcb *
sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
    struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
    uint32_t vrf_id)
{
    struct sctp_association *asoc;
    uint32_t vtag_in;
    int num_chunks = 0;     /* number of control chunks processed */
    uint32_t chk_length;
    int ret;
    /*
     * NOTE(review): handlers set abort_no_unlock when they abort the
     * association and the TCB lock is presumably already gone -- the
     * early returns below then deliberately skip SCTP_TCB_UNLOCK.
     */
    int abort_no_unlock = 0;

    /*
     * How big should this be, and should it be alloc'd? Lets try the
     * d-mtu-ceiling for now (2k) and that should hopefully work ...
     * until we get into jumbo grams and such..
     */
    uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
    struct sctp_tcb *locked_tcb = stcb;
    int got_auth = 0;       /* set once an AUTH chunk in this packet verified */
    uint32_t auth_offset = 0, auth_len = 0;
    int auth_skipped = 0;   /* AUTH seen before a TCB was found; re-check later */
    int asconf_cnt = 0;     /* ASCONF chunks seen; one ASCONF-ACK sent at end */

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
    struct socket *so;

#endif

    SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
        iphlen, *offset, length, stcb);

    /* validate chunk header length... */
    if (ntohs(ch->chunk_length) < sizeof(*ch)) {
        SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
            ntohs(ch->chunk_length));
        if (locked_tcb) {
            SCTP_TCB_UNLOCK(locked_tcb);
        }
        return (NULL);
    }
    /*
     * validate the verification tag
     */
    vtag_in = ntohl(sh->v_tag);

    if (locked_tcb) {
        SCTP_TCB_LOCK_ASSERT(locked_tcb);
    }
    if (ch->chunk_type == SCTP_INITIATION) {
        /* An INIT must carry a zero verification tag. */
        SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
            ntohs(ch->chunk_length), vtag_in);
        if (vtag_in != 0) {
            /* protocol error- silently discard... */
            SCTP_STAT_INCR(sctps_badvtag);
            if (locked_tcb) {
                SCTP_TCB_UNLOCK(locked_tcb);
            }
            return (NULL);
        }
    } else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
        /*
         * If there is no stcb, skip the AUTH chunk and process
         * later after a stcb is found (to validate the lookup was
         * valid.
         */
        if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
            (stcb == NULL) && !sctp_auth_disable) {
            /* save this chunk for later processing */
            auth_skipped = 1;
            auth_offset = *offset;
            auth_len = ntohs(ch->chunk_length);

            /* (temporarily) move past this chunk */
            *offset += SCTP_SIZE32(auth_len);
            if (*offset >= length) {
                /* no more data left in the mbuf chain */
                *offset = length;
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                return (NULL);
            }
            ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
                sizeof(struct sctp_chunkhdr), chunk_buf);
        }
        if (ch == NULL) {
            /* Help */
            *offset = length;
            if (locked_tcb) {
                SCTP_TCB_UNLOCK(locked_tcb);
            }
            return (NULL);
        }
        if (ch->chunk_type == SCTP_COOKIE_ECHO) {
            goto process_control_chunks;
        }
        /*
         * first check if it's an ASCONF with an unknown src addr we
         * need to look inside to find the association
         */
        if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
            struct sctp_chunkhdr *asconf_ch = ch;
            uint32_t asconf_offset = 0, asconf_len = 0;

            /* inp's refcount may be reduced */
            SCTP_INP_INCR_REF(inp);

            asconf_offset = *offset;
            do {
                asconf_len = ntohs(asconf_ch->chunk_length);
                if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
                    break;
                stcb = sctp_findassociation_ep_asconf(m, iphlen,
                    *offset, sh, &inp, netp);
                if (stcb != NULL)
                    break;
                asconf_offset += SCTP_SIZE32(asconf_len);
                asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
                    sizeof(struct sctp_chunkhdr), chunk_buf);
            } while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
            if (stcb == NULL) {
                /*
                 * reduce inp's refcount if not reduced in
                 * sctp_findassociation_ep_asconf().
                 */
                SCTP_INP_DECR_REF(inp);
            } else {
                locked_tcb = stcb;
            }

            /* now go back and verify any auth chunk to be sure */
            if (auth_skipped && (stcb != NULL)) {
                struct sctp_auth_chunk *auth;

                auth = (struct sctp_auth_chunk *)
                    sctp_m_getptr(m, auth_offset,
                    auth_len, chunk_buf);
                got_auth = 1;
                auth_skipped = 0;
                if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
                    auth_offset)) {
                    /* auth HMAC failed so dump it */
                    *offset = length;
                    if (locked_tcb) {
                        SCTP_TCB_UNLOCK(locked_tcb);
                    }
                    return (NULL);
                } else {
                    /* remaining chunks are HMAC checked */
                    stcb->asoc.authenticated = 1;
                }
            }
        }
        if (stcb == NULL) {
            /* no association, so it's out of the blue... */
            sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
                vrf_id);
            *offset = length;
            if (locked_tcb) {
                SCTP_TCB_UNLOCK(locked_tcb);
            }
            return (NULL);
        }
        asoc = &stcb->asoc;
        /* ABORT and SHUTDOWN can use either v_tag... */
        if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
            (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
            (ch->chunk_type == SCTP_PACKET_DROPPED)) {
            if ((vtag_in == asoc->my_vtag) ||
                ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
                (vtag_in == asoc->peer_vtag))) {
                /* this is valid */
            } else {
                /* drop this packet... */
                SCTP_STAT_INCR(sctps_badvtag);
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                return (NULL);
            }
        } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
            if (vtag_in != asoc->my_vtag) {
                /*
                 * this could be a stale SHUTDOWN-ACK or the
                 * peer never got the SHUTDOWN-COMPLETE and
                 * is still hung; we have started a new asoc
                 * but it won't complete until the shutdown
                 * is completed
                 */
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                sctp_handle_ootb(m, iphlen, *offset, sh, inp,
                    NULL, vrf_id);
                return (NULL);
            }
        } else {
            /* for all other chunks, vtag must match */
            if (vtag_in != asoc->my_vtag) {
                /* invalid vtag... */
                SCTPDBG(SCTP_DEBUG_INPUT3,
                    "invalid vtag: %xh, expect %xh\n",
                    vtag_in, asoc->my_vtag);
                SCTP_STAT_INCR(sctps_badvtag);
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                *offset = length;
                return (NULL);
            }
        }
    }                       /* end if !SCTP_COOKIE_ECHO */
    /*
     * process all control chunks...
     */
    if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
        (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
        (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
        /* implied cookie-ack.. we must have lost the ack */
        if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
            sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
                stcb->asoc.overall_error_count,
                0,
                SCTP_FROM_SCTP_INPUT,
                __LINE__);
        }
        stcb->asoc.overall_error_count = 0;
        sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
            *netp);
    }
process_control_chunks:
    while (IS_SCTP_CONTROL(ch)) {
        /* validate chunk length */
        chk_length = ntohs(ch->chunk_length);
        SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
            ch->chunk_type, chk_length);
        SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
        if (chk_length < sizeof(*ch) ||
            (*offset + (int)chk_length) > length) {
            *offset = length;
            if (locked_tcb) {
                SCTP_TCB_UNLOCK(locked_tcb);
            }
            return (NULL);
        }
        SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
        /*
         * INIT-ACK only gets the init ack "header" portion only
         * because we don't have to process the peer's COOKIE. All
         * others get a complete chunk.
         */
        if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
            (ch->chunk_type == SCTP_INITIATION)) {
            /* get an init-ack chunk */
            ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
                sizeof(struct sctp_init_ack_chunk), chunk_buf);
            if (ch == NULL) {
                *offset = length;
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                return (NULL);
            }
        } else {
            /* For cookies and all other chunks. */
            if (chk_length > sizeof(chunk_buf)) {
                /*
                 * use just the size of the chunk buffer so
                 * the front part of our chunks fit in
                 * contiguous space up to the chunk buffer
                 * size (508 bytes). For chunks that need to
                 * get more than that they must use the
                 * sctp_m_getptr() function or other means
                 * (e.g. know how to parse mbuf chains).
                 * Cookies do this already.
                 */
                ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
                    (sizeof(chunk_buf) - 4),
                    chunk_buf);
                if (ch == NULL) {
                    *offset = length;
                    if (locked_tcb) {
                        SCTP_TCB_UNLOCK(locked_tcb);
                    }
                    return (NULL);
                }
            } else {
                /* We can fit it all */
                ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
                    chk_length, chunk_buf);
                if (ch == NULL) {
                    SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
                    *offset = length;
                    if (locked_tcb) {
                        SCTP_TCB_UNLOCK(locked_tcb);
                    }
                    return (NULL);
                }
            }
        }
        num_chunks++;
        /* Save off the last place we got a control from */
        if (stcb != NULL) {
            if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
                /*
                 * allow last_control to be NULL if
                 * ASCONF... ASCONF processing will find the
                 * right net later
                 */
                if ((netp != NULL) && (*netp != NULL))
                    stcb->asoc.last_control_chunk_from = *netp;
            }
        }
#ifdef SCTP_AUDITING_ENABLED
        sctp_audit_log(0xB0, ch->chunk_type);
#endif

        /* check to see if this chunk required auth, but isn't */
        if ((stcb != NULL) && !sctp_auth_disable &&
            sctp_auth_is_required_chunk(ch->chunk_type,
            stcb->asoc.local_auth_chunks) &&
            !stcb->asoc.authenticated) {
            /* "silently" ignore */
            SCTP_STAT_INCR(sctps_recvauthmissing);
            goto next_chunk;
        }
        /*
         * Dispatch on chunk type.  Many cases consume the rest of
         * the packet (set *offset = length) and return early.
         */
        switch (ch->chunk_type) {
        case SCTP_INITIATION:
            /* must be first and only chunk */
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
            if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
                /* We are not interested anymore? */
                if ((stcb) && (stcb->asoc.total_output_queue_size)) {
                    /*
                     * collision case where we are
                     * sending to them too
                     */
                    ;
                } else {
                    if (locked_tcb) {
                        SCTP_TCB_UNLOCK(locked_tcb);
                    }
                    *offset = length;
                    return (NULL);
                }
            }
            if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
                (num_chunks > 1) ||
                (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
                *offset = length;
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                return (NULL);
            }
            if ((stcb != NULL) &&
                (SCTP_GET_STATE(&stcb->asoc) ==
                SCTP_STATE_SHUTDOWN_ACK_SENT)) {
                sctp_send_shutdown_ack(stcb,
                    stcb->asoc.primary_destination);
                *offset = length;
                sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                return (NULL);
            }
            if (netp) {
                sctp_handle_init(m, iphlen, *offset, sh,
                    (struct sctp_init_chunk *)ch, inp,
                    stcb, *netp, &abort_no_unlock, vrf_id);
            }
            if (abort_no_unlock)
                return (NULL);

            *offset = length;
            if (locked_tcb) {
                SCTP_TCB_UNLOCK(locked_tcb);
            }
            return (NULL);
            break;
        case SCTP_PAD_CHUNK:
            break;
        case SCTP_INITIATION_ACK:
            /* must be first and only chunk */
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
            if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
                /* We are not interested anymore */
                if ((stcb) && (stcb->asoc.total_output_queue_size)) {
                    ;
                } else {
                    if (locked_tcb) {
                        SCTP_TCB_UNLOCK(locked_tcb);
                    }
                    *offset = length;
                    if (stcb) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
                        so = SCTP_INP_SO(inp);
                        atomic_add_int(&stcb->asoc.refcnt, 1);
                        SCTP_TCB_UNLOCK(stcb);
                        SCTP_SOCKET_LOCK(so, 1);
                        SCTP_TCB_LOCK(stcb);
                        atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
                        (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
                        SCTP_SOCKET_UNLOCK(so, 1);
#endif
                    }
                    return (NULL);
                }
            }
            if ((num_chunks > 1) ||
                (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
                *offset = length;
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                return (NULL);
            }
            if ((netp) && (*netp)) {
                ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
                    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
            } else {
                ret = -1;
            }
            /*
             * Special case, I must call the output routine to
             * get the cookie echoed
             */
            if (abort_no_unlock)
                return (NULL);

            if ((stcb) && ret == 0)
                sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
            *offset = length;
            if (locked_tcb) {
                SCTP_TCB_UNLOCK(locked_tcb);
            }
            return (NULL);
            break;
        case SCTP_SELECTIVE_ACK:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
            SCTP_STAT_INCR(sctps_recvsacks);
            {
                struct sctp_sack_chunk *sack;
                int abort_now = 0;
                uint32_t a_rwnd, cum_ack;
                uint16_t num_seg;
                int nonce_sum_flag;

                if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
            ignore_sack:
                    *offset = length;
                    if (locked_tcb) {
                        SCTP_TCB_UNLOCK(locked_tcb);
                    }
                    return (NULL);
                }
                if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
                    /*-
                     * If we have sent a shutdown-ack, we will pay no
                     * attention to a sack sent in to us since
                     * we don't care anymore.
                     */
                    goto ignore_sack;
                }
                sack = (struct sctp_sack_chunk *)ch;
                nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
                cum_ack = ntohl(sack->sack.cum_tsn_ack);
                num_seg = ntohs(sack->sack.num_gap_ack_blks);
                a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
                SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
                    cum_ack,
                    num_seg,
                    a_rwnd
                    );
                stcb->asoc.seen_a_sack_this_pkt = 1;
                if ((stcb->asoc.pr_sctp_cnt == 0) &&
                    (num_seg == 0) &&
                    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
                    (cum_ack == stcb->asoc.last_acked_seq)) &&
                    (stcb->asoc.saw_sack_with_frags == 0) &&
                    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
                    ) {
                    /*
                     * We have a SIMPLE sack having no
                     * prior segments and data on sent
                     * queue to be acked.. Use the
                     * faster path sack processing. We
                     * also allow window update sacks
                     * with no missing segments to go
                     * this way too.
                     */
                    sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
                        &abort_now);
                } else {
                    if (netp && *netp)
                        sctp_handle_sack(m, *offset,
                            sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
                }
                if (abort_now) {
                    /* ABORT signal from sack processing */
                    *offset = length;
                    return (NULL);
                }
            }
            break;
        case SCTP_HEARTBEAT_REQUEST:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
            if ((stcb) && netp && *netp) {
                SCTP_STAT_INCR(sctps_recvheartbeat);
                sctp_send_heartbeat_ack(stcb, m, *offset,
                    chk_length, *netp);

                /* He's alive so give him credit */
                if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
                    sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
                        stcb->asoc.overall_error_count,
                        0,
                        SCTP_FROM_SCTP_INPUT,
                        __LINE__);
                }
                stcb->asoc.overall_error_count = 0;
            }
            break;
        case SCTP_HEARTBEAT_ACK:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
            if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
                /* Its not ours */
                *offset = length;
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                return (NULL);
            }
            /* He's alive so give him credit */
            if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
                sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
                    stcb->asoc.overall_error_count,
                    0,
                    SCTP_FROM_SCTP_INPUT,
                    __LINE__);
            }
            stcb->asoc.overall_error_count = 0;
            SCTP_STAT_INCR(sctps_recvheartbeatack);
            if (netp && *netp)
                sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
                    stcb, *netp);
            break;
        case SCTP_ABORT_ASSOCIATION:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
                stcb);
            if ((stcb) && netp && *netp)
                sctp_handle_abort((struct sctp_abort_chunk *)ch,
                    stcb, *netp);
            *offset = length;
            return (NULL);
            break;
        case SCTP_SHUTDOWN:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
                stcb);
            if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
                *offset = length;
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                return (NULL);
            }
            if (netp && *netp) {
                int abort_flag = 0;

                sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
                    stcb, *netp, &abort_flag);
                if (abort_flag) {
                    *offset = length;
                    return (NULL);
                }
            }
            break;
        case SCTP_SHUTDOWN_ACK:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
            if ((stcb) && (netp) && (*netp))
                sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
            *offset = length;
            return (NULL);
            break;

        case SCTP_OPERATION_ERROR:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
            if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {

                *offset = length;
                return (NULL);
            }
            break;
        case SCTP_COOKIE_ECHO:
            SCTPDBG(SCTP_DEBUG_INPUT3,
                "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
            if ((stcb) && (stcb->asoc.total_output_queue_size)) {
                ;
            } else {
                if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
                    /* We are not interested anymore */
                    *offset = length;
                    return (NULL);
                }
            }
            /*
             * First are we accepting? We do this again here
             * sincen it is possible that a previous endpoint
             * WAS listening responded to a INIT-ACK and then
             * closed. We opened and bound.. and are now no
             * longer listening.
             */
            if (inp->sctp_socket->so_qlimit == 0) {
                if ((stcb) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
                    /*
                     * special case, is this a retran'd
                     * COOKIE-ECHO or a restarting assoc
                     * that is a peeled off or
                     * one-to-one style socket.
                     */
                    goto process_cookie_anyway;
                }
                sctp_abort_association(inp, stcb, m, iphlen,
                    sh, NULL, vrf_id);
                *offset = length;
                return (NULL);
            } else if (inp->sctp_socket->so_qlimit) {
                /* we are accepting so check limits like TCP */
                if (inp->sctp_socket->so_qlen >
                    inp->sctp_socket->so_qlimit) {
                    /* no space */
                    struct mbuf *oper;
                    struct sctp_paramhdr *phdr;

                    if (sctp_abort_if_one_2_one_hits_limit) {
                        oper = NULL;
                        oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
                            0, M_DONTWAIT, 1, MT_DATA);
                        if (oper) {
                            SCTP_BUF_LEN(oper) =
                                sizeof(struct sctp_paramhdr);
                            phdr = mtod(oper,
                                struct sctp_paramhdr *);
                            phdr->param_type =
                                htons(SCTP_CAUSE_OUT_OF_RESC);
                            phdr->param_length =
                                htons(sizeof(struct sctp_paramhdr));
                        }
                        sctp_abort_association(inp, stcb, m,
                            iphlen, sh, oper, vrf_id);
                    }
                    *offset = length;
                    return (NULL);
                }
            }
    process_cookie_anyway:
            {
                struct mbuf *ret_buf;
                struct sctp_inpcb *linp;

                if (stcb) {
                    linp = NULL;
                } else {
                    linp = inp;
                }

                if (linp) {
                    SCTP_ASOC_CREATE_LOCK(linp);
                }
                if (netp) {
                    ret_buf =
                        sctp_handle_cookie_echo(m, iphlen,
                        *offset, sh,
                        (struct sctp_cookie_echo_chunk *)ch,
                        &inp, &stcb, netp,
                        auth_skipped,
                        auth_offset,
                        auth_len,
                        &locked_tcb,
                        vrf_id);
                } else {
                    ret_buf = NULL;
                }
                if (linp) {
                    SCTP_ASOC_CREATE_UNLOCK(linp);
                }
                if (ret_buf == NULL) {
                    if (locked_tcb) {
                        SCTP_TCB_UNLOCK(locked_tcb);
                    }
                    SCTPDBG(SCTP_DEBUG_INPUT3,
                        "GAK, null buffer\n");
                    auth_skipped = 0;
                    *offset = length;
                    return (NULL);
                }
                /* if AUTH skipped, see if it verified... */
                if (auth_skipped) {
                    got_auth = 1;
                    auth_skipped = 0;
                }
                if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
                    /*
                     * Restart the timer if we have
                     * pending data
                     */
                    struct sctp_tmit_chunk *chk;

                    chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
                    if (chk) {
                        sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                            stcb->sctp_ep, stcb,
                            chk->whoTo);
                    }
                }
            }
            break;
        case SCTP_COOKIE_ACK:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
            if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                return (NULL);
            }
            if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
                /* We are not interested anymore */
                if ((stcb) && (stcb->asoc.total_output_queue_size)) {
                    ;
                } else if (stcb) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
                    so = SCTP_INP_SO(inp);
                    atomic_add_int(&stcb->asoc.refcnt, 1);
                    SCTP_TCB_UNLOCK(stcb);
                    SCTP_SOCKET_LOCK(so, 1);
                    SCTP_TCB_LOCK(stcb);
                    atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
                    (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
                    SCTP_SOCKET_UNLOCK(so, 1);
#endif
                    *offset = length;
                    return (NULL);
                }
            }
            /* He's alive so give him credit */
            if ((stcb) && netp && *netp) {
                if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
                    sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
                        stcb->asoc.overall_error_count,
                        0,
                        SCTP_FROM_SCTP_INPUT,
                        __LINE__);
                }
                stcb->asoc.overall_error_count = 0;
                sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
            }
            break;
        case SCTP_ECN_ECHO:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
            /* He's alive so give him credit */
            if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
                /* Its not ours */
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                *offset = length;
                return (NULL);
            }
            if (stcb) {
                if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
                    sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
                        stcb->asoc.overall_error_count,
                        0,
                        SCTP_FROM_SCTP_INPUT,
                        __LINE__);
                }
                stcb->asoc.overall_error_count = 0;
                sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
                    stcb);
            }
            break;
        case SCTP_ECN_CWR:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
            /* He's alive so give him credit */
            if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
                /* Its not ours */
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                *offset = length;
                return (NULL);
            }
            if (stcb) {
                if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
                    sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
                        stcb->asoc.overall_error_count,
                        0,
                        SCTP_FROM_SCTP_INPUT,
                        __LINE__);
                }
                stcb->asoc.overall_error_count = 0;
                sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
            }
            break;
        case SCTP_SHUTDOWN_COMPLETE:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
            /* must be first and only chunk */
            if ((num_chunks > 1) ||
                (length - *offset > (int)SCTP_SIZE32(chk_length))) {
                *offset = length;
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                return (NULL);
            }
            if ((stcb) && netp && *netp) {
                sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
                    stcb, *netp);
            }
            *offset = length;
            return (NULL);
            break;
        case SCTP_ASCONF:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
            /* He's alive so give him credit */
            if (stcb) {
                if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
                    sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
                        stcb->asoc.overall_error_count,
                        0,
                        SCTP_FROM_SCTP_INPUT,
                        __LINE__);
                }
                stcb->asoc.overall_error_count = 0;
                sctp_handle_asconf(m, *offset,
                    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
                asconf_cnt++;
            }
            break;
        case SCTP_ASCONF_ACK:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
            if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
                /* Its not ours */
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                *offset = length;
                return (NULL);
            }
            if ((stcb) && netp && *netp) {
                /* He's alive so give him credit */
                if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
                    sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
                        stcb->asoc.overall_error_count,
                        0,
                        SCTP_FROM_SCTP_INPUT,
                        __LINE__);
                }
                stcb->asoc.overall_error_count = 0;
                sctp_handle_asconf_ack(m, *offset,
                    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp);
            }
            break;
        case SCTP_FORWARD_CUM_TSN:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
            if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
                /* Its not ours */
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                *offset = length;
                return (NULL);
            }
            /* He's alive so give him credit */
            if (stcb) {
                int abort_flag = 0;

                stcb->asoc.overall_error_count = 0;
                if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
                    sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
                        stcb->asoc.overall_error_count,
                        0,
                        SCTP_FROM_SCTP_INPUT,
                        __LINE__);
                }
                *fwd_tsn_seen = 1;
                if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
                    /* We are not interested anymore */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
                    so = SCTP_INP_SO(inp);
                    atomic_add_int(&stcb->asoc.refcnt, 1);
                    SCTP_TCB_UNLOCK(stcb);
                    SCTP_SOCKET_LOCK(so, 1);
                    SCTP_TCB_LOCK(stcb);
                    atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
                    (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
                    SCTP_SOCKET_UNLOCK(so, 1);
#endif
                    *offset = length;
                    return (NULL);
                }
                sctp_handle_forward_tsn(stcb,
                    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
                if (abort_flag) {
                    *offset = length;
                    return (NULL);
                } else {
                    if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
                        sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
                            stcb->asoc.overall_error_count,
                            0,
                            SCTP_FROM_SCTP_INPUT,
                            __LINE__);
                    }
                    stcb->asoc.overall_error_count = 0;
                }

            }
            break;
        case SCTP_STREAM_RESET:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
            if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
                /* Its not ours */
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                *offset = length;
                return (NULL);
            }
            if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
                /* We are not interested anymore */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
                so = SCTP_INP_SO(inp);
                atomic_add_int(&stcb->asoc.refcnt, 1);
                SCTP_TCB_UNLOCK(stcb);
                SCTP_SOCKET_LOCK(so, 1);
                SCTP_TCB_LOCK(stcb);
                atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
                (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
                SCTP_SOCKET_UNLOCK(so, 1);
#endif
                *offset = length;
                return (NULL);
            }
            if (stcb->asoc.peer_supports_strreset == 0) {
                /*
                 * hmm, peer should have announced this, but
                 * we will turn it on since he is sending us
                 * a stream reset.
                 */
                stcb->asoc.peer_supports_strreset = 1;
            }
            if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
                /* stop processing */
                *offset = length;
                return (NULL);
            }
            break;
        case SCTP_PACKET_DROPPED:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
            /* re-get it all please */
            if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
                /* Its not ours */
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                *offset = length;
                return (NULL);
            }
            if (ch && (stcb) && netp && (*netp)) {
                sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
                    stcb, *netp,
                    min(chk_length, (sizeof(chunk_buf) - 4)));

            }
            break;

        case SCTP_AUTHENTICATION:
            SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
            if (sctp_auth_disable)
                goto unknown_chunk;

            if (stcb == NULL) {
                /* save the first AUTH for later processing */
                if (auth_skipped == 0) {
                    auth_offset = *offset;
                    auth_len = chk_length;
                    auth_skipped = 1;
                }
                /* skip this chunk (temporarily) */
                goto next_chunk;
            }
            if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
                (chk_length > (sizeof(struct sctp_auth_chunk) +
                SCTP_AUTH_DIGEST_LEN_MAX))) {
                /* Its not ours */
                if (locked_tcb) {
                    SCTP_TCB_UNLOCK(locked_tcb);
                }
                *offset = length;
                return (NULL);
            }
            if (got_auth == 1) {
                /* skip this chunk... it's already auth'd */
                goto next_chunk;
            }
            got_auth = 1;
            if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
                m, *offset)) {
                /* auth HMAC failed so dump the packet */
                *offset = length;
                return (stcb);
            } else {
                /* remaining chunks are HMAC checked */
                stcb->asoc.authenticated = 1;
            }
            break;

        default:
    unknown_chunk:
            /* it's an unknown chunk! */
            if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
                struct mbuf *mm;
                struct sctp_paramhdr *phd;

                mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
                    0, M_DONTWAIT, 1, MT_DATA);
                if (mm) {
                    phd = mtod(mm, struct sctp_paramhdr *);
                    /*
                     * We cheat and use param type since
                     * we did not bother to define a
                     * error cause struct. They are the
                     * same basic format with different
                     * names.
                     */
                    phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
                    phd->param_length = htons(chk_length + sizeof(*phd));
                    SCTP_BUF_LEN(mm) = sizeof(*phd);
                    SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
                        M_DONTWAIT);
                    if (SCTP_BUF_NEXT(mm)) {
                        sctp_queue_op_err(stcb, mm);
                    } else {
                        sctp_m_freem(mm);
                    }
                }
            }
            if ((ch->chunk_type & 0x80) == 0) {
                /* discard this packet */
                *offset = length;
                return (stcb);
            }   /* else skip this bad chunk and continue... */
            break;
        }                       /* switch (ch->chunk_type) */


next_chunk:
        /* get the next chunk */
        *offset += SCTP_SIZE32(chk_length);
        if (*offset >= length) {
            /* no more data left in the mbuf chain */
            break;
        }
        ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
            sizeof(struct sctp_chunkhdr), chunk_buf);
        if (ch == NULL) {
            if (locked_tcb) {
                SCTP_TCB_UNLOCK(locked_tcb);
            }
            *offset = length;
            return (NULL);
        }
    }                           /* while */

    /* ACK all ASCONF chunks seen in this packet with one ASCONF-ACK */
    if (asconf_cnt > 0 && stcb != NULL) {
        sctp_send_asconf_ack(stcb);
    }
    return (stcb);
}


/*
 * Process the ECN bits we have something set so we must look to see if it is
 * ECN(0) or ECN(1) or CE
 */
static void
sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits)
{
    if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
        /* CE handled in sctp_process_ecn_marked_b() -- nothing here */
        ;
    } else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
        /*
         * we only add to the nonce sum for ECT1, ECT0 does not
         * change the NS bit (that we have yet to find a way to send
         * it yet).
         */

        /* ECN Nonce stuff */
        stcb->asoc.receiver_nonce_sum++;
        stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;

        /*
         * Drag up the last_echo point if cumack is larger since we
         * don't want the point falling way behind by more than
         * 2^^31 and then having it be incorrect.
         */
        if (compare_with_wrap(stcb->asoc.cumulative_tsn,
            stcb->asoc.last_echo_tsn, MAX_TSN)) {
            stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
        }
    } else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
        /*
         * Drag up the last_echo point if cumack is larger since we
         * don't want the point falling way behind by more than
         * 2^^31 and then having it be incorrect.
         */
        if (compare_with_wrap(stcb->asoc.cumulative_tsn,
            stcb->asoc.last_echo_tsn, MAX_TSN)) {
            stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
        }
    }
}

static void
sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint32_t high_tsn, uint8_t ecn_bits)
{
    if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
        /*
         * we possibly must notify the sender that a congestion
         * window reduction is in order. We do this by adding a ECNE
         * chunk to the output chunk queue. The incoming CWR will
         * remove this chunk.
4890 */ 4891 if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn, 4892 MAX_TSN)) { 4893 /* Yep, we need to add a ECNE */ 4894 sctp_send_ecn_echo(stcb, net, high_tsn); 4895 stcb->asoc.last_echo_tsn = high_tsn; 4896 } 4897 } 4898} 4899 4900#ifdef INVARIANTS 4901static void 4902sctp_validate_no_locks(struct sctp_inpcb *inp) 4903{ 4904 struct sctp_tcb *stcb; 4905 4906 LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) { 4907 if (mtx_owned(&stcb->tcb_mtx)) { 4908 panic("Own lock on stcb at return from input"); 4909 } 4910 } 4911} 4912 4913#endif 4914 4915/* 4916 * common input chunk processing (v4 and v6) 4917 */ 4918void 4919sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, 4920 int length, struct sctphdr *sh, struct sctp_chunkhdr *ch, 4921 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, 4922 uint8_t ecn_bits, uint32_t vrf_id) 4923{ 4924 /* 4925 * Control chunk processing 4926 */ 4927 uint32_t high_tsn; 4928 int fwd_tsn_seen = 0, data_processed = 0; 4929 struct mbuf *m = *mm; 4930 int abort_flag = 0; 4931 int un_sent; 4932 4933 SCTP_STAT_INCR(sctps_recvdatagrams); 4934#ifdef SCTP_AUDITING_ENABLED 4935 sctp_audit_log(0xE0, 1); 4936 sctp_auditing(0, inp, stcb, net); 4937#endif 4938 4939 SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d stcb:%p\n", 4940 m, iphlen, offset, stcb); 4941 if (stcb) { 4942 /* always clear this before beginning a packet */ 4943 stcb->asoc.authenticated = 0; 4944 stcb->asoc.seen_a_sack_this_pkt = 0; 4945 SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n", 4946 stcb, stcb->asoc.state); 4947 4948 if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) || 4949 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 4950 /*- 4951 * If we hit here, we had a ref count 4952 * up when the assoc was aborted and the 4953 * timer is clearing out the assoc, we should 4954 * NOT respond to any packet.. its OOTB. 
4955 */ 4956 SCTP_TCB_UNLOCK(stcb); 4957 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 4958 vrf_id); 4959 goto out_now; 4960 } 4961 } 4962 if (IS_SCTP_CONTROL(ch)) { 4963 /* process the control portion of the SCTP packet */ 4964 /* sa_ignore NO_NULL_CHK */ 4965 stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch, 4966 inp, stcb, &net, &fwd_tsn_seen, vrf_id); 4967 if (stcb) { 4968 /* 4969 * This covers us if the cookie-echo was there and 4970 * it changes our INP. 4971 */ 4972 inp = stcb->sctp_ep; 4973 } 4974 } else { 4975 /* 4976 * no control chunks, so pre-process DATA chunks (these 4977 * checks are taken care of by control processing) 4978 */ 4979 4980 /* 4981 * if DATA only packet, and auth is required, then punt... 4982 * can't have authenticated without any AUTH (control) 4983 * chunks 4984 */ 4985 if ((stcb != NULL) && !sctp_auth_disable && 4986 sctp_auth_is_required_chunk(SCTP_DATA, 4987 stcb->asoc.local_auth_chunks)) { 4988 /* "silently" ignore */ 4989 SCTP_STAT_INCR(sctps_recvauthmissing); 4990 SCTP_TCB_UNLOCK(stcb); 4991 goto out_now; 4992 } 4993 if (stcb == NULL) { 4994 /* out of the blue DATA chunk */ 4995 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 4996 vrf_id); 4997 goto out_now; 4998 } 4999 if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) { 5000 /* v_tag mismatch! */ 5001 SCTP_STAT_INCR(sctps_badvtag); 5002 SCTP_TCB_UNLOCK(stcb); 5003 goto out_now; 5004 } 5005 } 5006 5007 if (stcb == NULL) { 5008 /* 5009 * no valid TCB for this packet, or we found it's a bad 5010 * packet while processing control, or we're done with this 5011 * packet (done or skip rest of data), so we drop it... 5012 */ 5013 goto out_now; 5014 } 5015 /* 5016 * DATA chunk processing 5017 */ 5018 /* plow through the data chunks while length > offset */ 5019 5020 /* 5021 * Rest should be DATA only. Check authentication state if AUTH for 5022 * DATA is required. 
5023 */ 5024 if ((length > offset) && (stcb != NULL) && !sctp_auth_disable && 5025 sctp_auth_is_required_chunk(SCTP_DATA, 5026 stcb->asoc.local_auth_chunks) && 5027 !stcb->asoc.authenticated) { 5028 /* "silently" ignore */ 5029 SCTP_STAT_INCR(sctps_recvauthmissing); 5030 SCTPDBG(SCTP_DEBUG_AUTH1, 5031 "Data chunk requires AUTH, skipped\n"); 5032 goto trigger_send; 5033 } 5034 if (length > offset) { 5035 int retval; 5036 5037 /* 5038 * First check to make sure our state is correct. We would 5039 * not get here unless we really did have a tag, so we don't 5040 * abort if this happens, just dump the chunk silently. 5041 */ 5042 switch (SCTP_GET_STATE(&stcb->asoc)) { 5043 case SCTP_STATE_COOKIE_ECHOED: 5044 /* 5045 * we consider data with valid tags in this state 5046 * shows us the cookie-ack was lost. Imply it was 5047 * there. 5048 */ 5049 if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) { 5050 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5051 stcb->asoc.overall_error_count, 5052 0, 5053 SCTP_FROM_SCTP_INPUT, 5054 __LINE__); 5055 } 5056 stcb->asoc.overall_error_count = 0; 5057 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net); 5058 break; 5059 case SCTP_STATE_COOKIE_WAIT: 5060 /* 5061 * We consider OOTB any data sent during asoc setup. 5062 */ 5063 sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL, 5064 vrf_id); 5065 SCTP_TCB_UNLOCK(stcb); 5066 goto out_now; 5067 /* sa_ignore NOTREACHED */ 5068 break; 5069 case SCTP_STATE_EMPTY: /* should not happen */ 5070 case SCTP_STATE_INUSE: /* should not happen */ 5071 case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */ 5072 case SCTP_STATE_SHUTDOWN_ACK_SENT: 5073 default: 5074 SCTP_TCB_UNLOCK(stcb); 5075 goto out_now; 5076 /* sa_ignore NOTREACHED */ 5077 break; 5078 case SCTP_STATE_OPEN: 5079 case SCTP_STATE_SHUTDOWN_SENT: 5080 break; 5081 } 5082 /* take care of ECN, part 1. 
*/ 5083 if (stcb->asoc.ecn_allowed && 5084 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) { 5085 sctp_process_ecn_marked_a(stcb, net, ecn_bits); 5086 } 5087 /* plow through the data chunks while length > offset */ 5088 retval = sctp_process_data(mm, iphlen, &offset, length, sh, 5089 inp, stcb, net, &high_tsn); 5090 if (retval == 2) { 5091 /* 5092 * The association aborted, NO UNLOCK needed since 5093 * the association is destroyed. 5094 */ 5095 goto out_now; 5096 } 5097 data_processed = 1; 5098 if (retval == 0) { 5099 /* take care of ecn part 2. */ 5100 if (stcb->asoc.ecn_allowed && 5101 (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) { 5102 sctp_process_ecn_marked_b(stcb, net, high_tsn, 5103 ecn_bits); 5104 } 5105 } 5106 /* 5107 * Anything important needs to have been m_copy'ed in 5108 * process_data 5109 */ 5110 } 5111 if ((data_processed == 0) && (fwd_tsn_seen)) { 5112 int was_a_gap = 0; 5113 5114 if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map, 5115 stcb->asoc.cumulative_tsn, MAX_TSN)) { 5116 /* there was a gap before this data was processed */ 5117 was_a_gap = 1; 5118 } 5119 sctp_sack_check(stcb, 1, was_a_gap, &abort_flag); 5120 if (abort_flag) { 5121 /* Again, we aborted so NO UNLOCK needed */ 5122 goto out_now; 5123 } 5124 } 5125 /* trigger send of any chunks in queue... 
*/ 5126trigger_send: 5127#ifdef SCTP_AUDITING_ENABLED 5128 sctp_audit_log(0xE0, 2); 5129 sctp_auditing(1, inp, stcb, net); 5130#endif 5131 SCTPDBG(SCTP_DEBUG_INPUT1, 5132 "Check for chunk output prw:%d tqe:%d tf=%d\n", 5133 stcb->asoc.peers_rwnd, 5134 TAILQ_EMPTY(&stcb->asoc.control_send_queue), 5135 stcb->asoc.total_flight); 5136 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); 5137 5138 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) || 5139 ((un_sent) && 5140 (stcb->asoc.peers_rwnd > 0 || 5141 (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) { 5142 SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n"); 5143 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 5144 SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n"); 5145 } 5146#ifdef SCTP_AUDITING_ENABLED 5147 sctp_audit_log(0xE0, 3); 5148 sctp_auditing(2, inp, stcb, net); 5149#endif 5150 SCTP_TCB_UNLOCK(stcb); 5151out_now: 5152#ifdef INVARIANTS 5153 sctp_validate_no_locks(inp); 5154#endif 5155 return; 5156} 5157 5158 5159 5160void 5161sctp_input(i_pak, off) 5162 struct mbuf *i_pak; 5163 int off; 5164 5165{ 5166#ifdef SCTP_MBUF_LOGGING 5167 struct mbuf *mat; 5168 5169#endif 5170 struct mbuf *m; 5171 int iphlen; 5172 uint32_t vrf_id = 0; 5173 uint8_t ecn_bits; 5174 struct ip *ip; 5175 struct sctphdr *sh; 5176 struct sctp_inpcb *inp = NULL; 5177 5178 uint32_t check, calc_check; 5179 struct sctp_nets *net; 5180 struct sctp_tcb *stcb = NULL; 5181 struct sctp_chunkhdr *ch; 5182 int refcount_up = 0; 5183 int length, mlen, offset; 5184 5185 5186 if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) { 5187 SCTP_RELEASE_PKT(i_pak); 5188 return; 5189 } 5190 mlen = SCTP_HEADER_LEN(i_pak); 5191 iphlen = off; 5192 m = SCTP_HEADER_TO_CHAIN(i_pak); 5193 5194 net = NULL; 5195 SCTP_STAT_INCR(sctps_recvpackets); 5196 SCTP_STAT_INCR_COUNTER64(sctps_inpackets); 5197 5198 5199#ifdef SCTP_MBUF_LOGGING 5200 /* Log in any input mbufs */ 5201 if (sctp_logging_level & 
SCTP_MBUF_LOGGING_ENABLE) { 5202 mat = m; 5203 while (mat) { 5204 if (SCTP_BUF_IS_EXTENDED(mat)) { 5205 sctp_log_mb(mat, SCTP_MBUF_INPUT); 5206 } 5207 mat = SCTP_BUF_NEXT(mat); 5208 } 5209 } 5210#endif 5211#ifdef SCTP_PACKET_LOGGING 5212 if (sctp_logging_level & SCTP_LAST_PACKET_TRACING) 5213 sctp_packet_log(m, mlen); 5214#endif 5215 /* 5216 * Must take out the iphlen, since mlen expects this (only effect lb 5217 * case) 5218 */ 5219 mlen -= iphlen; 5220 5221 /* 5222 * Get IP, SCTP, and first chunk header together in first mbuf. 5223 */ 5224 ip = mtod(m, struct ip *); 5225 offset = iphlen + sizeof(*sh) + sizeof(*ch); 5226 if (SCTP_BUF_LEN(m) < offset) { 5227 if ((m = m_pullup(m, offset)) == 0) { 5228 SCTP_STAT_INCR(sctps_hdrops); 5229 return; 5230 } 5231 ip = mtod(m, struct ip *); 5232 } 5233 sh = (struct sctphdr *)((caddr_t)ip + iphlen); 5234 ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh)); 5235 SCTPDBG(SCTP_DEBUG_INPUT1, 5236 "sctp_input() length:%d iphlen:%d\n", mlen, iphlen); 5237 5238 /* SCTP does not allow broadcasts or multicasts */ 5239 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) { 5240 goto bad; 5241 } 5242 if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) { 5243 /* 5244 * We only look at broadcast if its a front state, All 5245 * others we will not have a tcb for anyway. 
5246 */ 5247 goto bad; 5248 } 5249 /* validate SCTP checksum */ 5250 check = sh->checksum; /* save incoming checksum */ 5251 if ((check == 0) && (sctp_no_csum_on_loopback) && 5252 ((ip->ip_src.s_addr == ip->ip_dst.s_addr) || 5253 (SCTP_IS_IT_LOOPBACK(m))) 5254 ) { 5255 goto sctp_skip_csum_4; 5256 } 5257 sh->checksum = 0; /* prepare for calc */ 5258 calc_check = sctp_calculate_sum(m, &mlen, iphlen); 5259 if (calc_check != check) { 5260 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n", 5261 calc_check, check, m, mlen, iphlen); 5262 5263 stcb = sctp_findassociation_addr(m, iphlen, 5264 offset - sizeof(*ch), 5265 sh, ch, &inp, &net, 5266 vrf_id); 5267 if ((inp) && (stcb)) { 5268 sctp_send_packet_dropped(stcb, net, m, iphlen, 1); 5269 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED); 5270 } else if ((inp != NULL) && (stcb == NULL)) { 5271 refcount_up = 1; 5272 } 5273 SCTP_STAT_INCR(sctps_badsum); 5274 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors); 5275 goto bad; 5276 } 5277 sh->checksum = calc_check; 5278sctp_skip_csum_4: 5279 /* destination port of 0 is illegal, based on RFC2960. */ 5280 if (sh->dest_port == 0) { 5281 SCTP_STAT_INCR(sctps_hdrops); 5282 goto bad; 5283 } 5284 /* validate mbuf chain length with IP payload length */ 5285 if (mlen < (ip->ip_len - iphlen)) { 5286 SCTP_STAT_INCR(sctps_hdrops); 5287 goto bad; 5288 } 5289 /* 5290 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants 5291 * IP/SCTP/first chunk header... 5292 */ 5293 stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch), 5294 sh, ch, &inp, &net, vrf_id); 5295 /* inp's ref-count increased && stcb locked */ 5296 if (inp == NULL) { 5297 struct sctp_init_chunk *init_chk, chunk_buf; 5298 5299 SCTP_STAT_INCR(sctps_noport); 5300#ifdef ICMP_BANDLIM 5301 /* 5302 * we use the bandwidth limiting to protect against sending 5303 * too many ABORTS all at once. 
In this case these count the 5304 * same as an ICMP message. 5305 */ 5306 if (badport_bandlim(0) < 0) 5307 goto bad; 5308#endif /* ICMP_BANDLIM */ 5309 SCTPDBG(SCTP_DEBUG_INPUT1, 5310 "Sending a ABORT from packet entry!\n"); 5311 if (ch->chunk_type == SCTP_INITIATION) { 5312 /* 5313 * we do a trick here to get the INIT tag, dig in 5314 * and get the tag from the INIT and put it in the 5315 * common header. 5316 */ 5317 init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m, 5318 iphlen + sizeof(*sh), sizeof(*init_chk), 5319 (uint8_t *) & chunk_buf); 5320 if (init_chk != NULL) 5321 sh->v_tag = init_chk->init.initiate_tag; 5322 } 5323 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 5324 sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id); 5325 goto bad; 5326 } 5327 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) { 5328 goto bad; 5329 } 5330 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) 5331 sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id); 5332 goto bad; 5333 } else if (stcb == NULL) { 5334 refcount_up = 1; 5335 } 5336#ifdef IPSEC 5337 /* 5338 * I very much doubt any of the IPSEC stuff will work but I have no 5339 * idea, so I will leave it in place. 
5340 */ 5341 if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) { 5342 ipsec4stat.in_polvio++; 5343 SCTP_STAT_INCR(sctps_hdrops); 5344 goto bad; 5345 } 5346#endif /* IPSEC */ 5347 5348 /* 5349 * common chunk processing 5350 */ 5351 length = ip->ip_len + iphlen; 5352 offset -= sizeof(struct sctp_chunkhdr); 5353 5354 ecn_bits = ip->ip_tos; 5355 5356 /* sa_ignore NO_NULL_CHK */ 5357 sctp_common_input_processing(&m, iphlen, offset, length, sh, ch, 5358 inp, stcb, net, ecn_bits, vrf_id); 5359 /* inp's ref-count reduced && stcb unlocked */ 5360 if (m) { 5361 sctp_m_freem(m); 5362 } 5363 if ((inp) && (refcount_up)) { 5364 /* reduce ref-count */ 5365 SCTP_INP_DECR_REF(inp); 5366 } 5367 return; 5368bad: 5369 if (stcb) { 5370 SCTP_TCB_UNLOCK(stcb); 5371 } 5372 if ((inp) && (refcount_up)) { 5373 /* reduce ref-count */ 5374 SCTP_INP_DECR_REF(inp); 5375 } 5376 if (m) { 5377 sctp_m_freem(m); 5378 } 5379 return; 5380} 5381