/* sctp_input.c revision 178202 */
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 178202 2008-04-14 18:13:33Z rrs $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_timer.h>



static void
sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
{
	struct sctp_nets *net;

	/*
	 * This now not only stops all cookie timers it also stops any INIT
	 * timers as well. This will make sure that the timers are stopped
	 * in all collision cases.
	 */
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
		}
	}
}

/*
 * INIT handler.
 *
 * Validates the mandatory fields of an incoming INIT chunk (chunk length,
 * non-zero initiate_tag, a_rwnd >= SCTP_MIN_RWND, non-zero inbound and
 * outbound stream counts) and the AUTH-related parameters.  Any violation,
 * or the socket not accepting (so_qlimit == 0 with no existing tcb), sends
 * an ABORT via sctp_abort_association(); otherwise an INIT-ACK with cookie
 * is sent.  When called with stcb == NULL the inp read lock is taken for
 * the duration and released at "outnow".  If an abort is sent for an
 * existing stcb, *abort_no_unlock is set so the caller skips its unlock.
 * Note: the "net" parameter is accepted but not used in this function.
 */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_init *init;
	struct mbuf *op_err;
	uint32_t init_limit;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	    stcb);
	if (stcb == NULL) {
		SCTP_INP_RLOCK(inp);
		if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
			goto outnow;
		}
	}
	op_err = NULL;
	init = &cp->init;
	/* First are we accepting? */
	if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_init: Abort, so_qlimit:%d\n",
		    inp->sctp_socket->so_qlimit);
		/*
		 * FIX ME ?? What about TCP model and we have a
		 * match/restart case?
		 */
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		/* Invalid length */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* validate parameters */
	if (init->initiate_tag == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_inbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_outbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
		    vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	init_limit = offset + ntohs(cp->ch.chunk_length);
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	    init_limit)) {
		/* auth parameter(s) error... send abort */
		sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* send an INIT-ACK w/cookie */
	SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
	sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id,
	    ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
outnow:
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
	}
}

/*
 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
 */

int
sctp_is_there_unsent_data(struct sctp_tcb *stcb)
{
	int unsent_data = 0;
	struct sctp_stream_queue_pending *sp;
	struct sctp_stream_out *strq;
	struct sctp_association *asoc;

	/*
	 * This function returns the number of streams that have true unsent
	 * data on them. Note that as it looks through it will clean up any
	 * places that have old data that has been sent but left at top of
	 * stream queue.  (Side effect: completed entries are freed here
	 * under the tcb send lock.)
	 */
	asoc = &stcb->asoc;
	SCTP_TCB_SEND_LOCK(stcb);
	if (!TAILQ_EMPTY(&asoc->out_wheel)) {
		/* Check to see if some data queued */
		TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) {
	is_there_another:
			/* sa_ignore FREED_MEMORY */
			sp = TAILQ_FIRST(&strq->outqueue);
			if (sp == NULL) {
				continue;
			}
			if ((sp->msg_is_complete) &&
			    (sp->length == 0) &&
			    (sp->sender_all_done)) {
				/*
				 * We are doing deferred cleanup. Last time
				 * through when we took all the data the
				 * sender_all_done was not set.
				 */
				if (sp->put_last_out == 0) {
					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
					    sp->sender_all_done,
					    sp->length,
					    sp->msg_is_complete,
					    sp->put_last_out);
				}
				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
				TAILQ_REMOVE(&strq->outqueue, sp, next);
				sctp_free_remote_addr(sp->net);
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
				sctp_free_a_strmoq(stcb, sp);
				/* re-examine the (new) head of this stream */
				goto is_there_another;
			} else {
				unsent_data++;
				continue;
			}
		}
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	return (unsent_data);
}

/*
 * Digest the parameters of a peer INIT or INIT-ACK: record the peer's
 * verification tag and rwnd, trim our outbound stream count down to what
 * the peer will accept (freeing any data trapped on abandoned streams),
 * reset the TSN/mapping bookkeeping from the peer's initial_tsn, and
 * (re)allocate the inbound stream state sized by the peer's outbound
 * stream count.  Returns 0 on success, -1 if the inbound stream array
 * cannot be allocated.
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	if (TAILQ_FIRST(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;

			if (sctp_logging_level & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}
		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp;

		/* cut back on number of streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* This if is probably not needed but I am cautious */
		if (asoc->strmout) {
			/* First make sure no data chunks are trapped */
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				sp = TAILQ_FIRST(&outs->outqueue);
				while (sp) {
					TAILQ_REMOVE(&outs->outqueue, sp,
					    next);
					asoc->stream_queue_cnt--;
					/* tell the ULP this data won't be sent */
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
					    sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
					/* Free the chunk */
					SCTP_PRINTF("sp:%p tcb:%p weird free case\n",
					    sp, stcb);

					sctp_free_a_strmoq(stcb, sp);
					/* sa_ignore FREED_MEMORY */
					sp = TAILQ_FIRST(&outs->outqueue);
				}
			}
		}
		/* cut back the count and abandon the upper streams */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->streamoutcnt = asoc->pre_open_streams;
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
	if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->cumulative_tsn = asoc->asconf_seq_in;
	asoc->last_echo_tsn = asoc->asconf_seq_in;
	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones */
		struct sctp_queued_to_read *ctl;

		for (i = 0; i < asoc->streamincnt; i++) {
			ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
			while (ctl) {
				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
				sctp_free_remote_addr(ctl->whoFrom);
				ctl->whoFrom = NULL;
				sctp_m_freem(ctl->data);
				ctl->data = NULL;
				sctp_free_a_readq(stcb, ctl);
				ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
			}
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* inbound stream count is bounded by MAX_SCTP_STREAMS */
	asoc->streamincnt = ntohs(init->num_outbound_streams);
	if (asoc->streamincnt > MAX_SCTP_STREAMS) {
		asoc->streamincnt = MAX_SCTP_STREAMS;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].stream_no = i;
		asoc->strmin[i].last_sequence_delivered = 0xffff;
		/*
		 * U-stream ranges will be set when the cookie is unpacked.
		 * Or for the INIT sender they are un set (if pr-sctp not
		 * supported) when the INIT-ACK arrives.
		 */
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}

/*
 * INIT-ACK message processing/consumption returns value < 0 on error.
 *
 * Rejects unrecognized parameters that demand an abort, digests the
 * INIT-ACK via sctp_process_init(), loads the peer addresses, flushes the
 * asconf queue if the peer lacks ASCONF support, negotiates the HMAC id,
 * stops the INIT timer, updates the RTO from the INIT round trip, and
 * queues a COOKIE-ECHO.  Sets *abort_no_unlock when it aborts the
 * association itself.
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag;
	uint32_t initack_limit;

	/* First verify that we have no illegal param's */
	abort_flag = 0;
	op_err = NULL;

	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	asoc = &stcb->asoc;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
	if (retval < 0) {
		return (retval);
	}
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
	    NULL))) {
		/* Huh, we should abort */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    NULL, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->peer_supports_asconf == 0) {
		struct sctp_asconf_addr *aparam;

		while (!TAILQ_EMPTY(&asoc->asconf_queue)) {
			/* sa_ignore FREED_MEMORY */
			aparam = TAILQ_FIRST(&asoc->asconf_queue);
			TAILQ_REMOVE(&asoc->asconf_queue, aparam, next);
			SCTP_FREE(aparam, SCTP_M_ASC_ADDR);
		}
	}
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	if (op_err) {
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer, We do this first before queueing the
	 * cookie. We always cancel at the primary to assure that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);

	/* calculate the RTO */
	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy);

	retval = sctp_send_cookie_echo(m, offset, stcb, net);
	if (retval < 0) {
		/*
		 * No cookie, we probably should send a op error. But in any
		 * case if there is no cookie in the INIT-ACK, we can
		 * abandon the peer, its broke.
		 */
		if (retval == -3) {
			/* We abort with an error of missing mandatory param */
			op_err =
			    sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
			if (op_err) {
				/*
				 * Expand beyond to include the mandatory
				 * param cookie
				 */
				struct sctp_inv_mandatory_param *mp;

				SCTP_BUF_LEN(op_err) =
				    sizeof(struct sctp_inv_mandatory_param);
				mp = mtod(op_err,
				    struct sctp_inv_mandatory_param *);
				/* Subtract the reserved param */
				mp->length =
				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
				mp->num_param = htonl(1);
				mp->param = htons(SCTP_STATE_COOKIE);
				mp->resv = 0;
			}
			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
			    sh, op_err, 0);
			*abort_no_unlock = 1;
		}
		return (retval);
	}
	return (0);
}

/*
 * Process a HEARTBEAT-ACK: locate the net the echoed address belongs to
 * (r_net), confirm it if the echoed random values match (possibly
 * promoting it to primary when SCTP_ADDR_REQ_PRIMARY is set), clear its
 * error count, mark it reachable again if needed, handle the CMT-PF
 * transition, recompute the RTO from the echoed timestamp, and run the
 * mobility "primary deleted" follow-up when a primary change was
 * requested.  Silently ignores chunks of the wrong length or for unknown
 * addresses.
 */
static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sockaddr_storage store;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_nets *r_net;
	struct timeval tv;
	int req_prim = 0;

	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
		/* Invalid length */
		return;
	}
	sin = (struct sockaddr_in *)&store;
	sin6 = (struct sockaddr_in6 *)&store;

	/* rebuild the sockaddr we embedded in the heartbeat info */
	memset(&store, 0, sizeof(store));
	if (cp->heartbeat.hb_info.addr_family == AF_INET &&
	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
		sin->sin_family = cp->heartbeat.hb_info.addr_family;
		sin->sin_len = cp->heartbeat.hb_info.addr_len;
		sin->sin_port = stcb->rport;
		memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
		    sizeof(sin->sin_addr));
	} else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
	    cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
		sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
		sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
		sin6->sin6_port = stcb->rport;
		memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
		    sizeof(sin6->sin6_addr));
	} else {
		return;
	}
	r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
	if (r_net == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
		return;
	}
	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
		/*
		 * If it's a HB and its random value is correct we can
		 * confirm the destination.
		 */
		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
			stcb->asoc.primary_destination = r_net;
			r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
			r_net = TAILQ_FIRST(&stcb->asoc.nets);
			if (r_net != stcb->asoc.primary_destination) {
				/*
				 * first one on the list is NOT the primary
				 * sctp_cmpaddr() is much more efficient if
				 * the primary is the first on the list,
				 * make it so.
				 */
				TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
				TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
			}
			req_prim = 1;
		}
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
	}
	r_net->error_count = 0;
	r_net->hb_responded = 1;
	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
	if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
		r_net->dest_state |= SCTP_ADDR_REACHABLE;
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
		    SCTP_HEARTBEAT_SUCCESS, (void *)r_net, SCTP_SO_NOT_LOCKED);
		/* now was it the primary? if so restore */
		if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
			(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
		}
	}
	/*
	 * JRS 5/14/07 - If CMT PF is on and the destination is in PF state,
	 * set the destination to active state and set the cwnd to one or
	 * two MTU's based on whether PF1 or PF2 is being used. If a T3
	 * timer is running, for the destination, stop the timer because a
	 * PF-heartbeat was received.
	 *
	 * NOTE(review): this block operates on "net" (the net the chunk
	 * arrived on) while the rest of the function uses "r_net" (the net
	 * named in the HB info) - confirm this asymmetry is intentional.
	 */
	if (sctp_cmt_on_off && sctp_cmt_pf && (net->dest_state & SCTP_ADDR_PF) ==
	    SCTP_ADDR_PF) {
		if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, net,
			    SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
		}
		net->dest_state &= ~SCTP_ADDR_PF;
		net->cwnd = net->mtu * sctp_cmt_pf;
		SCTPDBG(SCTP_DEBUG_INPUT1, "Destination %p moved from PF to reachable with cwnd %d.\n",
		    net, net->cwnd);
	}
	/* Now lets do a RTO with this */
	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy);
	/* Mobility adaptation */
	if (req_prim) {
		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_BASE) ||
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_FASTHANDOFF)) &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_PRIM_DELETED)) {

			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_FASTHANDOFF)) {
				sctp_assoc_immediate_retrans(stcb,
				    stcb->asoc.primary_destination);
			}
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_BASE)) {
				sctp_move_chunks_from_deleted_prim(stcb,
				    stcb->asoc.primary_destination);
			}
			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
			    stcb->asoc.deleted_primary);
		}
	}
}

/*
 * Handle a peer ABORT: stop receive timers, notify the ULP, and free the
 * association.
 */
static void
sctp_handle_abort(struct sctp_abort_chunk *cp,
    struct
    sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return;

	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
#if defined(SCTP_PANIC_ON_ABORT)
	printf("stcb:%p state:%d rport:%d net:%p\n",
	    stcb, stcb->asoc.state, stcb->rport, net);
	if (!(stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
		panic("Received an ABORT");
	} else {
		printf("No panic its in state %x closed\n", stcb->asoc.state);
	}
#endif
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/* lock ordering: take the socket lock with the tcb unlocked */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	/*
	 * NOTE(review): SCTP_LOC_6 is also used for the timer stop at the
	 * top of this function; a distinct location code may have been
	 * intended here - confirm against sctp_constants.h.
	 */
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}

/*
 * Handle a peer SHUTDOWN: process the cumulative ack it carries, finish
 * any partial-delivery read in progress, move to SHUTDOWN-RECEIVED, and
 * if nothing remains queued reply with SHUTDOWN-ACK and move to
 * SHUTDOWN-ACK-SENT.  Ignored in COOKIE-WAIT/COOKIE-ECHOED or when the
 * chunk length is wrong.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	} else {
		sctp_update_acked(stcb, cp, net, abort_flag);
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
		/* start SHUTDOWN timer */
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
		    stcb, net);
	}
}

/*
 * Handle a peer SHUTDOWN-ACK: only valid in SHUTDOWN-SENT or
 * SHUTDOWN-ACK-SENT.  Finishes any partial-delivery read, reports any
 * still-queued outbound data as failed, stops the shutdown timer, sends
 * SHUTDOWN-COMPLETE, notifies the ULP and frees the association.
 */
static void
sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(stcb->sctp_ep);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
	    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* unexpected SHUTDOWN-ACK... so ignore... */
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* are the queues empty? */
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    !TAILQ_EMPTY(&asoc->out_wheel)) {
		sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
	/* send SHUTDOWN-COMPLETE */
	sctp_send_shutdown_complete(stcb, net);
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
			/* Set the connected flag to disconnected */
			stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
		}
	}
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB but first save off the ep */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
}

/*
 * Skip past the param header and then we will find the chunk that caused the
 * problem. There are two possibilities ASCONF or FWD-TSN other than that and
 * our peer must be broken.
 */
static void
sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
    struct sctp_nets *net)
{
	struct sctp_chunkhdr *chk;

	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
	switch (chk->chunk_type) {
	case SCTP_ASCONF_ACK:
	case SCTP_ASCONF:
		sctp_asconf_cleanup(stcb, net);
		break;
	case SCTP_FORWARD_CUM_TSN:
		/* peer can't do PR-SCTP; stop sending FWD-TSN */
		stcb->asoc.peer_supports_prsctp = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support chunk type %d(%x)??\n",
		    chk->chunk_type, (uint32_t) chk->chunk_type);
		break;
	}
}

/*
 * Skip past the param header and then we will find the param that caused the
 * problem. There are a number of param's in a ASCONF OR the prsctp param
 * these will turn off specific features.
 */
static void
sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
{
	struct sctp_paramhdr *pbad;

	/* the offending parameter immediately follows the error cause hdr */
	pbad = phdr + 1;
	switch (ntohs(pbad->param_type)) {
		/* pr-sctp draft */
	case SCTP_PRSCTP_SUPPORTED:
		stcb->asoc.peer_supports_prsctp = 0;
		break;
	case SCTP_SUPPORTED_CHUNK_EXT:
		break;
		/* draft-ietf-tsvwg-addip-sctp */
	case SCTP_ECN_NONCE_SUPPORTED:
		stcb->asoc.peer_supports_ecn_nonce = 0;
		stcb->asoc.ecn_nonce_allowed = 0;
		stcb->asoc.ecn_allowed = 0;
		break;
	case SCTP_ADD_IP_ADDRESS:
	case SCTP_DEL_IP_ADDRESS:
	case SCTP_SET_PRIM_ADDR:
		stcb->asoc.peer_supports_asconf = 0;
		break;
	case SCTP_SUCCESS_REPORT:
	case SCTP_ERROR_CAUSE_IND:
		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Turning off ASCONF to this strange peer\n");
		stcb->asoc.peer_supports_asconf = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support param type %d(%x)??\n",
		    pbad->param_type, (uint32_t) pbad->param_type);
		break;
	}
}

/* Walk the error causes of an OPERATION-ERROR chunk and react to each. */
static int
sctp_handle_error(struct sctp_chunkhdr *ch,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int chklen;
	struct sctp_paramhdr *phdr;
	uint16_t error_type;
	uint16_t error_len;
	struct sctp_association *asoc;
	int adjust;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/* parse through all of the errors and process */
	asoc = &stcb->asoc;
	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
	    sizeof(struct sctp_chunkhdr));
	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
		/* Process an Error Cause */
		error_type = ntohs(phdr->param_type);
		error_len = ntohs(phdr->param_length);
		if ((error_len > chklen) || (error_len == 0)) {
			/*
invalid param length for this param */ 971 SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n", 972 chklen, error_len); 973 return (0); 974 } 975 switch (error_type) { 976 case SCTP_CAUSE_INVALID_STREAM: 977 case SCTP_CAUSE_MISSING_PARAM: 978 case SCTP_CAUSE_INVALID_PARAM: 979 case SCTP_CAUSE_NO_USER_DATA: 980 SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n", 981 error_type); 982 break; 983 case SCTP_CAUSE_STALE_COOKIE: 984 /* 985 * We only act if we have echoed a cookie and are 986 * waiting. 987 */ 988 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 989 int *p; 990 991 p = (int *)((caddr_t)phdr + sizeof(*phdr)); 992 /* Save the time doubled */ 993 asoc->cookie_preserve_req = ntohl(*p) << 1; 994 asoc->stale_cookie_count++; 995 if (asoc->stale_cookie_count > 996 asoc->max_init_times) { 997 sctp_abort_notification(stcb, 0, SCTP_SO_NOT_LOCKED); 998 /* now free the asoc */ 999#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1000 so = SCTP_INP_SO(stcb->sctp_ep); 1001 atomic_add_int(&stcb->asoc.refcnt, 1); 1002 SCTP_TCB_UNLOCK(stcb); 1003 SCTP_SOCKET_LOCK(so, 1); 1004 SCTP_TCB_LOCK(stcb); 1005 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1006#endif 1007 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 1008 SCTP_FROM_SCTP_INPUT + SCTP_LOC_11); 1009#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1010 SCTP_SOCKET_UNLOCK(so, 1); 1011#endif 1012 return (-1); 1013 } 1014 /* blast back to INIT state */ 1015 asoc->state &= ~SCTP_STATE_COOKIE_ECHOED; 1016 asoc->state |= SCTP_STATE_COOKIE_WAIT; 1017 1018 sctp_stop_all_cookie_timers(stcb); 1019 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1020 } 1021 break; 1022 case SCTP_CAUSE_UNRESOLVABLE_ADDR: 1023 /* 1024 * Nothing we can do here, we don't do hostname 1025 * addresses so if the peer does not like my IPv6 1026 * (or IPv4 for that matter) it does not matter. 
If 1027 * they don't support that type of address, they can 1028 * NOT possibly get that packet type... i.e. with no 1029 * IPv6 you can't recieve a IPv6 packet. so we can 1030 * safely ignore this one. If we ever added support 1031 * for HOSTNAME Addresses, then we would need to do 1032 * something here. 1033 */ 1034 break; 1035 case SCTP_CAUSE_UNRECOG_CHUNK: 1036 sctp_process_unrecog_chunk(stcb, phdr, net); 1037 break; 1038 case SCTP_CAUSE_UNRECOG_PARAM: 1039 sctp_process_unrecog_param(stcb, phdr); 1040 break; 1041 case SCTP_CAUSE_COOKIE_IN_SHUTDOWN: 1042 /* 1043 * We ignore this since the timer will drive out a 1044 * new cookie anyway and there timer will drive us 1045 * to send a SHUTDOWN_COMPLETE. We can't send one 1046 * here since we don't have their tag. 1047 */ 1048 break; 1049 case SCTP_CAUSE_DELETING_LAST_ADDR: 1050 case SCTP_CAUSE_RESOURCE_SHORTAGE: 1051 case SCTP_CAUSE_DELETING_SRC_ADDR: 1052 /* 1053 * We should NOT get these here, but in a 1054 * ASCONF-ACK. 1055 */ 1056 SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n", 1057 error_type); 1058 break; 1059 case SCTP_CAUSE_OUT_OF_RESC: 1060 /* 1061 * And what, pray tell do we do with the fact that 1062 * the peer is out of resources? Not really sure we 1063 * could do anything but abort. I suspect this 1064 * should have came WITH an abort instead of in a 1065 * OP-ERROR. 
1066 */ 1067 break; 1068 default: 1069 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n", 1070 error_type); 1071 break; 1072 } 1073 adjust = SCTP_SIZE32(error_len); 1074 chklen -= adjust; 1075 phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust); 1076 } 1077 return (0); 1078} 1079 1080static int 1081sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, 1082 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 1083 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id) 1084{ 1085 struct sctp_init_ack *init_ack; 1086 struct mbuf *op_err; 1087 1088 SCTPDBG(SCTP_DEBUG_INPUT2, 1089 "sctp_handle_init_ack: handling INIT-ACK\n"); 1090 1091 if (stcb == NULL) { 1092 SCTPDBG(SCTP_DEBUG_INPUT2, 1093 "sctp_handle_init_ack: TCB is null\n"); 1094 return (-1); 1095 } 1096 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) { 1097 /* Invalid length */ 1098 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1099 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1100 op_err, 0); 1101 *abort_no_unlock = 1; 1102 return (-1); 1103 } 1104 init_ack = &cp->init; 1105 /* validate parameters */ 1106 if (init_ack->initiate_tag == 0) { 1107 /* protocol error... send an abort */ 1108 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1109 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1110 op_err, 0); 1111 *abort_no_unlock = 1; 1112 return (-1); 1113 } 1114 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) { 1115 /* protocol error... send an abort */ 1116 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 1117 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 1118 op_err, 0); 1119 *abort_no_unlock = 1; 1120 return (-1); 1121 } 1122 if (init_ack->num_inbound_streams == 0) { 1123 /* protocol error... 
send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	if (init_ack->num_outbound_streams == 0) {
		/* protocol error... send an abort */
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
		    op_err, 0);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* process according to association state... */
	switch (stcb->asoc.state & SCTP_STATE_MASK) {
	case SCTP_STATE_COOKIE_WAIT:
		/* this is the expected state for this chunk */
		/* process the INIT-ACK parameters */
		if (stcb->asoc.primary_destination->dest_state &
		    SCTP_ADDR_UNCONFIRMED) {
			/*
			 * The primary is where we sent the INIT, we can
			 * always consider it confirmed when the INIT-ACK is
			 * returned. Do this before we load addresses
			 * though.
			 */
			stcb->asoc.primary_destination->dest_state &=
			    ~SCTP_ADDR_UNCONFIRMED;
			sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
			    stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED);
		}
		if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
		    net, abort_no_unlock, vrf_id) < 0) {
			/* error in parsing parameters */
			return (-1);
		}
		/* update our state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
		SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED);

		/* reset the RTO calc */
		if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
		/*
		 * collapse the init timer back in case of a exponential
		 * backoff
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
		    stcb, net);
		/*
		 * the send at the end of the inbound data processing will
		 * cause the cookie to be sent
		 */
		break;
	case SCTP_STATE_SHUTDOWN_SENT:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_COOKIE_ECHOED:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_OPEN:
		/* incorrect state... discard */
		break;
	case SCTP_STATE_EMPTY:
	case SCTP_STATE_INUSE:
	default:
		/* incorrect state... discard */
		return (-1);
		break;		/* NOTREACHED */
	}
	SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n");
	return (0);
}


/*
 * handle a state cookie for an existing association m: input packet mbuf
 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a
 * "split" mbuf and the cookie signature does not exist offset: offset into
 * mbuf to the cookie-echo chunk
 *
 * Implements the COOKIE-ECHO collision table of RFC 4960 Section 5.2.4.
 * Returns the (possibly updated) stcb on success, NULL when the cookie
 * must be discarded. asoc->cookie_how[] records which path was taken,
 * for debugging.
 */
static struct sctp_tcb *
sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	int chk_length;
	int init_offset, initack_offset, i;
	int retval;
	int spec_flag = 0;
	uint32_t how_indx;

	/* I know that the TCB is non-NULL from the caller */
	asoc = &stcb->asoc;
	/* Find the first free slot in the cookie_how debug trail. */
	for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) {
		if (asoc->cookie_how[how_indx] == 0)
			break;
	}
	if (how_indx < sizeof(asoc->cookie_how)) {
		asoc->cookie_how[how_indx] = 1;
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* SHUTDOWN came in after sending INIT-ACK */
		struct mbuf *op_err;
		struct sctp_paramhdr *ph;

		sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
		op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (op_err == NULL) {
			/* FOOBAR */
			return (NULL);
		}
		/* pre-reserve some space */
		SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr));
		SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr));
		SCTP_BUF_RESV_UF(op_err, sizeof(struct
sctp_chunkhdr));
		/* Set the len */
		SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr);
		ph = mtod(op_err, struct sctp_paramhdr *);
		ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN);
		ph->param_length = htons(sizeof(struct sctp_paramhdr));
		sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag,
		    vrf_id);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 2;
		return (NULL);
	}
	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	/* NOTE: this also advances the caller-supplied offset past the echo header. */
	init_offset = offset += sizeof(struct sctp_cookie_echo_chunk);

	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		return (NULL);
	}
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) &&
	    (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) {
		/*
		 * case D in Section 5.2.4 Table 2: MMAA process accordingly
		 * to get into the OPEN state
		 */
		if (ntohl(initack_cp->init.initial_tsn) !=
		    asoc->init_seq_number) {
			/*-
			 * Opps, this means that we somehow generated two vtag's
			 * the same. I.e. we did:
			 *  Us               Peer
			 *   <---INIT(tag=a)------
			 *   ----INIT-ACK(tag=t)-->
			 *   ----INIT(tag=t)------> *1
			 *   <---INIT-ACK(tag=a)---
			 *   <----CE(tag=t)------------- *2
			 *
			 * At point *1 we should be generating a different
			 * tag t'. Which means we would throw away the CE and send
			 * ours instead. Basically this is case C (throw away side).
			 */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 17;
			return (NULL);

		}
		switch SCTP_GET_STATE
			(asoc) {
		case SCTP_STATE_COOKIE_WAIT:
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * INIT was sent but got a COOKIE_ECHO with the
			 * correct tags... just accept it...but we must
			 * process the init so that we can make sure we have
			 * the right seq no's.
			 */
			/* First we must process the INIT !! */
			retval = sctp_process_init(init_cp, stcb, net);
			if (retval < 0) {
				if (how_indx < sizeof(asoc->cookie_how))
					asoc->cookie_how[how_indx] = 3;
				return (NULL);
			}
			/* we have already processed the INIT so no problem */
			sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12);
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13);
			/* update current state */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);

			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
			if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
				    stcb->sctp_ep, stcb, asoc->primary_destination);
			}
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
			sctp_stop_all_cookie_timers(stcb);
			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)
			    ) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				struct socket *so;

#endif
				/*
				 * Here is where collision would go if we
				 * did a connect() and instead got a
				 * init/init-ack/cookie done before the
				 * init-ack came back..
				 */
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				/*
				 * Lock-order dance: socket lock must be taken
				 * before the TCB lock, so drop/retake while a
				 * refcount pins the assoc.
				 */
				so = SCTP_INP_SO(stcb->sctp_ep);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_add_int(&stcb->asoc.refcnt, -1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return (NULL);
				}
#endif
				soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
			}
			/* notify upper layer */
			*notification = SCTP_NOTIFY_ASSOC_UP;
			/*
			 * since we did not send a HB make sure we don't
			 * double things
			 */
			net->hb_responded = 1;
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &cookie->time_entered, sctp_align_unsafe_makecopy);

			if (stcb->asoc.sctp_autoclose_ticks &&
			    (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) {
				sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
				    inp, stcb, NULL);
			}
			break;
		default:
			/*
			 * we're in the OPEN state (or beyond), so peer must
			 * have simply lost the COOKIE-ACK
			 */
			break;
		}		/* end switch */
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * We ignore the return code here.. not sure if we should
		 * somehow abort.. but we do have an existing asoc. This
		 * really should not fail.
		 */
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 4;
			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 5;
		return (stcb);
	}
	if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag &&
	    cookie->tie_tag_my_vtag == 0 &&
	    cookie->tie_tag_peer_vtag == 0) {
		/*
		 * case C in Section 5.2.4 Table 2: XMOO silently discard
		 */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 6;
		return (NULL);
	}
	if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag &&
	    (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag ||
	    init_cp->init.initiate_tag == 0)) {
		/*
		 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info
		 * should be ok, re-accept peer info
		 */
		if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) {
			/*
			 * Extension of case C. If we hit this, then the
			 * random number generator returned the same vtag
			 * when we first sent our INIT-ACK and when we later
			 * sent our INIT. The side with the seq numbers that
			 * are different will be the one that normnally
			 * would have hit case C. This in effect "extends"
			 * our vtags in this collision case to be 64 bits.
			 * The same collision could occur aka you get both
			 * vtag and seq number the same twice in a row.. but
			 * is much less likely. If it did happen then we
			 * would proceed through and bring up the assoc.. we
			 * may end up with the wrong stream setup however..
			 * which would be bad.. but there is no way to
			 * tell.. until we send on a stream that does not
			 * exist :-)
			 */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 7;

			return (NULL);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 8;
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14);
		sctp_stop_all_cookie_timers(stcb);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;
		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    NULL);
		}
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
		asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);

		/* Note last_cwr_tsn? where is this used? */
		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) {
			/*
			 * Ok the peer probably discarded our data (if we
			 * echoed a cookie+data). So anything on the
			 * sent_queue should be marked for retransmit, we
			 * may not get something to kick us so it COULD
			 * still take a timeout to move these.. but it can't
			 * hurt to mark them.
			 */
			struct sctp_tmit_chunk *chk;

			TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
				if (chk->sent < SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_flight_size_decrease(chk);
					sctp_total_flight_decrease(stcb, chk);
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
					spec_flag++;
				}
			}

		}
		/* process the INIT info (peer's info) */
		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 9;
			return (NULL);
		}
		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 10;
			return (NULL);
		}
		if ((asoc->state & SCTP_STATE_COOKIE_WAIT) ||
		    (asoc->state & SCTP_STATE_COOKIE_ECHOED)) {
			*notification = SCTP_NOTIFY_ASSOC_UP;

			if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
			    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
			    (inp->sctp_socket->so_qlimit == 0)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				struct socket *so;

#endif
				stcb->sctp_ep->sctp_flags |=
				    SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				/* Same socket-before-TCB lock dance as case D above. */
				so = SCTP_INP_SO(stcb->sctp_ep);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_add_int(&stcb->asoc.refcnt, -1);
				if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
					SCTP_SOCKET_UNLOCK(so, 1);
					return (NULL);
				}
#endif
				soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
			}
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)
				SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
			else
				SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		} else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_COUNTER32(sctps_restartestab);
		} else {
			SCTP_STAT_INCR_COUNTER32(sctps_collisionestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);
		}
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (spec_flag) {
			/*
			 * only if we have retrans set do we do this. What
			 * this call does is get only the COOKIE-ACK out and
			 * then when we return the normal call to
			 * sctp_chunk_output will get the retrans out behind
			 * this.
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED);
		}
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 11;

		return (stcb);
	}
	if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag &&
	    ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) &&
	    cookie->tie_tag_my_vtag == asoc->my_vtag_nonce &&
	    cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce &&
	    cookie->tie_tag_peer_vtag != 0) {
		struct sctpasochead *head;

		/*
		 * case A in Section 5.2.4 Table 2: XXMM (peer restarted)
		 */
		/* temp code */
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 12;
		sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);

		*sac_assoc_id = sctp_get_associd(stcb);
		/* notify upper layer */
		*notification = SCTP_NOTIFY_ASSOC_RESTART;
		atomic_add_int(&stcb->asoc.refcnt, 1);
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		}
		if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) {
			SCTP_STAT_INCR_GAUGE32(sctps_restartestab);
		} else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
			SCTP_STAT_INCR_GAUGE32(sctps_collisionestab);
		}
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		} else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) {
			/* move to OPEN state, if not in SHUTDOWN_SENT */
			SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		}
		/* Re-seed all sequence state from the restarted peer's cookie. */
		asoc->pre_open_streams =
		    ntohs(initack_cp->init.num_outbound_streams);
		asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
		asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;

		asoc->last_cwr_tsn = asoc->init_seq_number - 1;
		asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;

		asoc->str_reset_seq_in = asoc->init_seq_number;

		asoc->advanced_peer_ack_point = asoc->last_acked_seq;
		if (asoc->mapping_array) {
			memset(asoc->mapping_array, 0,
			    asoc->mapping_array_size);
		}
		/*
		 * Rehashing under new vtag needs INP_INFO and INP write
		 * locks; drop the TCB lock first (refcnt taken above keeps
		 * the assoc alive across the gap).
		 */
		SCTP_TCB_UNLOCK(stcb);
		SCTP_INP_INFO_WLOCK();
		SCTP_INP_WLOCK(stcb->sctp_ep);
		SCTP_TCB_LOCK(stcb);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		/* send up all the data */
		SCTP_TCB_SEND_LOCK(stcb);

		sctp_report_all_outbound(stcb, 1, SCTP_SO_NOT_LOCKED);
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].stream_no = i;
			stcb->asoc.strmout[i].next_sequence_sent = 0;
			stcb->asoc.strmout[i].last_msg_incomplete = 0;
		}
		/* process the INIT-ACK info (my info) */
		asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
		asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);

		/* pull from vtag hash */
		LIST_REMOVE(stcb, sctp_asocs);
		/* re-insert to new vtag position */
		head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag,
		    sctppcbinfo.hashasocmark)];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);

		/* Is this the first restart? */
		if (stcb->asoc.in_restart_hash == 0) {
			/* Ok add it to assoc_id vtag hash */
			head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id,
			    sctppcbinfo.hashrestartmark)];
			LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash);
			stcb->asoc.in_restart_hash = 1;
		}
		/* process the INIT info (peer's info) */
		SCTP_TCB_SEND_UNLOCK(stcb);
		SCTP_INP_WUNLOCK(stcb->sctp_ep);
		SCTP_INP_INFO_WUNLOCK();

		retval = sctp_process_init(init_cp, stcb, net);
		if (retval < 0) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 13;

			return (NULL);
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (sctp_load_addresses_from_init(stcb, m, iphlen,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, sh, init_src)) {
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 14;

			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 15;

		return (stcb);
	}
	if (how_indx < sizeof(asoc->cookie_how))
		asoc->cookie_how[how_indx] = 16;
	/* all other cases...
*/ 1723 return (NULL); 1724} 1725 1726 1727/* 1728 * handle a state cookie for a new association m: input packet mbuf chain-- 1729 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf 1730 * and the cookie signature does not exist offset: offset into mbuf to the 1731 * cookie-echo chunk length: length of the cookie chunk to: where the init 1732 * was from returns a new TCB 1733 */ 1734static struct sctp_tcb * 1735sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, 1736 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1737 struct sctp_inpcb *inp, struct sctp_nets **netp, 1738 struct sockaddr *init_src, int *notification, 1739 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 1740 uint32_t vrf_id) 1741{ 1742 struct sctp_tcb *stcb; 1743 struct sctp_init_chunk *init_cp, init_buf; 1744 struct sctp_init_ack_chunk *initack_cp, initack_buf; 1745 struct sockaddr_storage sa_store; 1746 struct sockaddr *initack_src = (struct sockaddr *)&sa_store; 1747 struct sockaddr_in *sin; 1748 struct sockaddr_in6 *sin6; 1749 struct sctp_association *asoc; 1750 int chk_length; 1751 int init_offset, initack_offset, initack_limit; 1752 int retval; 1753 int error = 0; 1754 uint32_t old_tag; 1755 uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE]; 1756 1757#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1758 struct socket *so; 1759 1760 so = SCTP_INP_SO(inp); 1761#endif 1762 1763 /* 1764 * find and validate the INIT chunk in the cookie (peer's info) the 1765 * INIT should start after the cookie-echo header struct (chunk 1766 * header, state cookie header struct) 1767 */ 1768 init_offset = offset + sizeof(struct sctp_cookie_echo_chunk); 1769 init_cp = (struct sctp_init_chunk *) 1770 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1771 (uint8_t *) & init_buf); 1772 if (init_cp == NULL) { 1773 /* could not pull a INIT chunk in cookie */ 1774 SCTPDBG(SCTP_DEBUG_INPUT1, 1775 "process_cookie_new: could not pull INIT 
chunk hdr\n"); 1776 return (NULL); 1777 } 1778 chk_length = ntohs(init_cp->ch.chunk_length); 1779 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1780 SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n"); 1781 return (NULL); 1782 } 1783 initack_offset = init_offset + SCTP_SIZE32(chk_length); 1784 /* 1785 * find and validate the INIT-ACK chunk in the cookie (my info) the 1786 * INIT-ACK follows the INIT chunk 1787 */ 1788 initack_cp = (struct sctp_init_ack_chunk *) 1789 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1790 (uint8_t *) & initack_buf); 1791 if (initack_cp == NULL) { 1792 /* could not pull INIT-ACK chunk in cookie */ 1793 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n"); 1794 return (NULL); 1795 } 1796 chk_length = ntohs(initack_cp->ch.chunk_length); 1797 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1798 return (NULL); 1799 } 1800 /* 1801 * NOTE: We can't use the INIT_ACK's chk_length to determine the 1802 * "initack_limit" value. This is because the chk_length field 1803 * includes the length of the cookie, but the cookie is omitted when 1804 * the INIT and INIT_ACK are tacked onto the cookie... 1805 */ 1806 initack_limit = offset + cookie_len; 1807 1808 /* 1809 * now that we know the INIT/INIT-ACK are in place, create a new TCB 1810 * and popluate 1811 */ 1812 1813 /* 1814 * Here we do a trick, we set in NULL for the proc/thread argument. 1815 * We do this since in effect we only use the p argument when the 1816 * socket is unbound and we must do an implicit bind. Since we are 1817 * getting a cookie, we cannot be unbound. 1818 */ 1819 stcb = sctp_aloc_assoc(inp, init_src, 0, &error, 1820 ntohl(initack_cp->init.initiate_tag), vrf_id, 1821 (struct thread *)NULL 1822 ); 1823 if (stcb == NULL) { 1824 struct mbuf *op_err; 1825 1826 /* memory problem? 
*/ 1827 SCTPDBG(SCTP_DEBUG_INPUT1, 1828 "process_cookie_new: no room for another TCB!\n"); 1829 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1830 1831 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 1832 sh, op_err, vrf_id); 1833 return (NULL); 1834 } 1835 /* get the correct sctp_nets */ 1836 if (netp) 1837 *netp = sctp_findnet(stcb, init_src); 1838 1839 asoc = &stcb->asoc; 1840 /* get scope variables out of cookie */ 1841 asoc->ipv4_local_scope = cookie->ipv4_scope; 1842 asoc->site_scope = cookie->site_scope; 1843 asoc->local_scope = cookie->local_scope; 1844 asoc->loopback_scope = cookie->loopback_scope; 1845 1846 if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) || 1847 (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) { 1848 struct mbuf *op_err; 1849 1850 /* 1851 * Houston we have a problem. The EP changed while the 1852 * cookie was in flight. Only recourse is to abort the 1853 * association. 1854 */ 1855 atomic_add_int(&stcb->asoc.refcnt, 1); 1856 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1857 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 1858 sh, op_err, vrf_id); 1859#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1860 SCTP_TCB_UNLOCK(stcb); 1861 SCTP_SOCKET_LOCK(so, 1); 1862 SCTP_TCB_LOCK(stcb); 1863#endif 1864 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, 1865 SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1866#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1867 SCTP_SOCKET_UNLOCK(so, 1); 1868#endif 1869 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1870 return (NULL); 1871 } 1872 /* process the INIT-ACK info (my info) */ 1873 old_tag = asoc->my_vtag; 1874 asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1875 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1876 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1877 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1878 asoc->sending_seq = asoc->asconf_seq_out = 
asoc->str_reset_seq_out = asoc->init_seq_number; 1879 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1880 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1881 asoc->str_reset_seq_in = asoc->init_seq_number; 1882 1883 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1884 1885 /* process the INIT info (peer's info) */ 1886 if (netp) 1887 retval = sctp_process_init(init_cp, stcb, *netp); 1888 else 1889 retval = 0; 1890 if (retval < 0) { 1891 atomic_add_int(&stcb->asoc.refcnt, 1); 1892#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1893 SCTP_TCB_UNLOCK(stcb); 1894 SCTP_SOCKET_LOCK(so, 1); 1895 SCTP_TCB_LOCK(stcb); 1896#endif 1897 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1898#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1899 SCTP_SOCKET_UNLOCK(so, 1); 1900#endif 1901 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1902 return (NULL); 1903 } 1904 /* load all addresses */ 1905 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1906 init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh, 1907 init_src)) { 1908 atomic_add_int(&stcb->asoc.refcnt, 1); 1909#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1910 SCTP_TCB_UNLOCK(stcb); 1911 SCTP_SOCKET_LOCK(so, 1); 1912 SCTP_TCB_LOCK(stcb); 1913#endif 1914 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17); 1915#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1916 SCTP_SOCKET_UNLOCK(so, 1); 1917#endif 1918 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1919 return (NULL); 1920 } 1921 /* 1922 * verify any preceding AUTH chunk that was skipped 1923 */ 1924 /* pull the local authentication parameters from the cookie/init-ack */ 1925 sctp_auth_get_cookie_params(stcb, m, 1926 initack_offset + sizeof(struct sctp_init_ack_chunk), 1927 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk))); 1928 if (auth_skipped) { 1929 struct sctp_auth_chunk *auth; 1930 1931 auth = (struct 
sctp_auth_chunk *) 1932 sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf); 1933 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) { 1934 /* auth HMAC failed, dump the assoc and packet */ 1935 SCTPDBG(SCTP_DEBUG_AUTH1, 1936 "COOKIE-ECHO: AUTH failed\n"); 1937 atomic_add_int(&stcb->asoc.refcnt, 1); 1938#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1939 SCTP_TCB_UNLOCK(stcb); 1940 SCTP_SOCKET_LOCK(so, 1); 1941 SCTP_TCB_LOCK(stcb); 1942#endif 1943 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18); 1944#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1945 SCTP_SOCKET_UNLOCK(so, 1); 1946#endif 1947 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1948 return (NULL); 1949 } else { 1950 /* remaining chunks checked... good to go */ 1951 stcb->asoc.authenticated = 1; 1952 } 1953 } 1954 /* update current state */ 1955 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 1956 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1957 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1958 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1959 stcb->sctp_ep, stcb, asoc->primary_destination); 1960 } 1961 sctp_stop_all_cookie_timers(stcb); 1962 SCTP_STAT_INCR_COUNTER32(sctps_passiveestab); 1963 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1964 1965 /* 1966 * if we're doing ASCONFs, check to see if we have any new local 1967 * addresses that need to get added to the peer (eg. addresses 1968 * changed while cookie echo in flight). This needs to be done 1969 * after we go to the OPEN state to do the correct asconf 1970 * processing. else, make sure we have the correct addresses in our 1971 * lists 1972 */ 1973 1974 /* warning, we re-use sin, sin6, sa_store here! 
*/ 1975 /* pull in local_address (our "from" address) */ 1976 if (cookie->laddr_type == SCTP_IPV4_ADDRESS) { 1977 /* source addr is IPv4 */ 1978 sin = (struct sockaddr_in *)initack_src; 1979 memset(sin, 0, sizeof(*sin)); 1980 sin->sin_family = AF_INET; 1981 sin->sin_len = sizeof(struct sockaddr_in); 1982 sin->sin_addr.s_addr = cookie->laddress[0]; 1983 } else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) { 1984 /* source addr is IPv6 */ 1985 sin6 = (struct sockaddr_in6 *)initack_src; 1986 memset(sin6, 0, sizeof(*sin6)); 1987 sin6->sin6_family = AF_INET6; 1988 sin6->sin6_len = sizeof(struct sockaddr_in6); 1989 sin6->sin6_scope_id = cookie->scope_id; 1990 memcpy(&sin6->sin6_addr, cookie->laddress, 1991 sizeof(sin6->sin6_addr)); 1992 } else { 1993 atomic_add_int(&stcb->asoc.refcnt, 1); 1994#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1995 SCTP_TCB_UNLOCK(stcb); 1996 SCTP_SOCKET_LOCK(so, 1); 1997 SCTP_TCB_LOCK(stcb); 1998#endif 1999 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19); 2000#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2001 SCTP_SOCKET_UNLOCK(so, 1); 2002#endif 2003 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2004 return (NULL); 2005 } 2006 2007 /* set up to notify upper layer */ 2008 *notification = SCTP_NOTIFY_ASSOC_UP; 2009 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2010 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 2011 (inp->sctp_socket->so_qlimit == 0)) { 2012 /* 2013 * This is an endpoint that called connect() how it got a 2014 * cookie that is NEW is a bit of a mystery. It must be that 2015 * the INIT was sent, but before it got there.. a complete 2016 * INIT/INIT-ACK/COOKIE arrived. But of course then it 2017 * should have went to the other code.. not here.. oh well.. 2018 * a bit of protection is worth having.. 
2019 */ 2020 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 2021#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2022 atomic_add_int(&stcb->asoc.refcnt, 1); 2023 SCTP_TCB_UNLOCK(stcb); 2024 SCTP_SOCKET_LOCK(so, 1); 2025 SCTP_TCB_LOCK(stcb); 2026 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2027 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2028 SCTP_SOCKET_UNLOCK(so, 1); 2029 return (NULL); 2030 } 2031#endif 2032 soisconnected(stcb->sctp_socket); 2033#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2034 SCTP_SOCKET_UNLOCK(so, 1); 2035#endif 2036 } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 2037 (inp->sctp_socket->so_qlimit)) { 2038 /* 2039 * We don't want to do anything with this one. Since it is 2040 * the listening guy. The timer will get started for 2041 * accepted connections in the caller. 2042 */ 2043 ; 2044 } 2045 /* since we did not send a HB make sure we don't double things */ 2046 if ((netp) && (*netp)) 2047 (*netp)->hb_responded = 1; 2048 2049 if (stcb->asoc.sctp_autoclose_ticks && 2050 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 2051 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL); 2052 } 2053 /* calculate the RTT */ 2054 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 2055 if ((netp) && (*netp)) { 2056 (*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp, 2057 &cookie->time_entered, sctp_align_unsafe_makecopy); 2058 } 2059 /* respond with a COOKIE-ACK */ 2060 sctp_send_cookie_ack(stcb); 2061 2062 /* 2063 * check the address lists for any ASCONFs that need to be sent 2064 * AFTER the cookie-ack is sent 2065 */ 2066 sctp_check_address_list(stcb, m, 2067 initack_offset + sizeof(struct sctp_init_ack_chunk), 2068 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)), 2069 initack_src, cookie->local_scope, cookie->site_scope, 2070 cookie->ipv4_scope, cookie->loopback_scope); 2071 2072 2073 return (stcb); 2074} 2075 2076 2077/* 2078 * handles a COOKIE-ECHO message 
stcb: modified to either a new or left as 2079 * existing (non-NULL) TCB 2080 */ 2081static struct mbuf * 2082sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 2083 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 2084 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 2085 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2086 struct sctp_tcb **locked_tcb, uint32_t vrf_id) 2087{ 2088 struct sctp_state_cookie *cookie; 2089 struct sockaddr_in6 sin6; 2090 struct sockaddr_in sin; 2091 struct sctp_tcb *l_stcb = *stcb; 2092 struct sctp_inpcb *l_inp; 2093 struct sockaddr *to; 2094 sctp_assoc_t sac_restart_id; 2095 struct sctp_pcb *ep; 2096 struct mbuf *m_sig; 2097 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 2098 uint8_t *sig; 2099 uint8_t cookie_ok = 0; 2100 unsigned int size_of_pkt, sig_offset, cookie_offset; 2101 unsigned int cookie_len; 2102 struct timeval now; 2103 struct timeval time_expires; 2104 struct sockaddr_storage dest_store; 2105 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store; 2106 struct ip *iph; 2107 int notification = 0; 2108 struct sctp_nets *netl; 2109 int had_a_existing_tcb = 0; 2110 2111 SCTPDBG(SCTP_DEBUG_INPUT2, 2112 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 2113 2114 if (inp_p == NULL) { 2115 return (NULL); 2116 } 2117 /* First get the destination address setup too. 
*/ 2118 iph = mtod(m, struct ip *); 2119 if (iph->ip_v == IPVERSION) { 2120 /* its IPv4 */ 2121 struct sockaddr_in *lsin; 2122 2123 lsin = (struct sockaddr_in *)(localep_sa); 2124 memset(lsin, 0, sizeof(*lsin)); 2125 lsin->sin_family = AF_INET; 2126 lsin->sin_len = sizeof(*lsin); 2127 lsin->sin_port = sh->dest_port; 2128 lsin->sin_addr.s_addr = iph->ip_dst.s_addr; 2129 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph); 2130 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 2131 /* its IPv6 */ 2132 struct ip6_hdr *ip6; 2133 struct sockaddr_in6 *lsin6; 2134 2135 lsin6 = (struct sockaddr_in6 *)(localep_sa); 2136 memset(lsin6, 0, sizeof(*lsin6)); 2137 lsin6->sin6_family = AF_INET6; 2138 lsin6->sin6_len = sizeof(struct sockaddr_in6); 2139 ip6 = mtod(m, struct ip6_hdr *); 2140 lsin6->sin6_port = sh->dest_port; 2141 lsin6->sin6_addr = ip6->ip6_dst; 2142 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen; 2143 } else { 2144 return (NULL); 2145 } 2146 2147 cookie = &cp->cookie; 2148 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 2149 cookie_len = ntohs(cp->ch.chunk_length); 2150 2151 if ((cookie->peerport != sh->src_port) && 2152 (cookie->myport != sh->dest_port) && 2153 (cookie->my_vtag != sh->v_tag)) { 2154 /* 2155 * invalid ports or bad tag. Note that we always leave the 2156 * v_tag in the header in network order and when we stored 2157 * it in the my_vtag slot we also left it in network order. 2158 * This maintains the match even though it may be in the 2159 * opposite byte order of the machine :-> 2160 */ 2161 return (NULL); 2162 } 2163 if (cookie_len > size_of_pkt || 2164 cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 2165 sizeof(struct sctp_init_chunk) + 2166 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 2167 /* cookie too long! or too small */ 2168 return (NULL); 2169 } 2170 /* 2171 * split off the signature into its own mbuf (since it should not be 2172 * calculated in the sctp_hmac_m() call). 
2173 */ 2174 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 2175 if (sig_offset > size_of_pkt) { 2176 /* packet not correct size! */ 2177 /* XXX this may already be accounted for earlier... */ 2178 return (NULL); 2179 } 2180 m_sig = m_split(m, sig_offset, M_DONTWAIT); 2181 if (m_sig == NULL) { 2182 /* out of memory or ?? */ 2183 return (NULL); 2184 } 2185#ifdef SCTP_MBUF_LOGGING 2186 if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) { 2187 struct mbuf *mat; 2188 2189 mat = m_sig; 2190 while (mat) { 2191 if (SCTP_BUF_IS_EXTENDED(mat)) { 2192 sctp_log_mb(mat, SCTP_MBUF_SPLIT); 2193 } 2194 mat = SCTP_BUF_NEXT(mat); 2195 } 2196 } 2197#endif 2198 2199 /* 2200 * compute the signature/digest for the cookie 2201 */ 2202 ep = &(*inp_p)->sctp_ep; 2203 l_inp = *inp_p; 2204 if (l_stcb) { 2205 SCTP_TCB_UNLOCK(l_stcb); 2206 } 2207 SCTP_INP_RLOCK(l_inp); 2208 if (l_stcb) { 2209 SCTP_TCB_LOCK(l_stcb); 2210 } 2211 /* which cookie is it? */ 2212 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 2213 (ep->current_secret_number != ep->last_secret_number)) { 2214 /* it's the old cookie */ 2215 (void)sctp_hmac_m(SCTP_HMAC, 2216 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2217 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2218 } else { 2219 /* it's the current cookie */ 2220 (void)sctp_hmac_m(SCTP_HMAC, 2221 (uint8_t *) ep->secret_key[(int)ep->current_secret_number], 2222 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2223 } 2224 /* get the signature */ 2225 SCTP_INP_RUNLOCK(l_inp); 2226 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 2227 if (sig == NULL) { 2228 /* couldn't find signature */ 2229 sctp_m_freem(m_sig); 2230 return (NULL); 2231 } 2232 /* compare the received digest with the computed digest */ 2233 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 2234 /* try the old cookie? 
*/ 2235 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 2236 (ep->current_secret_number != ep->last_secret_number)) { 2237 /* compute digest with old */ 2238 (void)sctp_hmac_m(SCTP_HMAC, 2239 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2240 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2241 /* compare */ 2242 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 2243 cookie_ok = 1; 2244 } 2245 } else { 2246 cookie_ok = 1; 2247 } 2248 2249 /* 2250 * Now before we continue we must reconstruct our mbuf so that 2251 * normal processing of any other chunks will work. 2252 */ 2253 { 2254 struct mbuf *m_at; 2255 2256 m_at = m; 2257 while (SCTP_BUF_NEXT(m_at) != NULL) { 2258 m_at = SCTP_BUF_NEXT(m_at); 2259 } 2260 SCTP_BUF_NEXT(m_at) = m_sig; 2261 } 2262 2263 if (cookie_ok == 0) { 2264 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 2265 SCTPDBG(SCTP_DEBUG_INPUT2, 2266 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 2267 (uint32_t) offset, cookie_offset, sig_offset); 2268 return (NULL); 2269 } 2270 /* 2271 * check the cookie timestamps to be sure it's not stale 2272 */ 2273 (void)SCTP_GETTIME_TIMEVAL(&now); 2274 /* Expire time is in Ticks, so we convert to seconds */ 2275 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); 2276 time_expires.tv_usec = cookie->time_entered.tv_usec; 2277 if (timevalcmp(&now, &time_expires, >)) { 2278 /* cookie is stale! 
*/ 2279 struct mbuf *op_err; 2280 struct sctp_stale_cookie_msg *scm; 2281 uint32_t tim; 2282 2283 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg), 2284 0, M_DONTWAIT, 1, MT_DATA); 2285 if (op_err == NULL) { 2286 /* FOOBAR */ 2287 return (NULL); 2288 } 2289 /* pre-reserve some space */ 2290 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 2291 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 2292 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 2293 2294 /* Set the len */ 2295 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg); 2296 scm = mtod(op_err, struct sctp_stale_cookie_msg *); 2297 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE); 2298 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) + 2299 (sizeof(uint32_t)))); 2300 /* seconds to usec */ 2301 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 2302 /* add in usec */ 2303 if (tim == 0) 2304 tim = now.tv_usec - cookie->time_entered.tv_usec; 2305 scm->time_usec = htonl(tim); 2306 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag, 2307 vrf_id); 2308 return (NULL); 2309 } 2310 /* 2311 * Now we must see with the lookup address if we have an existing 2312 * asoc. This will only happen if we were in the COOKIE-WAIT state 2313 * and a INIT collided with us and somewhere the peer sent the 2314 * cookie on another address besides the single address our assoc 2315 * had for him. In this case we will have one of the tie-tags set at 2316 * least AND the address field in the cookie can be used to look it 2317 * up. 
2318 */ 2319 to = NULL; 2320 if (cookie->addr_type == SCTP_IPV6_ADDRESS) { 2321 memset(&sin6, 0, sizeof(sin6)); 2322 sin6.sin6_family = AF_INET6; 2323 sin6.sin6_len = sizeof(sin6); 2324 sin6.sin6_port = sh->src_port; 2325 sin6.sin6_scope_id = cookie->scope_id; 2326 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 2327 sizeof(sin6.sin6_addr.s6_addr)); 2328 to = (struct sockaddr *)&sin6; 2329 } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) { 2330 memset(&sin, 0, sizeof(sin)); 2331 sin.sin_family = AF_INET; 2332 sin.sin_len = sizeof(sin); 2333 sin.sin_port = sh->src_port; 2334 sin.sin_addr.s_addr = cookie->address[0]; 2335 to = (struct sockaddr *)&sin; 2336 } else { 2337 /* This should not happen */ 2338 return (NULL); 2339 } 2340 if ((*stcb == NULL) && to) { 2341 /* Yep, lets check */ 2342 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL); 2343 if (*stcb == NULL) { 2344 /* 2345 * We should have only got back the same inp. If we 2346 * got back a different ep we have a problem. The 2347 * original findep got back l_inp and now 2348 */ 2349 if (l_inp != *inp_p) { 2350 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2351 } 2352 } else { 2353 if (*locked_tcb == NULL) { 2354 /* 2355 * In this case we found the assoc only 2356 * after we locked the create lock. This 2357 * means we are in a colliding case and we 2358 * must make sure that we unlock the tcb if 2359 * its one of the cases where we throw away 2360 * the incoming packets. 2361 */ 2362 *locked_tcb = *stcb; 2363 2364 /* 2365 * We must also increment the inp ref count 2366 * since the ref_count flags was set when we 2367 * did not find the TCB, now we found it 2368 * which reduces the refcount.. we must 2369 * raise it back out to balance it all :-) 2370 */ 2371 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2372 if ((*stcb)->sctp_ep != l_inp) { 2373 SCTP_PRINTF("Huh? 
ep:%p diff then l_inp:%p?\n", 2374 (*stcb)->sctp_ep, l_inp); 2375 } 2376 } 2377 } 2378 } 2379 if (to == NULL) 2380 return (NULL); 2381 2382 cookie_len -= SCTP_SIGNATURE_SIZE; 2383 if (*stcb == NULL) { 2384 /* this is the "normal" case... get a new TCB */ 2385 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie, 2386 cookie_len, *inp_p, netp, to, ¬ification, 2387 auth_skipped, auth_offset, auth_len, vrf_id); 2388 } else { 2389 /* this is abnormal... cookie-echo on existing TCB */ 2390 had_a_existing_tcb = 1; 2391 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh, 2392 cookie, cookie_len, *inp_p, *stcb, *netp, to, 2393 ¬ification, &sac_restart_id, vrf_id); 2394 } 2395 2396 if (*stcb == NULL) { 2397 /* still no TCB... must be bad cookie-echo */ 2398 return (NULL); 2399 } 2400 /* 2401 * Ok, we built an association so confirm the address we sent the 2402 * INIT-ACK to. 2403 */ 2404 netl = sctp_findnet(*stcb, to); 2405 /* 2406 * This code should in theory NOT run but 2407 */ 2408 if (netl == NULL) { 2409 /* TSNH! Huh, why do I need to add this address here? */ 2410 int ret; 2411 2412 ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE, 2413 SCTP_IN_COOKIE_PROC); 2414 netl = sctp_findnet(*stcb, to); 2415 } 2416 if (netl) { 2417 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2418 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2419 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2420 netl); 2421 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2422 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2423 } 2424 } 2425 if (*stcb) { 2426 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p, 2427 *stcb, NULL); 2428 } 2429 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2430 if (!had_a_existing_tcb || 2431 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2432 /* 2433 * If we have a NEW cookie or the connect never 2434 * reached the connected state during collision we 2435 * must do the TCP accept thing. 
2436 */ 2437 struct socket *so, *oso; 2438 struct sctp_inpcb *inp; 2439 2440 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2441 /* 2442 * For a restart we will keep the same 2443 * socket, no need to do anything. I THINK!! 2444 */ 2445 sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id, SCTP_SO_NOT_LOCKED); 2446 return (m); 2447 } 2448 oso = (*inp_p)->sctp_socket; 2449 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2450 SCTP_TCB_UNLOCK((*stcb)); 2451 so = sonewconn(oso, 0 2452 ); 2453 SCTP_TCB_LOCK((*stcb)); 2454 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2455 2456 if (so == NULL) { 2457 struct mbuf *op_err; 2458 2459#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2460 struct socket *pcb_so; 2461 2462#endif 2463 /* Too many sockets */ 2464 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2465 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2466 sctp_abort_association(*inp_p, NULL, m, iphlen, 2467 sh, op_err, vrf_id); 2468#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2469 pcb_so = SCTP_INP_SO(*inp_p); 2470 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2471 SCTP_TCB_UNLOCK((*stcb)); 2472 SCTP_SOCKET_LOCK(pcb_so, 1); 2473 SCTP_TCB_LOCK((*stcb)); 2474 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2475#endif 2476 (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); 2477#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2478 SCTP_SOCKET_UNLOCK(pcb_so, 1); 2479#endif 2480 return (NULL); 2481 } 2482 inp = (struct sctp_inpcb *)so->so_pcb; 2483 SCTP_INP_INCR_REF(inp); 2484 /* 2485 * We add the unbound flag here so that if we get an 2486 * soabort() before we get the move_pcb done, we 2487 * will properly cleanup. 
2488 */ 2489 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2490 SCTP_PCB_FLAGS_CONNECTED | 2491 SCTP_PCB_FLAGS_IN_TCPPOOL | 2492 SCTP_PCB_FLAGS_UNBOUND | 2493 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 2494 SCTP_PCB_FLAGS_DONT_WAKE); 2495 inp->sctp_features = (*inp_p)->sctp_features; 2496 inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features; 2497 inp->sctp_socket = so; 2498 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 2499 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 2500 inp->sctp_context = (*inp_p)->sctp_context; 2501 inp->inp_starting_point_for_iterator = NULL; 2502 /* 2503 * copy in the authentication parameters from the 2504 * original endpoint 2505 */ 2506 if (inp->sctp_ep.local_hmacs) 2507 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2508 inp->sctp_ep.local_hmacs = 2509 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 2510 if (inp->sctp_ep.local_auth_chunks) 2511 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2512 inp->sctp_ep.local_auth_chunks = 2513 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 2514 (void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys, 2515 &inp->sctp_ep.shared_keys); 2516 2517 /* 2518 * Now we must move it from one hash table to 2519 * another and get the tcb in the right place. 2520 */ 2521 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 2522 2523 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2524 SCTP_TCB_UNLOCK((*stcb)); 2525 2526 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, 2527 0); 2528 SCTP_TCB_LOCK((*stcb)); 2529 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2530 2531 2532 /* 2533 * now we must check to see if we were aborted while 2534 * the move was going on and the lock/unlock 2535 * happened. 2536 */ 2537 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 2538 /* 2539 * yep it was, we leave the assoc attached 2540 * to the socket since the sctp_inpcb_free() 2541 * call will send an abort for us. 
2542 */ 2543 SCTP_INP_DECR_REF(inp); 2544 return (NULL); 2545 } 2546 SCTP_INP_DECR_REF(inp); 2547 /* Switch over to the new guy */ 2548 *inp_p = inp; 2549 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2550 2551 /* 2552 * Pull it from the incomplete queue and wake the 2553 * guy 2554 */ 2555#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2556 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2557 SCTP_TCB_UNLOCK((*stcb)); 2558 SCTP_SOCKET_LOCK(so, 1); 2559#endif 2560 soisconnected(so); 2561#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2562 SCTP_TCB_LOCK((*stcb)); 2563 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2564 SCTP_SOCKET_UNLOCK(so, 1); 2565#endif 2566 return (m); 2567 } 2568 } 2569 if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 2570 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2571 } 2572 return (m); 2573} 2574 2575static void 2576sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp, 2577 struct sctp_tcb *stcb, struct sctp_nets *net) 2578{ 2579 /* cp must not be used, others call this without a c-ack :-) */ 2580 struct sctp_association *asoc; 2581 2582 SCTPDBG(SCTP_DEBUG_INPUT2, 2583 "sctp_handle_cookie_ack: handling COOKIE-ACK\n"); 2584 if (stcb == NULL) 2585 return; 2586 2587 asoc = &stcb->asoc; 2588 2589 sctp_stop_all_cookie_timers(stcb); 2590 /* process according to association state */ 2591 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 2592 /* state change only needed when I am in right state */ 2593 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 2594 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 2595 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 2596 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 2597 stcb->sctp_ep, stcb, asoc->primary_destination); 2598 2599 } 2600 /* update RTO */ 2601 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 2602 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 2603 if (asoc->overall_error_count == 0) { 2604 net->RTO = 
/*
 * COOKIE-ACK handler: completes the active (connect-side) handshake.
 * Moves a COOKIE_ECHOED association to OPEN, updates the RTO from the
 * handshake round trip, notifies the ULP, starts heartbeat/autoclose
 * timers, and kicks any pending ASCONFs.  Safe to call without a real
 * COOKIE-ACK chunk (cp is never dereferenced).
 */
static void
sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* cp must not be used, others call this without a c-ack :-) */
	struct sctp_association *asoc;

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_cookie_ack: handling COOKIE-ACK\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;

	/* also stops any lingering INIT timers (collision cases) */
	sctp_stop_all_cookie_timers(stcb);
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/* state change only needed when I am in right state */
		SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
		SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
		if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		/* only sample RTO when there were no retransmits */
		if (asoc->overall_error_count == 0) {
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered, sctp_align_safe_nocopy);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			struct socket *so;

#endif
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			/*
			 * socket lock must be taken with the TCB lock
			 * dropped; the refcnt bump keeps stcb alive across
			 * the unlock/relock window.
			 */
			so = SCTP_INP_SO(stcb->sctp_ep);
			atomic_add_int(&stcb->asoc.refcnt, 1);
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
				/* socket was closed while unlocked; bail */
				SCTP_SOCKET_UNLOCK(so, 1);
				return;
			}
#endif
			soisconnected(stcb->sctp_socket);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * send ASCONF if parameters are pending and ASCONFs are
		 * allowed (eg. addresses changed when init/cookie echo were
		 * in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
#ifdef SCTP_TIMER_BASED_ASCONF
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
#else
			sctp_send_asconf(stcb, stcb->asoc.primary_destination,
			    SCTP_ADDR_NOT_LOCKED);
#endif
		}
	}
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		if (chk) {
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, chk->whoTo);
		}
	}
}
2714 if (net == NULL) 2715 /* default is we use the primary */ 2716 net = stcb->asoc.primary_destination; 2717 2718 if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) { 2719 /* 2720 * JRS - Use the congestion control given in the pluggable 2721 * CC module 2722 */ 2723 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net); 2724 /* 2725 * we reduce once every RTT. So we will only lower cwnd at 2726 * the next sending seq i.e. the resync_tsn. 2727 */ 2728 stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn; 2729 } 2730 /* 2731 * We always send a CWR this way if our previous one was lost our 2732 * peer will get an update, or if it is not time again to reduce we 2733 * still get the cwr to the peer. 2734 */ 2735 sctp_send_cwr(stcb, net, tsn); 2736} 2737 2738static void 2739sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb) 2740{ 2741 /* 2742 * Here we get a CWR from the peer. We must look in the outqueue and 2743 * make sure that we have a covered ECNE in teh control chunk part. 2744 * If so remove it. 2745 */ 2746 struct sctp_tmit_chunk *chk; 2747 struct sctp_ecne_chunk *ecne; 2748 2749 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 2750 if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) { 2751 continue; 2752 } 2753 /* 2754 * Look for and remove if it is the right TSN. Since there 2755 * is only ONE ECNE on the control queue at any one time we 2756 * don't need to worry about more than one! 
2757 */ 2758 ecne = mtod(chk->data, struct sctp_ecne_chunk *); 2759 if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn), 2760 MAX_TSN) || (cp->tsn == ecne->tsn)) { 2761 /* this covers this ECNE, we can remove it */ 2762 stcb->asoc.ecn_echo_cnt_onq--; 2763 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, 2764 sctp_next); 2765 if (chk->data) { 2766 sctp_m_freem(chk->data); 2767 chk->data = NULL; 2768 } 2769 stcb->asoc.ctrl_queue_cnt--; 2770 sctp_free_a_chunk(stcb, chk); 2771 break; 2772 } 2773 } 2774} 2775 2776static void 2777sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp, 2778 struct sctp_tcb *stcb, struct sctp_nets *net) 2779{ 2780 struct sctp_association *asoc; 2781 2782#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2783 struct socket *so; 2784 2785#endif 2786 2787 SCTPDBG(SCTP_DEBUG_INPUT2, 2788 "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n"); 2789 if (stcb == NULL) 2790 return; 2791 2792 asoc = &stcb->asoc; 2793 /* process according to association state */ 2794 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) { 2795 /* unexpected SHUTDOWN-COMPLETE... so ignore... */ 2796 SCTPDBG(SCTP_DEBUG_INPUT2, 2797 "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n"); 2798 SCTP_TCB_UNLOCK(stcb); 2799 return; 2800 } 2801 /* notify upper layer protocol */ 2802 if (stcb->sctp_socket) { 2803 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2804 /* are the queues empty? 
/*
 * SHUTDOWN-COMPLETE handler: final step of a graceful shutdown.  Only
 * acted on in SHUTDOWN_ACK_SENT state; otherwise the chunk is ignored.
 * On success this frees the TCB — the caller must not touch stcb (or
 * rely on its lock) after this returns.
 */
static void
sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *asoc;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n");
	if (stcb == NULL)
		return;

	asoc = &stcb->asoc;
	/* process according to association state */
	if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) {
		/* unexpected SHUTDOWN-COMPLETE... so ignore... */
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n");
		SCTP_TCB_UNLOCK(stcb);
		return;
	}
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
		/* are the queues empty? they should be */
		if (!TAILQ_EMPTY(&asoc->send_queue) ||
		    !TAILQ_EMPTY(&asoc->sent_queue) ||
		    !TAILQ_EMPTY(&asoc->out_wheel)) {
			/* flush anything still queued as failed sends */
			sctp_report_all_outbound(stcb, 0, SCTP_SO_NOT_LOCKED);
		}
	}
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22);
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB */
	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown_complete: calls free-asoc\n");
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * take the socket lock (TCB lock dropped around it); refcnt bump
	 * keeps stcb alive over the unlock/relock window
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	return;
}

static int
process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc,
    struct sctp_nets *net, uint8_t flg)
{
	switch (desc->chunk_type) {
	case SCTP_DATA:
		/* find the tsn to resend (possibly */
		{
			uint32_t tsn;
			struct sctp_tmit_chunk *tp1;

			tsn = ntohl(desc->tsn_ifany);
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
			while (tp1) {
				if (tp1->rec.data.TSN_seq == tsn) {
					/* found it */
					break;
				}
				if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn,
				    MAX_TSN)) {
					/* not found */
					tp1 = NULL;
					break;
				}
				tp1 = TAILQ_NEXT(tp1, sctp_next);
			}
			if (tp1 == NULL) {
				/*
				 * Do it the other way , aka without paying
				 * attention to queue seq order.
/*
 * NOTE(review): collapsed listing dump — interior of process_chunk_drop(),
 * SCTP_DATA case. Fallback linear scan of sent_queue ignoring TSN ordering
 * (counted by sctps_pdrpdnfnd; not-found counted by sctps_pdrptsnnf). If the
 * chunk is found and not yet ACKed: bail out when peers_rwnd == 0 (separate
 * stats for middle-box vs. end-host reports), then byte-compare the reported
 * desc->data_bytes against the queued chunk's payload and return -1 on any
 * mismatch (sctps_pdrpbadd). On a verified match: zero the ECT nonce, cancel
 * a pending RTT measurement, mark the chunk SCTP_DATAGRAM_RESEND (bumping
 * sent_queue_retran_cnt only if newly marked), and flag it as a
 * fast-retransmit since gap-ack reports will lag the router's info.
 */
2862 */ 2863 SCTP_STAT_INCR(sctps_pdrpdnfnd); 2864 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2865 while (tp1) { 2866 if (tp1->rec.data.TSN_seq == tsn) { 2867 /* found it */ 2868 break; 2869 } 2870 tp1 = TAILQ_NEXT(tp1, sctp_next); 2871 } 2872 } 2873 if (tp1 == NULL) { 2874 SCTP_STAT_INCR(sctps_pdrptsnnf); 2875 } 2876 if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) { 2877 uint8_t *ddp; 2878 2879 if ((stcb->asoc.peers_rwnd == 0) && 2880 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) { 2881 SCTP_STAT_INCR(sctps_pdrpdiwnp); 2882 return (0); 2883 } 2884 if (stcb->asoc.peers_rwnd == 0 && 2885 (flg & SCTP_FROM_MIDDLE_BOX)) { 2886 SCTP_STAT_INCR(sctps_pdrpdizrw); 2887 return (0); 2888 } 2889 ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+ 2890 sizeof(struct sctp_data_chunk)); 2891 { 2892 unsigned int iii; 2893 2894 for (iii = 0; iii < sizeof(desc->data_bytes); 2895 iii++) { 2896 if (ddp[iii] != desc->data_bytes[iii]) { 2897 SCTP_STAT_INCR(sctps_pdrpbadd); 2898 return (-1); 2899 } 2900 } 2901 } 2902 /* 2903 * We zero out the nonce so resync not 2904 * needed 2905 */ 2906 tp1->rec.data.ect_nonce = 0; 2907 2908 if (tp1->do_rtt) { 2909 /* 2910 * this guy had a RTO calculation 2911 * pending on it, cancel it 2912 */ 2913 tp1->do_rtt = 0; 2914 } 2915 SCTP_STAT_INCR(sctps_pdrpmark); 2916 if (tp1->sent != SCTP_DATAGRAM_RESEND) 2917 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2918 tp1->sent = SCTP_DATAGRAM_RESEND; 2919 /* 2920 * mark it as if we were doing a FR, since 2921 * we will be getting gap ack reports behind 2922 * the info from the router. 2923 */ 2924 tp1->rec.data.doing_fast_retransmit = 1; 2925 /* 2926 * mark the tsn with what sequences can 2927 * cause a new FR. 
2928 */ 2929 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 2930 tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 2931 } else { 2932 tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 2933 } 2934 2935 /* restart the timer */ 2936 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2937 stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24); 2938 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2939 stcb, tp1->whoTo); 2940 2941 /* fix counts and things */ 2942 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 2943 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP, 2944 tp1->whoTo->flight_size, 2945 tp1->book_size, 2946 (uintptr_t) stcb, 2947 tp1->rec.data.TSN_seq); 2948 } 2949 sctp_flight_size_decrease(tp1); 2950 sctp_total_flight_decrease(stcb, tp1); 2951 } { 2952 /* audit code */ 2953 unsigned int audit; 2954 2955 audit = 0; 2956 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 2957 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2958 audit++; 2959 } 2960 TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue, 2961 sctp_next) { 2962 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2963 audit++; 2964 } 2965 if (audit != stcb->asoc.sent_queue_retran_cnt) { 2966 SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n", 2967 audit, stcb->asoc.sent_queue_retran_cnt); 2968#ifndef SCTP_AUDITING_ENABLED 2969 stcb->asoc.sent_queue_retran_cnt = audit; 2970#endif 2971 } 2972 } 2973 } 2974 break; 2975 case SCTP_ASCONF: 2976 { 2977 struct sctp_tmit_chunk *asconf; 2978 2979 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue, 2980 sctp_next) { 2981 if (asconf->rec.chunk_id.id == SCTP_ASCONF) { 2982 break; 2983 } 2984 } 2985 if (asconf) { 2986 if (asconf->sent != SCTP_DATAGRAM_RESEND) 2987 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2988 asconf->sent = SCTP_DATAGRAM_RESEND; 2989 asconf->snd_count--; 2990 } 2991 } 2992 break; 2993 case SCTP_INITIATION: 2994 /* resend the INIT */ 2995 stcb->asoc.dropped_special_cnt++; 2996 if 
/*
 * NOTE(review): collapsed listing dump — interior of process_chunk_drop(),
 * the per-chunk-type retransmission switch. INIT is resent immediately only
 * while dropped_special_cnt stays below SCTP_RETRY_DROPPED_THRESH (otherwise
 * the INIT timer fires); SACK is resent unconditionally; a demand HB is sent
 * only when overall_error_count + 3 < max_send_times, so the retransmit
 * cannot itself destroy the tcb; SHUTDOWN / SHUTDOWN-ACK are resent;
 * COOKIE-ECHO is re-marked SCTP_DATAGRAM_RESEND on the control queue
 * (bumping the retran count only if newly marked) and all cookie/INIT
 * timers are stopped; COOKIE-ACK, ASCONF-ACK and FORWARD-CUM-TSN are
 * regenerated. All remaining chunk types (incl. PACKET_DROPPED itself and
 * the unexpected INIT-ACK) are deliberately ignored.
 */
(stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) { 2997 /* 2998 * If we can get it in, in a few attempts we do 2999 * this, otherwise we let the timer fire. 3000 */ 3001 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, 3002 stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25); 3003 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 3004 } 3005 break; 3006 case SCTP_SELECTIVE_ACK: 3007 /* resend the sack */ 3008 sctp_send_sack(stcb); 3009 break; 3010 case SCTP_HEARTBEAT_REQUEST: 3011 /* resend a demand HB */ 3012 if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) { 3013 /* 3014 * Only retransmit if we KNOW we wont destroy the 3015 * tcb 3016 */ 3017 (void)sctp_send_hb(stcb, 1, net); 3018 } 3019 break; 3020 case SCTP_SHUTDOWN: 3021 sctp_send_shutdown(stcb, net); 3022 break; 3023 case SCTP_SHUTDOWN_ACK: 3024 sctp_send_shutdown_ack(stcb, net); 3025 break; 3026 case SCTP_COOKIE_ECHO: 3027 { 3028 struct sctp_tmit_chunk *cookie; 3029 3030 cookie = NULL; 3031 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, 3032 sctp_next) { 3033 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 3034 break; 3035 } 3036 } 3037 if (cookie) { 3038 if (cookie->sent != SCTP_DATAGRAM_RESEND) 3039 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3040 cookie->sent = SCTP_DATAGRAM_RESEND; 3041 sctp_stop_all_cookie_timers(stcb); 3042 } 3043 } 3044 break; 3045 case SCTP_COOKIE_ACK: 3046 sctp_send_cookie_ack(stcb); 3047 break; 3048 case SCTP_ASCONF_ACK: 3049 /* resend last asconf ack */ 3050 sctp_send_asconf_ack(stcb); 3051 break; 3052 case SCTP_FORWARD_CUM_TSN: 3053 send_forward_tsn(stcb, &stcb->asoc); 3054 break; 3055 /* can't do anything with these */ 3056 case SCTP_PACKET_DROPPED: 3057 case SCTP_INITIATION_ACK: /* this should not happen */ 3058 case SCTP_HEARTBEAT_ACK: 3059 case SCTP_ABORT_ASSOCIATION: 3060 case SCTP_OPERATION_ERROR: 3061 case SCTP_SHUTDOWN_COMPLETE: 3062 case SCTP_ECN_ECHO: 3063 case SCTP_ECN_CWR: 3064 default: 3065 break; 3066 } 
3067 return (0); 3068} 3069 3070void 3071sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 3072{ 3073 int i; 3074 uint16_t temp; 3075 3076 /* 3077 * We set things to 0xffff since this is the last delivered sequence 3078 * and we will be sending in 0 after the reset. 3079 */ 3080 3081 if (number_entries) { 3082 for (i = 0; i < number_entries; i++) { 3083 temp = ntohs(list[i]); 3084 if (temp >= stcb->asoc.streamincnt) { 3085 continue; 3086 } 3087 stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff; 3088 } 3089 } else { 3090 list = NULL; 3091 for (i = 0; i < stcb->asoc.streamincnt; i++) { 3092 stcb->asoc.strmin[i].last_sequence_delivered = 0xffff; 3093 } 3094 } 3095 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3096} 3097 3098static void 3099sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 3100{ 3101 int i; 3102 3103 if (number_entries == 0) { 3104 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3105 stcb->asoc.strmout[i].next_sequence_sent = 0; 3106 } 3107 } else if (number_entries) { 3108 for (i = 0; i < number_entries; i++) { 3109 uint16_t temp; 3110 3111 temp = ntohs(list[i]); 3112 if (temp >= stcb->asoc.streamoutcnt) { 3113 /* no such stream */ 3114 continue; 3115 } 3116 stcb->asoc.strmout[temp].next_sequence_sent = 0; 3117 } 3118 } 3119 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3120} 3121 3122 3123struct sctp_stream_reset_out_request * 3124sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk) 3125{ 3126 struct sctp_association *asoc; 3127 struct sctp_stream_reset_out_req *req; 3128 struct sctp_stream_reset_out_request *r; 3129 struct sctp_tmit_chunk *chk; 3130 int len, clen; 3131 3132 asoc = &stcb->asoc; 3133 if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 3134 asoc->stream_reset_outstanding = 0; 3135 return (NULL); 3136 } 3137 if 
(stcb->asoc.str_reset == NULL) { 3138 asoc->stream_reset_outstanding = 0; 3139 return (NULL); 3140 } 3141 chk = stcb->asoc.str_reset; 3142 if (chk->data == NULL) { 3143 return (NULL); 3144 } 3145 if (bchk) { 3146 /* he wants a copy of the chk pointer */ 3147 *bchk = chk; 3148 } 3149 clen = chk->send_size; 3150 req = mtod(chk->data, struct sctp_stream_reset_out_req *); 3151 r = &req->sr_req; 3152 if (ntohl(r->request_seq) == seq) { 3153 /* found it */ 3154 return (r); 3155 } 3156 len = SCTP_SIZE32(ntohs(r->ph.param_length)); 3157 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) { 3158 /* move to the next one, there can only be a max of two */ 3159 r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len); 3160 if (ntohl(r->request_seq) == seq) { 3161 return (r); 3162 } 3163 } 3164 /* that seq is not here */ 3165 return (NULL); 3166} 3167 3168static void 3169sctp_clean_up_stream_reset(struct sctp_tcb *stcb) 3170{ 3171 struct sctp_association *asoc; 3172 struct sctp_tmit_chunk *chk = stcb->asoc.str_reset; 3173 3174 if (stcb->asoc.str_reset == NULL) { 3175 return; 3176 } 3177 asoc = &stcb->asoc; 3178 3179 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26); 3180 TAILQ_REMOVE(&asoc->control_send_queue, 3181 chk, 3182 sctp_next); 3183 if (chk->data) { 3184 sctp_m_freem(chk->data); 3185 chk->data = NULL; 3186 } 3187 asoc->ctrl_queue_cnt--; 3188 sctp_free_a_chunk(stcb, chk); 3189 /* sa_ignore NO_NULL_CHK */ 3190 stcb->asoc.str_reset = NULL; 3191} 3192 3193 3194static int 3195sctp_handle_stream_reset_response(struct sctp_tcb *stcb, 3196 uint32_t seq, uint32_t action, 3197 struct sctp_stream_reset_response *respin) 3198{ 3199 uint16_t type; 3200 int lparm_len; 3201 struct sctp_association *asoc = &stcb->asoc; 3202 struct sctp_tmit_chunk *chk; 3203 struct sctp_stream_reset_out_request *srparam; 3204 int number_entries; 3205 3206 if (asoc->stream_reset_outstanding == 0) { 3207 /* duplicate */ 3208 return (0); 
/*
 * NOTE(review): collapsed listing dump — interior of
 * sctp_handle_stream_reset_response(). Only a response whose seq matches our
 * outstanding str_reset_seq_out is processed: the matching request param is
 * located via sctp_find_stream_reset() and str_reset_seq_out advances. For
 * an OUT request: clear stream_reset_out_is_outstanding, decrement the
 * outstanding count, and on SCTP_STREAM_RESET_PERFORMED reset our outbound
 * stream sequence numbers, otherwise notify the ULP of the failure. For an
 * IN request: decrement outstanding and notify the ULP only on failure
 * (success arrives later as the peer's own OUT request). For a TSN request
 * the TSN-style response body (respin) is required — NULL is treated as a
 * malformed reply and ignored (guard continues past this segment).
 * number_entries is derived from param_length minus the fixed request size,
 * in uint16_t stream ids.
 */
3209 } 3210 if (seq == stcb->asoc.str_reset_seq_out) { 3211 srparam = sctp_find_stream_reset(stcb, seq, &chk); 3212 if (srparam) { 3213 stcb->asoc.str_reset_seq_out++; 3214 type = ntohs(srparam->ph.param_type); 3215 lparm_len = ntohs(srparam->ph.param_length); 3216 if (type == SCTP_STR_RESET_OUT_REQUEST) { 3217 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t); 3218 asoc->stream_reset_out_is_outstanding = 0; 3219 if (asoc->stream_reset_outstanding) 3220 asoc->stream_reset_outstanding--; 3221 if (action == SCTP_STREAM_RESET_PERFORMED) { 3222 /* do it */ 3223 sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams); 3224 } else { 3225 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3226 } 3227 } else if (type == SCTP_STR_RESET_IN_REQUEST) { 3228 /* Answered my request */ 3229 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t); 3230 if (asoc->stream_reset_outstanding) 3231 asoc->stream_reset_outstanding--; 3232 if (action != SCTP_STREAM_RESET_PERFORMED) { 3233 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3234 } 3235 } else if (type == SCTP_STR_RESET_TSN_REQUEST) { 3236 /** 3237 * a) Adopt the new in tsn. 3238 * b) reset the map 3239 * c) Adopt the new out-tsn 3240 */ 3241 struct sctp_stream_reset_response_tsn *resp; 3242 struct sctp_forward_tsn_chunk fwdtsn; 3243 int abort_flag = 0; 3244 3245 if (respin == NULL) { 3246 /* huh ? 
*/ 3247 return (0); 3248 } 3249 if (action == SCTP_STREAM_RESET_PERFORMED) { 3250 resp = (struct sctp_stream_reset_response_tsn *)respin; 3251 asoc->stream_reset_outstanding--; 3252 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3253 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3254 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1); 3255 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3256 if (abort_flag) { 3257 return (1); 3258 } 3259 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1); 3260 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 3261 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn); 3262 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 3263 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn); 3264 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn; 3265 3266 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 3267 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 3268 3269 } 3270 } 3271 /* get rid of the request and get the request flags */ 3272 if (asoc->stream_reset_outstanding == 0) { 3273 sctp_clean_up_stream_reset(stcb); 3274 } 3275 } 3276 } 3277 return (0); 3278} 3279 3280static void 3281sctp_handle_str_reset_request_in(struct sctp_tcb *stcb, 3282 struct sctp_tmit_chunk *chk, 3283 struct sctp_stream_reset_in_request *req, int trunc) 3284{ 3285 uint32_t seq; 3286 int len, i; 3287 int number_entries; 3288 uint16_t temp; 3289 3290 /* 3291 * peer wants me to send a str-reset to him for my outgoing seq's if 3292 * seq_in is right. 
3293 */ 3294 struct sctp_association *asoc = &stcb->asoc; 3295 3296 seq = ntohl(req->request_seq); 3297 if (asoc->str_reset_seq_in == seq) { 3298 if (trunc) { 3299 /* Can't do it, since they exceeded our buffer size */ 3300 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3301 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3302 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3303 } else if (stcb->asoc.stream_reset_out_is_outstanding == 0) { 3304 len = ntohs(req->ph.param_length); 3305 number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t)); 3306 for (i = 0; i < number_entries; i++) { 3307 temp = ntohs(req->list_of_streams[i]); 3308 req->list_of_streams[i] = temp; 3309 } 3310 /* move the reset action back one */ 3311 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3312 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3313 sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams, 3314 asoc->str_reset_seq_out, 3315 seq, (asoc->sending_seq - 1)); 3316 asoc->stream_reset_out_is_outstanding = 1; 3317 asoc->str_reset = chk; 3318 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); 3319 stcb->asoc.stream_reset_outstanding++; 3320 } else { 3321 /* Can't do it, since we have sent one out */ 3322 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3323 asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER; 3324 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3325 } 3326 asoc->str_reset_seq_in++; 3327 } else if (asoc->str_reset_seq_in - 1 == seq) { 3328 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3329 } else if (asoc->str_reset_seq_in - 2 == seq) { 3330 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3331 } else { 3332 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3333 } 3334} 3335 3336static int 3337sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb, 3338 
struct sctp_tmit_chunk *chk, 3339 struct sctp_stream_reset_tsn_request *req) 3340{ 3341 /* reset all in and out and update the tsn */ 3342 /* 3343 * A) reset my str-seq's on in and out. B) Select a receive next, 3344 * and set cum-ack to it. Also process this selected number as a 3345 * fwd-tsn as well. C) set in the response my next sending seq. 3346 */ 3347 struct sctp_forward_tsn_chunk fwdtsn; 3348 struct sctp_association *asoc = &stcb->asoc; 3349 int abort_flag = 0; 3350 uint32_t seq; 3351 3352 seq = ntohl(req->request_seq); 3353 if (asoc->str_reset_seq_in == seq) { 3354 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3355 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3356 fwdtsn.ch.chunk_flags = 0; 3357 fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1); 3358 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3359 if (abort_flag) { 3360 return (1); 3361 } 3362 stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA; 3363 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 3364 stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1; 3365 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 3366 atomic_add_int(&stcb->asoc.sending_seq, 1); 3367 /* save off historical data for retrans */ 3368 stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0]; 3369 stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq; 3370 stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0]; 3371 stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn; 3372 3373 sctp_add_stream_reset_result_tsn(chk, 3374 ntohl(req->request_seq), 3375 SCTP_STREAM_RESET_PERFORMED, 3376 stcb->asoc.sending_seq, 3377 stcb->asoc.mapping_array_base_tsn); 3378 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 3379 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 3380 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0]; 3381 stcb->asoc.last_reset_action[0] = 
SCTP_STREAM_RESET_PERFORMED; 3382 3383 asoc->str_reset_seq_in++; 3384 } else if (asoc->str_reset_seq_in - 1 == seq) { 3385 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0], 3386 stcb->asoc.last_sending_seq[0], 3387 stcb->asoc.last_base_tsnsent[0] 3388 ); 3389 } else if (asoc->str_reset_seq_in - 2 == seq) { 3390 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1], 3391 stcb->asoc.last_sending_seq[1], 3392 stcb->asoc.last_base_tsnsent[1] 3393 ); 3394 } else { 3395 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3396 } 3397 return (0); 3398} 3399 3400static void 3401sctp_handle_str_reset_request_out(struct sctp_tcb *stcb, 3402 struct sctp_tmit_chunk *chk, 3403 struct sctp_stream_reset_out_request *req, int trunc) 3404{ 3405 uint32_t seq, tsn; 3406 int number_entries, len; 3407 struct sctp_association *asoc = &stcb->asoc; 3408 3409 seq = ntohl(req->request_seq); 3410 3411 /* now if its not a duplicate we process it */ 3412 if (asoc->str_reset_seq_in == seq) { 3413 len = ntohs(req->ph.param_length); 3414 number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t)); 3415 /* 3416 * the sender is resetting, handle the list issue.. we must 3417 * a) verify if we can do the reset, if so no problem b) If 3418 * we can't do the reset we must copy the request. c) queue 3419 * it, and setup the data in processor to trigger it off 3420 * when needed and dequeue all the queued data. 
/*
 * NOTE(review): collapsed listing dump — interior of
 * sctp_handle_str_reset_request_out(), the in-sequence branch. The previous
 * reset action is shifted into last_reset_action[1] so a retransmitted
 * request one or two seq back can be answered with the historical result.
 * Truncated requests are DENIED outright. If the peer's send_reset_at_tsn is
 * already at or behind our cumulative_tsn (wrap-aware compare), the inbound
 * streams are reset immediately and PERFORMED is answered. Otherwise the
 * request (header plus the variable-length stream-id list) is copied into a
 * freshly allocated sctp_stream_reset_list and queued on asoc->resetHead to
 * be applied once the TSN is reached — PERFORMED is still answered now;
 * allocation failure downgrades the answer to DENIED. str_reset_seq_in is
 * bumped only on this in-sequence path; the trailing else-if echoes the
 * last action for a one-seq-back duplicate.
 */
3421 */ 3422 tsn = ntohl(req->send_reset_at_tsn); 3423 3424 /* move the reset action back one */ 3425 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3426 if (trunc) { 3427 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED); 3428 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3429 } else if ((tsn == asoc->cumulative_tsn) || 3430 (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) { 3431 /* we can do it now */ 3432 sctp_reset_in_stream(stcb, number_entries, req->list_of_streams); 3433 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3434 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3435 } else { 3436 /* 3437 * we must queue it up and thus wait for the TSN's 3438 * to arrive that are at or before tsn 3439 */ 3440 struct sctp_stream_reset_list *liste; 3441 int siz; 3442 3443 siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t)); 3444 SCTP_MALLOC(liste, struct sctp_stream_reset_list *, 3445 siz, SCTP_M_STRESET); 3446 if (liste == NULL) { 3447 /* gak out of memory */ 3448 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED); 3449 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3450 return; 3451 } 3452 liste->tsn = tsn; 3453 liste->number_entries = number_entries; 3454 memcpy(&liste->req, req, 3455 (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t)))); 3456 TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp); 3457 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3458 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3459 } 3460 asoc->str_reset_seq_in++; 3461 } else if ((asoc->str_reset_seq_in - 1) == seq) { 3462 /* 3463 * one seq back, just echo back last action since my 3464 * response was lost. 
3465 */ 3466 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3467 } else if ((asoc->str_reset_seq_in - 2) == seq) { 3468 /* 3469 * two seq back, just echo back last action since my 3470 * response was lost. 3471 */ 3472 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3473 } else { 3474 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3475 } 3476} 3477 3478#ifdef __GNUC__ 3479__attribute__((noinline)) 3480#endif 3481 static int 3482 sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset, 3483 struct sctp_stream_reset_out_req *sr_req) 3484{ 3485 int chk_length, param_len, ptype; 3486 struct sctp_paramhdr pstore; 3487 uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE]; 3488 3489 uint32_t seq; 3490 int num_req = 0; 3491 int trunc = 0; 3492 struct sctp_tmit_chunk *chk; 3493 struct sctp_chunkhdr *ch; 3494 struct sctp_paramhdr *ph; 3495 int ret_code = 0; 3496 int num_param = 0; 3497 3498 /* now it may be a reset or a reset-response */ 3499 chk_length = ntohs(sr_req->ch.chunk_length); 3500 3501 /* setup for adding the response */ 3502 sctp_alloc_a_chunk(stcb, chk); 3503 if (chk == NULL) { 3504 return (ret_code); 3505 } 3506 chk->rec.chunk_id.id = SCTP_STREAM_RESET; 3507 chk->rec.chunk_id.can_take_data = 0; 3508 chk->asoc = &stcb->asoc; 3509 chk->no_fr_allowed = 0; 3510 chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr); 3511 chk->book_size_scale = 0; 3512 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 3513 if (chk->data == NULL) { 3514strres_nochunk: 3515 if (chk->data) { 3516 sctp_m_freem(chk->data); 3517 chk->data = NULL; 3518 } 3519 sctp_free_a_chunk(stcb, chk); 3520 return (ret_code); 3521 } 3522 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 3523 3524 /* setup chunk parameters */ 3525 chk->sent = SCTP_DATAGRAM_UNSENT; 3526 chk->snd_count = 0; 3527 chk->whoTo = stcb->asoc.primary_destination; 3528 atomic_add_int(&chk->whoTo->ref_count, 1); 3529 3530 ch = 
mtod(chk->data, struct sctp_chunkhdr *); 3531 ch->chunk_type = SCTP_STREAM_RESET; 3532 ch->chunk_flags = 0; 3533 ch->chunk_length = htons(chk->send_size); 3534 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 3535 offset += sizeof(struct sctp_chunkhdr); 3536 while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) { 3537 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore); 3538 if (ph == NULL) 3539 break; 3540 param_len = ntohs(ph->param_length); 3541 if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) { 3542 /* bad param */ 3543 break; 3544 } 3545 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)), 3546 (uint8_t *) & cstore); 3547 ptype = ntohs(ph->param_type); 3548 num_param++; 3549 if (param_len > (int)sizeof(cstore)) { 3550 trunc = 1; 3551 } else { 3552 trunc = 0; 3553 } 3554 3555 if (num_param > SCTP_MAX_RESET_PARAMS) { 3556 /* hit the max of parameters already sorry.. 
*/ 3557 break; 3558 } 3559 if (ptype == SCTP_STR_RESET_OUT_REQUEST) { 3560 struct sctp_stream_reset_out_request *req_out; 3561 3562 req_out = (struct sctp_stream_reset_out_request *)ph; 3563 num_req++; 3564 if (stcb->asoc.stream_reset_outstanding) { 3565 seq = ntohl(req_out->response_seq); 3566 if (seq == stcb->asoc.str_reset_seq_out) { 3567 /* implicit ack */ 3568 (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL); 3569 } 3570 } 3571 sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc); 3572 } else if (ptype == SCTP_STR_RESET_IN_REQUEST) { 3573 struct sctp_stream_reset_in_request *req_in; 3574 3575 num_req++; 3576 3577 req_in = (struct sctp_stream_reset_in_request *)ph; 3578 3579 sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc); 3580 } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) { 3581 struct sctp_stream_reset_tsn_request *req_tsn; 3582 3583 num_req++; 3584 req_tsn = (struct sctp_stream_reset_tsn_request *)ph; 3585 3586 if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) { 3587 ret_code = 1; 3588 goto strres_nochunk; 3589 } 3590 /* no more */ 3591 break; 3592 } else if (ptype == SCTP_STR_RESET_RESPONSE) { 3593 struct sctp_stream_reset_response *resp; 3594 uint32_t result; 3595 3596 resp = (struct sctp_stream_reset_response *)ph; 3597 seq = ntohl(resp->response_seq); 3598 result = ntohl(resp->result); 3599 if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) { 3600 ret_code = 1; 3601 goto strres_nochunk; 3602 } 3603 } else { 3604 break; 3605 } 3606 offset += SCTP_SIZE32(param_len); 3607 chk_length -= SCTP_SIZE32(param_len); 3608 } 3609 if (num_req == 0) { 3610 /* we have no response free the stuff */ 3611 goto strres_nochunk; 3612 } 3613 /* ok we have a chunk to link in */ 3614 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, 3615 chk, 3616 sctp_next); 3617 stcb->asoc.ctrl_queue_cnt++; 3618 return (ret_code); 3619} 3620 3621/* 3622 * Handle a router or endpoints report of a packet loss, there 
are two ways 3623 * to handle this, either we get the whole packet and must disect it 3624 * ourselves (possibly with truncation and or corruption) or it is a summary 3625 * from a middle box that did the disectting for us. 3626 */ 3627static void 3628sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp, 3629 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit) 3630{ 3631 uint32_t bottle_bw, on_queue; 3632 uint16_t trunc_len; 3633 unsigned int chlen; 3634 unsigned int at; 3635 struct sctp_chunk_desc desc; 3636 struct sctp_chunkhdr *ch; 3637 3638 chlen = ntohs(cp->ch.chunk_length); 3639 chlen -= sizeof(struct sctp_pktdrop_chunk); 3640 /* XXX possible chlen underflow */ 3641 if (chlen == 0) { 3642 ch = NULL; 3643 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) 3644 SCTP_STAT_INCR(sctps_pdrpbwrpt); 3645 } else { 3646 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr)); 3647 chlen -= sizeof(struct sctphdr); 3648 /* XXX possible chlen underflow */ 3649 memset(&desc, 0, sizeof(desc)); 3650 } 3651 trunc_len = (uint16_t) ntohs(cp->trunc_len); 3652 if (trunc_len > limit) { 3653 trunc_len = limit; 3654 } 3655 /* now the chunks themselves */ 3656 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) { 3657 desc.chunk_type = ch->chunk_type; 3658 /* get amount we need to move */ 3659 at = ntohs(ch->chunk_length); 3660 if (at < sizeof(struct sctp_chunkhdr)) { 3661 /* corrupt chunk, maybe at the end? */ 3662 SCTP_STAT_INCR(sctps_pdrpcrupt); 3663 break; 3664 } 3665 if (trunc_len == 0) { 3666 /* we are supposed to have all of it */ 3667 if (at > chlen) { 3668 /* corrupt skip it */ 3669 SCTP_STAT_INCR(sctps_pdrpcrupt); 3670 break; 3671 } 3672 } else { 3673 /* is there enough of it left ? 
/*
 * NOTE(review): collapsed listing dump — interior of
 * sctp_handle_packet_dropped(), the per-chunk walk over the dropped packet's
 * embedded chunks. For a DATA chunk the first sizeof(desc.data_bytes) payload
 * bytes and the TSN are captured into desc (requires at least a full
 * sctp_data_chunk header plus 4 bytes, else sctps_pdrpnedat and stop);
 * middle-box reports are counted separately (sctps_pdrpmbda / sctps_pdrpmbct).
 * Each captured descriptor is handed to process_chunk_drop(); a non-zero
 * return (payload mismatch) aborts the walk (sctps_pdrppdbrk). Advancing uses
 * the 32-bit-padded chunk length, SCTP_SIZE32(at). After the loop, a report
 * that did NOT come from a middle box carries the peer's bottle_bw /
 * current_onq, from which a surrogate rwnd is derived: a_rwnd = bottle_bw -
 * on_queue when positive, then reduced by total_flight (segment continues
 * past this line with the SWS-sender clamp).
 */
*/ 3674 if (desc.chunk_type == SCTP_DATA) { 3675 if (chlen < (sizeof(struct sctp_data_chunk) + 3676 sizeof(desc.data_bytes))) { 3677 break; 3678 } 3679 } else { 3680 if (chlen < sizeof(struct sctp_chunkhdr)) { 3681 break; 3682 } 3683 } 3684 } 3685 if (desc.chunk_type == SCTP_DATA) { 3686 /* can we get out the tsn? */ 3687 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 3688 SCTP_STAT_INCR(sctps_pdrpmbda); 3689 3690 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) { 3691 /* yep */ 3692 struct sctp_data_chunk *dcp; 3693 uint8_t *ddp; 3694 unsigned int iii; 3695 3696 dcp = (struct sctp_data_chunk *)ch; 3697 ddp = (uint8_t *) (dcp + 1); 3698 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) { 3699 desc.data_bytes[iii] = ddp[iii]; 3700 } 3701 desc.tsn_ifany = dcp->dp.tsn; 3702 } else { 3703 /* nope we are done. */ 3704 SCTP_STAT_INCR(sctps_pdrpnedat); 3705 break; 3706 } 3707 } else { 3708 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 3709 SCTP_STAT_INCR(sctps_pdrpmbct); 3710 } 3711 3712 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) { 3713 SCTP_STAT_INCR(sctps_pdrppdbrk); 3714 break; 3715 } 3716 if (SCTP_SIZE32(at) > chlen) { 3717 break; 3718 } 3719 chlen -= SCTP_SIZE32(at); 3720 if (chlen < sizeof(struct sctp_chunkhdr)) { 3721 /* done, none left */ 3722 break; 3723 } 3724 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at)); 3725 } 3726 /* Now update any rwnd --- possibly */ 3727 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) { 3728 /* From a peer, we get a rwnd report */ 3729 uint32_t a_rwnd; 3730 3731 SCTP_STAT_INCR(sctps_pdrpfehos); 3732 3733 bottle_bw = ntohl(cp->bottle_bw); 3734 on_queue = ntohl(cp->current_onq); 3735 if (bottle_bw && on_queue) { 3736 /* a rwnd report is in here */ 3737 if (bottle_bw > on_queue) 3738 a_rwnd = bottle_bw - on_queue; 3739 else 3740 a_rwnd = 0; 3741 3742 if (a_rwnd == 0) 3743 stcb->asoc.peers_rwnd = 0; 3744 else { 3745 if (a_rwnd > stcb->asoc.total_flight) { 3746 
stcb->asoc.peers_rwnd = 3747 a_rwnd - stcb->asoc.total_flight; 3748 } else { 3749 stcb->asoc.peers_rwnd = 0; 3750 } 3751 if (stcb->asoc.peers_rwnd < 3752 stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3753 /* SWS sender side engages */ 3754 stcb->asoc.peers_rwnd = 0; 3755 } 3756 } 3757 } 3758 } else { 3759 SCTP_STAT_INCR(sctps_pdrpfmbox); 3760 } 3761 3762 /* now middle boxes in sat networks get a cwnd bump */ 3763 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) && 3764 (stcb->asoc.sat_t3_loss_recovery == 0) && 3765 (stcb->asoc.sat_network)) { 3766 /* 3767 * This is debateable but for sat networks it makes sense 3768 * Note if a T3 timer has went off, we will prohibit any 3769 * changes to cwnd until we exit the t3 loss recovery. 3770 */ 3771 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb, 3772 net, cp, &bottle_bw, &on_queue); 3773 } 3774} 3775 3776/* 3777 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to 3778 * still contain IP/SCTP header - stcb: is the tcb found for this packet - 3779 * offset: offset into the mbuf chain to first chunkhdr - length: is the 3780 * length of the complete packet outputs: - length: modified to remaining 3781 * length after control processing - netp: modified to new sctp_nets after 3782 * cookie-echo processing - return NULL to discard the packet (ie. no asoc, 3783 * bad packet,...) 
 * otherwise return the tcb for this packet
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
	static struct sctp_tcb *
	sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length,
	    struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp,
	    struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen,
	    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	uint32_t vtag_in;
	int num_chunks = 0;	/* number of control chunks processed */
	uint32_t chk_length;
	int ret;
	int abort_no_unlock = 0;

	/*
	 * How big should this be, and should it be alloc'd? Lets try the
	 * d-mtu-ceiling for now (2k) and that should hopefully work ...
	 * until we get into jumbo grams and such..
	 */
	uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE];
	/*
	 * locked_tcb tracks which tcb (if any) we currently hold locked; it
	 * must be unlocked on every early-return path that gives up on the
	 * packet.
	 */
	struct sctp_tcb *locked_tcb = stcb;
	int got_auth = 0;
	/* AUTH chunk seen before the tcb was known: remember where it was */
	uint32_t auth_offset = 0, auth_len = 0;
	int auth_skipped = 0;
	/* count of ASCONF chunks; a single ASCONF-ACK is sent at exit */
	int asconf_cnt = 0;

#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n",
	    iphlen, *offset, length, stcb);

	/* validate chunk header length... */
	if (ntohs(ch->chunk_length) < sizeof(*ch)) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n",
		    ntohs(ch->chunk_length));
		if (locked_tcb) {
			SCTP_TCB_UNLOCK(locked_tcb);
		}
		return (NULL);
	}
	/*
	 * validate the verification tag
	 */
	vtag_in = ntohl(sh->v_tag);

	if (locked_tcb) {
		SCTP_TCB_LOCK_ASSERT(locked_tcb);
	}
	if (ch->chunk_type == SCTP_INITIATION) {
		/* An INIT must carry a zero v-tag (RFC 4960 sec 8.5.1) */
		SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n",
		    ntohs(ch->chunk_length), vtag_in);
		if (vtag_in != 0) {
			/* protocol error- silently discard... */
			SCTP_STAT_INCR(sctps_badvtag);
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
	} else if (ch->chunk_type != SCTP_COOKIE_ECHO) {
		/*
		 * If there is no stcb, skip the AUTH chunk and process
		 * later after a stcb is found (to validate the lookup was
		 * valid.
		 */
		if ((ch->chunk_type == SCTP_AUTHENTICATION) &&
		    (stcb == NULL) && !sctp_auth_disable) {
			/* save this chunk for later processing */
			auth_skipped = 1;
			auth_offset = *offset;
			auth_len = ntohs(ch->chunk_length);

			/* (temporarily) move past this chunk */
			*offset += SCTP_SIZE32(auth_len);
			if (*offset >= length) {
				/* no more data left in the mbuf chain */
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
			    sizeof(struct sctp_chunkhdr), chunk_buf);
		}
		if (ch == NULL) {
			/* Help */
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		if (ch->chunk_type == SCTP_COOKIE_ECHO) {
			goto process_control_chunks;
		}
		/*
		 * first check if it's an ASCONF with an unknown src addr we
		 * need to look inside to find the association
		 */
		if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) {
			struct sctp_chunkhdr *asconf_ch = ch;
			uint32_t asconf_offset = 0, asconf_len = 0;

			/* inp's refcount may be reduced */
			SCTP_INP_INCR_REF(inp);

			asconf_offset = *offset;
			do {
				asconf_len = ntohs(asconf_ch->chunk_length);
				if (asconf_len < sizeof(struct sctp_asconf_paramhdr))
					break;
				stcb = sctp_findassociation_ep_asconf(m, iphlen,
				    *offset, sh, &inp, netp);
				if (stcb != NULL)
					break;
				asconf_offset += SCTP_SIZE32(asconf_len);
				asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset,
				    sizeof(struct sctp_chunkhdr), chunk_buf);
			} while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF);
			if (stcb == NULL) {
				/*
				 * reduce inp's refcount if not reduced in
				 * sctp_findassociation_ep_asconf().
				 */
				SCTP_INP_DECR_REF(inp);
			} else {
				locked_tcb = stcb;
			}

			/* now go back and verify any auth chunk to be sure */
			if (auth_skipped && (stcb != NULL)) {
				struct sctp_auth_chunk *auth;

				auth = (struct sctp_auth_chunk *)
				    sctp_m_getptr(m, auth_offset,
				    auth_len, chunk_buf);
				got_auth = 1;
				auth_skipped = 0;
				if ((auth == NULL) || sctp_handle_auth(stcb, auth, m,
				    auth_offset)) {
					/* auth HMAC failed so dump it */
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				} else {
					/* remaining chunks are HMAC checked */
					stcb->asoc.authenticated = 1;
				}
			}
		}
		if (stcb == NULL) {
			/* no association, so it's out of the blue... */
			sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL,
			    vrf_id);
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		asoc = &stcb->asoc;
		/* ABORT and SHUTDOWN can use either v_tag... */
		if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) ||
		    (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) ||
		    (ch->chunk_type == SCTP_PACKET_DROPPED)) {
			if ((vtag_in == asoc->my_vtag) ||
			    ((ch->chunk_flags & SCTP_HAD_NO_TCB) &&
			    (vtag_in == asoc->peer_vtag))) {
				/* this is valid */
			} else {
				/* drop this packet... */
				SCTP_STAT_INCR(sctps_badvtag);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
		} else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			if (vtag_in != asoc->my_vtag) {
				/*
				 * this could be a stale SHUTDOWN-ACK or the
				 * peer never got the SHUTDOWN-COMPLETE and
				 * is still hung; we have started a new asoc
				 * but it won't complete until the shutdown
				 * is completed
				 */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				sctp_handle_ootb(m, iphlen, *offset, sh, inp,
				    NULL, vrf_id);
				return (NULL);
			}
		} else {
			/* for all other chunks, vtag must match */
			if (vtag_in != asoc->my_vtag) {
				/* invalid vtag... */
				SCTPDBG(SCTP_DEBUG_INPUT3,
				    "invalid vtag: %xh, expect %xh\n",
				    vtag_in, asoc->my_vtag);
				SCTP_STAT_INCR(sctps_badvtag);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
		}
	}			/* end if !SCTP_COOKIE_ECHO */
	/*
	 * process all control chunks...
	 */
	if (((ch->chunk_type == SCTP_SELECTIVE_ACK) ||
	    (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) &&
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		/* implied cookie-ack.. we must have lost the ack */
		if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
			    stcb->asoc.overall_error_count,
			    0,
			    SCTP_FROM_SCTP_INPUT,
			    __LINE__);
		}
		stcb->asoc.overall_error_count = 0;
		sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb,
		    *netp);
	}
	/*
	 * Main loop: walk every control chunk in the packet. Most error
	 * cases set *offset = length (consume the rest of the packet) and
	 * return NULL after dropping the tcb lock.
	 */
process_control_chunks:
	while (IS_SCTP_CONTROL(ch)) {
		/* validate chunk length */
		chk_length = ntohs(ch->chunk_length);
		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n",
		    ch->chunk_type, chk_length);
		SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length);
		if (chk_length < sizeof(*ch) ||
		    (*offset + (int)chk_length) > length) {
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
		}
		SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks);
		/*
		 * INIT-ACK only gets the init ack "header" portion only
		 * because we don't have to process the peer's COOKIE. All
		 * others get a complete chunk.
		 */
		if ((ch->chunk_type == SCTP_INITIATION_ACK) ||
		    (ch->chunk_type == SCTP_INITIATION)) {
			/* get an init-ack chunk */
			ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
			    sizeof(struct sctp_init_ack_chunk), chunk_buf);
			if (ch == NULL) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
		} else {
			/* For cookies and all other chunks. */
			if (chk_length > sizeof(chunk_buf)) {
				/*
				 * use just the size of the chunk buffer so
				 * the front part of our chunks fit in
				 * contiguous space up to the chunk buffer
				 * size (508 bytes). For chunks that need to
				 * get more than that they must use the
				 * sctp_m_getptr() function or other means
				 * (e.g. know how to parse mbuf chains).
				 * Cookies do this already.
				 */
				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
				    (sizeof(chunk_buf) - 4),
				    chunk_buf);
				if (ch == NULL) {
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
			} else {
				/* We can fit it all */
				ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
				    chk_length, chunk_buf);
				if (ch == NULL) {
					SCTP_PRINTF("sctp_process_control: Can't get the all data....\n");
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
			}
		}
		num_chunks++;
		/* Save off the last place we got a control from */
		if (stcb != NULL) {
			if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) {
				/*
				 * allow last_control to be NULL if
				 * ASCONF... ASCONF processing will find the
				 * right net later
				 */
				if ((netp != NULL) && (*netp != NULL))
					stcb->asoc.last_control_chunk_from = *netp;
			}
		}
#ifdef SCTP_AUDITING_ENABLED
		sctp_audit_log(0xB0, ch->chunk_type);
#endif

		/* check to see if this chunk required auth, but isn't */
		if ((stcb != NULL) && !sctp_auth_disable &&
		    sctp_auth_is_required_chunk(ch->chunk_type,
		    stcb->asoc.local_auth_chunks) &&
		    !stcb->asoc.authenticated) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			goto next_chunk;
		}
		/*
		 * Dispatch on chunk type. Cases that consume the whole
		 * packet return NULL; others break to advance to the next
		 * chunk.
		 */
		switch (ch->chunk_type) {
		case SCTP_INITIATION:
			/* must be first and only chunk */
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n");
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore? */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					/*
					 * collision case where we are
					 * sending to them too
					 */
					;
				} else {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					*offset = length;
					return (NULL);
				}
			}
			if ((chk_length > SCTP_LARGEST_INIT_ACCEPTED) ||
			    (num_chunks > 1) ||
			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((stcb != NULL) &&
			    (SCTP_GET_STATE(&stcb->asoc) ==
			    SCTP_STATE_SHUTDOWN_ACK_SENT)) {
				sctp_send_shutdown_ack(stcb,
				    stcb->asoc.primary_destination);
				*offset = length;
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (netp) {
				sctp_handle_init(m, iphlen, *offset, sh,
				    (struct sctp_init_chunk *)ch, inp,
				    stcb, *netp, &abort_no_unlock, vrf_id);
			}
			if (abort_no_unlock)
				return (NULL);

			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
			break;
		case SCTP_PAD_CHUNK:
			break;
		case SCTP_INITIATION_ACK:
			/* must be first and only chunk */
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n");
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					;
				} else {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					*offset = length;
					if (stcb) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
						so = SCTP_INP_SO(inp);
						atomic_add_int(&stcb->asoc.refcnt, 1);
						SCTP_TCB_UNLOCK(stcb);
						SCTP_SOCKET_LOCK(so, 1);
						SCTP_TCB_LOCK(stcb);
						atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
						(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
						SCTP_SOCKET_UNLOCK(so, 1);
#endif
					}
					return (NULL);
				}
			}
			if ((num_chunks > 1) ||
			    (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((netp) && (*netp)) {
				ret = sctp_handle_init_ack(m, iphlen, *offset, sh,
				    (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id);
			} else {
				ret = -1;
			}
			/*
			 * Special case, I must call the output routine to
			 * get the cookie echoed
			 */
			if (abort_no_unlock)
				return (NULL);

			if ((stcb) && ret == 0)
				sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
			*offset = length;
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			return (NULL);
			break;
		case SCTP_SELECTIVE_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n");
			SCTP_STAT_INCR(sctps_recvsacks);
			{
				struct sctp_sack_chunk *sack;
				int abort_now = 0;
				uint32_t a_rwnd, cum_ack;
				uint16_t num_seg;
				int nonce_sum_flag;

				if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) {
					SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n");
			ignore_sack:
					*offset = length;
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					return (NULL);
				}
				if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) {
					/*-
					 * If we have sent a shutdown-ack, we will pay no
					 * attention to a sack sent in to us since
					 * we don't care anymore.
					 */
					goto ignore_sack;
				}
				sack = (struct sctp_sack_chunk *)ch;
				nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM;
				cum_ack = ntohl(sack->sack.cum_tsn_ack);
				num_seg = ntohs(sack->sack.num_gap_ack_blks);
				a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd);
				SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n",
				    cum_ack,
				    num_seg,
				    a_rwnd
				    );
				stcb->asoc.seen_a_sack_this_pkt = 1;
				if ((stcb->asoc.pr_sctp_cnt == 0) &&
				    (num_seg == 0) &&
				    ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) ||
				    (cum_ack == stcb->asoc.last_acked_seq)) &&
				    (stcb->asoc.saw_sack_with_frags == 0) &&
				    (!TAILQ_EMPTY(&stcb->asoc.sent_queue))
				    ) {
					/*
					 * We have a SIMPLE sack having no
					 * prior segments and data on sent
					 * queue to be acked.. Use the
					 * faster path sack processing. We
					 * also allow window update sacks
					 * with no missing segments to go
					 * this way too.
					 */
					sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag,
					    &abort_now);
				} else {
					if (netp && *netp)
						sctp_handle_sack(m, *offset,
						    sack, stcb, *netp, &abort_now, chk_length, a_rwnd);
				}
				if (abort_now) {
					/* ABORT signal from sack processing */
					*offset = length;
					return (NULL);
				}
			}
			break;
		case SCTP_HEARTBEAT_REQUEST:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n");
			if ((stcb) && netp && *netp) {
				SCTP_STAT_INCR(sctps_recvheartbeat);
				sctp_send_heartbeat_ack(stcb, m, *offset,
				    chk_length, *netp);

				/* He's alive so give him credit */
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
			}
			break;
		case SCTP_HEARTBEAT_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n");
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) {
				/* Its not ours */
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			/* He's alive so give him credit */
			if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				    stcb->asoc.overall_error_count,
				    0,
				    SCTP_FROM_SCTP_INPUT,
				    __LINE__);
			}
			stcb->asoc.overall_error_count = 0;
			SCTP_STAT_INCR(sctps_recvheartbeatack);
			if (netp && *netp)
				sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch,
				    stcb, *netp);
			break;
		case SCTP_ABORT_ASSOCIATION:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n",
			    stcb);
			if ((stcb) && netp && *netp)
				sctp_handle_abort((struct sctp_abort_chunk *)ch,
				    stcb, *netp);
			*offset = length;
			return (NULL);
			break;
		case SCTP_SHUTDOWN:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n",
			    stcb);
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (netp && *netp) {
				int abort_flag = 0;

				sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch,
				    stcb, *netp, &abort_flag);
				if (abort_flag) {
					*offset = length;
					return (NULL);
				}
			}
			break;
		case SCTP_SHUTDOWN_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb);
			if ((stcb) && (netp) && (*netp))
				sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp);
			*offset = length;
			return (NULL);
			break;

		case SCTP_OPERATION_ERROR:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n");
			if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) {

				*offset = length;
				return (NULL);
			}
			break;
		case SCTP_COOKIE_ECHO:
			SCTPDBG(SCTP_DEBUG_INPUT3,
			    "SCTP_COOKIE-ECHO, stcb %p\n", stcb);
			if ((stcb) && (stcb->asoc.total_output_queue_size)) {
				;
			} else {
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* We are not interested anymore */
					*offset = length;
					return (NULL);
				}
			}
			/*
			 * First are we accepting? We do this again here
			 * sincen it is possible that a previous endpoint
			 * WAS listening responded to a INIT-ACK and then
			 * closed. We opened and bound.. and are now no
			 * longer listening.
			 */

			if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) {
				if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
				    (sctp_abort_if_one_2_one_hits_limit)) {
					struct mbuf *oper;
					struct sctp_paramhdr *phdr;

					oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
					    0, M_DONTWAIT, 1, MT_DATA);
					if (oper) {
						SCTP_BUF_LEN(oper) =
						    sizeof(struct sctp_paramhdr);
						phdr = mtod(oper,
						    struct sctp_paramhdr *);
						phdr->param_type =
						    htons(SCTP_CAUSE_OUT_OF_RESC);
						phdr->param_length =
						    htons(sizeof(struct sctp_paramhdr));
					}
					sctp_abort_association(inp, stcb, m,
					    iphlen, sh, oper, vrf_id);
				}
				*offset = length;
				return (NULL);
			} else {
				struct mbuf *ret_buf;
				struct sctp_inpcb *linp;

				if (stcb) {
					linp = NULL;
				} else {
					linp = inp;
				}

				if (linp) {
					SCTP_ASOC_CREATE_LOCK(linp);
				}
				if (netp) {
					ret_buf =
					    sctp_handle_cookie_echo(m, iphlen,
					    *offset, sh,
					    (struct sctp_cookie_echo_chunk *)ch,
					    &inp, &stcb, netp,
					    auth_skipped,
					    auth_offset,
					    auth_len,
					    &locked_tcb,
					    vrf_id);
				} else {
					ret_buf = NULL;
				}
				if (linp) {
					SCTP_ASOC_CREATE_UNLOCK(linp);
				}
				if (ret_buf == NULL) {
					if (locked_tcb) {
						SCTP_TCB_UNLOCK(locked_tcb);
					}
					SCTPDBG(SCTP_DEBUG_INPUT3,
					    "GAK, null buffer\n");
					auth_skipped = 0;
					*offset = length;
					return (NULL);
				}
				/* if AUTH skipped, see if it verified... */
				if (auth_skipped) {
					got_auth = 1;
					auth_skipped = 0;
				}
				if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) {
					/*
					 * Restart the timer if we have
					 * pending data
					 */
					struct sctp_tmit_chunk *chk;

					chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
					if (chk) {
						sctp_timer_start(SCTP_TIMER_TYPE_SEND,
						    stcb->sctp_ep, stcb,
						    chk->whoTo);
					}
				}
			}
			break;
		case SCTP_COOKIE_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb);
			if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) {
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
				if ((stcb) && (stcb->asoc.total_output_queue_size)) {
					;
				} else if (stcb) {
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					so = SCTP_INP_SO(inp);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					*offset = length;
					return (NULL);
				}
			}
			/* He's alive so give him credit */
			if ((stcb) && netp && *netp) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp);
			}
			break;
		case SCTP_ECN_ECHO:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n");
			/* He's alive so give him credit */
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (stcb) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch,
				    stcb);
			}
			break;
		case SCTP_ECN_CWR:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n");
			/* He's alive so give him credit */
			if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (stcb) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb);
			}
			break;
		case SCTP_SHUTDOWN_COMPLETE:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb);
			/* must be first and only chunk */
			if ((num_chunks > 1) ||
			    (length - *offset > (int)SCTP_SIZE32(chk_length))) {
				*offset = length;
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				return (NULL);
			}
			if ((stcb) && netp && *netp) {
				sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch,
				    stcb, *netp);
			}
			*offset = length;
			return (NULL);
			break;
		case SCTP_ASCONF:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n");
			/* He's alive so give him credit */
			if (stcb) {
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_asconf(m, *offset,
				    (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0);
				asconf_cnt++;
			}
			break;
		case SCTP_ASCONF_ACK:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n");
			if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if ((stcb) && netp && *netp) {
				/* He's alive so give him credit */
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				stcb->asoc.overall_error_count = 0;
				sctp_handle_asconf_ack(m, *offset,
				    (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock);
				if (abort_no_unlock)
					return (NULL);
			}
			break;
		case SCTP_FORWARD_CUM_TSN:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n");
			if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			/* He's alive so give him credit */
			if (stcb) {
				int abort_flag = 0;

				stcb->asoc.overall_error_count = 0;
				if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
					sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
					    stcb->asoc.overall_error_count,
					    0,
					    SCTP_FROM_SCTP_INPUT,
					    __LINE__);
				}
				*fwd_tsn_seen = 1;
				if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
					/* We are not interested anymore */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					so = SCTP_INP_SO(inp);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					*offset = length;
					return (NULL);
				}
				sctp_handle_forward_tsn(stcb,
				    (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset);
				if (abort_flag) {
					*offset = length;
					return (NULL);
				} else {
					if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
						sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
						    stcb->asoc.overall_error_count,
						    0,
						    SCTP_FROM_SCTP_INPUT,
						    __LINE__);
					}
					stcb->asoc.overall_error_count = 0;
				}

			}
			break;
		case SCTP_STREAM_RESET:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n");
			if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
				/* We are not interested anymore */
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				so = SCTP_INP_SO(inp);
				atomic_add_int(&stcb->asoc.refcnt, 1);
				SCTP_TCB_UNLOCK(stcb);
				SCTP_SOCKET_LOCK(so, 1);
				SCTP_TCB_LOCK(stcb);
				atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
				(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_30);
#if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
				SCTP_SOCKET_UNLOCK(so, 1);
#endif
				*offset = length;
				return (NULL);
			}
			if (stcb->asoc.peer_supports_strreset == 0) {
				/*
				 * hmm, peer should have announced this, but
				 * we will turn it on since he is sending us
				 * a stream reset.
				 */
				stcb->asoc.peer_supports_strreset = 1;
			}
			if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) {
				/* stop processing */
				*offset = length;
				return (NULL);
			}
			break;
		case SCTP_PACKET_DROPPED:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n");
			/* re-get it all please */
			if (chk_length < sizeof(struct sctp_pktdrop_chunk)) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (ch && (stcb) && netp && (*netp)) {
				sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch,
				    stcb, *netp,
				    min(chk_length, (sizeof(chunk_buf) - 4)));

			}
			break;

		case SCTP_AUTHENTICATION:
			SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n");
			if (sctp_auth_disable)
				goto unknown_chunk;

			if (stcb == NULL) {
				/* save the first AUTH for later processing */
				if (auth_skipped == 0) {
					auth_offset = *offset;
					auth_len = chk_length;
					auth_skipped = 1;
				}
				/* skip this chunk (temporarily) */
				goto next_chunk;
			}
			if ((chk_length < (sizeof(struct sctp_auth_chunk))) ||
			    (chk_length > (sizeof(struct sctp_auth_chunk) +
			    SCTP_AUTH_DIGEST_LEN_MAX))) {
				/* Its not ours */
				if (locked_tcb) {
					SCTP_TCB_UNLOCK(locked_tcb);
				}
				*offset = length;
				return (NULL);
			}
			if (got_auth == 1) {
				/* skip this chunk... it's already auth'd */
				goto next_chunk;
			}
			got_auth = 1;
			if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch,
			    m, *offset)) {
				/* auth HMAC failed so dump the packet */
				*offset = length;
				return (stcb);
			} else {
				/* remaining chunks are HMAC checked */
				stcb->asoc.authenticated = 1;
			}
			break;

		default:
	unknown_chunk:
			/* it's an unknown chunk! */
			if ((ch->chunk_type & 0x40) && (stcb != NULL)) {
				struct mbuf *mm;
				struct sctp_paramhdr *phd;

				mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
				    0, M_DONTWAIT, 1, MT_DATA);
				if (mm) {
					phd = mtod(mm, struct sctp_paramhdr *);
					/*
					 * We cheat and use param type since
					 * we did not bother to define a
					 * error cause struct. They are the
					 * same basic format with different
					 * names.
					 */
					phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK);
					phd->param_length = htons(chk_length + sizeof(*phd));
					SCTP_BUF_LEN(mm) = sizeof(*phd);
					SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length),
					    M_DONTWAIT);
					if (SCTP_BUF_NEXT(mm)) {
#ifdef SCTP_MBUF_LOGGING
						if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
							struct mbuf *mat;

							mat = SCTP_BUF_NEXT(mm);
							while (mat) {
								if (SCTP_BUF_IS_EXTENDED(mat)) {
									sctp_log_mb(mat, SCTP_MBUF_ICOPY);
								}
								mat = SCTP_BUF_NEXT(mat);
							}
						}
#endif
						sctp_queue_op_err(stcb, mm);
					} else {
						sctp_m_freem(mm);
					}
				}
			}
			if ((ch->chunk_type & 0x80) == 0) {
				/* discard this packet */
				*offset = length;
				return (stcb);
			}	/* else skip this bad chunk and continue... */
			break;
		}		/* switch (ch->chunk_type) */


next_chunk:
		/* get the next chunk */
		*offset += SCTP_SIZE32(chk_length);
		if (*offset >= length) {
			/* no more data left in the mbuf chain */
			break;
		}
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_chunkhdr), chunk_buf);
		if (ch == NULL) {
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			*offset = length;
			return (NULL);
		}
	}			/* while */

	if (asconf_cnt > 0 && stcb != NULL) {
		sctp_send_asconf_ack(stcb);
	}
	return (stcb);
}


/*
 * Process the ECN bits we have something set so we must look to see if it is
 * ECN(0) or ECN(1) or CE
 */
static void
sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits)
{
	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
		;
	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
		/*
		 * we only add to the nonce sum for ECT1, ECT0 does not
		 * change the NS bit (that we have yet to find a way to send
		 * it yet).
		 */

		/* ECN Nonce stuff */
		stcb->asoc.receiver_nonce_sum++;
		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;

		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	}
}

/*
 * Second half of ECN processing: if the CE (congestion experienced) bits
 * are set, queue an ECN-Echo chunk for any TSN newer than the last one we
 * echoed, so the peer reduces its congestion window.
 */
static void
sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint32_t high_tsn, uint8_t ecn_bits)
{
	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
		/*
		 * we possibly must notify the sender that a congestion
		 * window reduction is in order. We do this by adding a ECNE
		 * chunk to the output chunk queue. The incoming CWR will
		 * remove this chunk.
		 */
		if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
		    MAX_TSN)) {
			/* Yep, we need to add a ECNE */
			sctp_send_ecn_echo(stcb, net, high_tsn);
			stcb->asoc.last_echo_tsn = high_tsn;
		}
	}
}

#ifdef INVARIANTS
/*
 * Debug-only (INVARIANTS) sanity check: panic if any tcb on this endpoint
 * is still locked when input processing returns.
 */
static void
sctp_validate_no_locks(struct sctp_inpcb *inp)
{
	struct sctp_tcb *stcb;

	LIST_FOREACH(stcb, &inp->sctp_asoc_list, sctp_tcblist) {
		if (mtx_owned(&stcb->tcb_mtx)) {
			panic("Own lock on stcb at return from input");
		}
	}
}

#endif

/*
 * common input chunk processing (v4 and v6)
 */
void
sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits, uint32_t vrf_id)
{
	/*
	 * Control chunk processing
	 */
	uint32_t high_tsn;
	int fwd_tsn_seen = 0, data_processed = 0;
	struct mbuf *m = *mm;
	int abort_flag = 0;
	int un_sent;

	SCTP_STAT_INCR(sctps_recvdatagrams);
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 1);
	sctp_auditing(0, inp, stcb, net);
#endif

	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d stcb:%p\n",
	    m, iphlen, offset, stcb);
	if
(stcb) {
		/* always clear this before beginning a packet */
		stcb->asoc.authenticated = 0;
		stcb->asoc.seen_a_sack_this_pkt = 0;
		SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n",
		    stcb, stcb->asoc.state);

		if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) ||
		    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) {
			/*-
			 * If we hit here, we had a ref count
			 * up when the assoc was aborted and the
			 * timer is clearing out the assoc, we should
			 * NOT respond to any packet.. its OOTB.
			 */
			SCTP_TCB_UNLOCK(stcb);
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id);
			goto out_now;
		}
	}
	if (IS_SCTP_CONTROL(ch)) {
		/* process the control portion of the SCTP packet */
		/* sa_ignore NO_NULL_CHK */
		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
		    inp, stcb, &net, &fwd_tsn_seen, vrf_id);
		if (stcb) {
			/*
			 * This covers us if the cookie-echo was there and
			 * it changes our INP.
			 */
			inp = stcb->sctp_ep;
		}
	} else {
		/*
		 * no control chunks, so pre-process DATA chunks (these
		 * checks are taken care of by control processing)
		 */

		/*
		 * if DATA only packet, and auth is required, then punt...
		 * can't have authenticated without any AUTH (control)
		 * chunks
		 */
		if ((stcb != NULL) && !sctp_auth_disable &&
		    sctp_auth_is_required_chunk(SCTP_DATA,
		    stcb->asoc.local_auth_chunks)) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
		}
		if (stcb == NULL) {
			/* out of the blue DATA chunk */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id);
			goto out_now;
		}
		/*
		 * DATA-only packets must carry our verification tag;
		 * control processing performs this check on its own path.
		 */
		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
			/* v_tag mismatch!
			 */
			SCTP_STAT_INCR(sctps_badvtag);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
		}
	}

	if (stcb == NULL) {
		/*
		 * no valid TCB for this packet, or we found it's a bad
		 * packet while processing control, or we're done with this
		 * packet (done or skip rest of data), so we drop it...
		 */
		goto out_now;
	}
	/*
	 * DATA chunk processing
	 */
	/* plow through the data chunks while length > offset */

	/*
	 * Rest should be DATA only. Check authentication state if AUTH for
	 * DATA is required.
	 */
	if ((length > offset) && (stcb != NULL) && !sctp_auth_disable &&
	    sctp_auth_is_required_chunk(SCTP_DATA,
	    stcb->asoc.local_auth_chunks) &&
	    !stcb->asoc.authenticated) {
		/* "silently" ignore */
		SCTP_STAT_INCR(sctps_recvauthmissing);
		SCTPDBG(SCTP_DEBUG_AUTH1,
		    "Data chunk requires AUTH, skipped\n");
		/* Still try to push out queued chunks before returning. */
		goto trigger_send;
	}
	if (length > offset) {
		int retval;

		/*
		 * First check to make sure our state is correct. We would
		 * not get here unless we really did have a tag, so we don't
		 * abort if this happens, just dump the chunk silently.
		 */
		switch (SCTP_GET_STATE(&stcb->asoc)) {
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * we consider data with valid tags in this state
			 * shows us the cookie-ack was lost. Imply it was
			 * there.
			 */
			if (sctp_logging_level & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
				    stcb->asoc.overall_error_count,
				    0,
				    SCTP_FROM_SCTP_INPUT,
				    __LINE__);
			}
			stcb->asoc.overall_error_count = 0;
			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
			break;
		case SCTP_STATE_COOKIE_WAIT:
			/*
			 * We consider OOTB any data sent during asoc setup.
			 */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id);
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_EMPTY:	/* should not happen */
		case SCTP_STATE_INUSE:	/* should not happen */
		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
		default:
			/* Silently drop DATA in any other state. */
			SCTP_TCB_UNLOCK(stcb);
			goto out_now;
			/* sa_ignore NOTREACHED */
			break;
		case SCTP_STATE_OPEN:
		case SCTP_STATE_SHUTDOWN_SENT:
			/* Normal states in which DATA is acceptable. */
			break;
		}
		/* take care of ECN, part 1. */
		if (stcb->asoc.ecn_allowed &&
		    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
			sctp_process_ecn_marked_a(stcb, net, ecn_bits);
		}
		/* plow through the data chunks while length > offset */
		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
		    inp, stcb, net, &high_tsn);
		if (retval == 2) {
			/*
			 * The association aborted, NO UNLOCK needed since
			 * the association is destroyed.
			 */
			goto out_now;
		}
		data_processed = 1;
		if (retval == 0) {
			/* take care of ecn part 2. */
			if (stcb->asoc.ecn_allowed &&
			    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
				sctp_process_ecn_marked_b(stcb, net, high_tsn,
				    ecn_bits);
			}
		}
		/*
		 * Anything important needs to have been m_copy'ed in
		 * process_data
		 */
	}
	/*
	 * A FORWARD-TSN without processed DATA still moves the cumulative
	 * TSN, so force a SACK check here.
	 */
	if ((data_processed == 0) && (fwd_tsn_seen)) {
		int was_a_gap = 0;

		if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
		    stcb->asoc.cumulative_tsn, MAX_TSN)) {
			/* there was a gap before this data was processed */
			was_a_gap = 1;
		}
		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
		if (abort_flag) {
			/* Again, we aborted so NO UNLOCK needed */
			goto out_now;
		}
	}
	/* trigger send of any chunks in queue...
	 */
trigger_send:
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 2);
	sctp_auditing(1, inp, stcb, net);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
	    stcb->asoc.peers_rwnd,
	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
	    stcb->asoc.total_flight);
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);

	/*
	 * Kick the output path if we have control chunks queued, or unsent
	 * data and either window space or a zero-window probe opportunity
	 * (rwnd <= 0 with nothing in flight).
	 */
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
	    ((un_sent) &&
	    (stcb->asoc.peers_rwnd > 0 ||
	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 3);
	sctp_auditing(2, inp, stcb, net);
#endif
	SCTP_TCB_UNLOCK(stcb);
out_now:
	/* NOTE: paths that jump here have already unlocked/destroyed stcb. */
#ifdef INVARIANTS
	sctp_validate_no_locks(inp);
#endif
	return;
}



/*
 * IPv4 input entry point for SCTP.  Validates the packet (pullup, no
 * broadcast/multicast, checksum, port/length sanity), locates the inp/stcb
 * (lookup bumps inp's ref-count and returns a locked stcb), answers OOTB
 * packets per RFC 2960 rules, then hands off to
 * sctp_common_input_processing().  Old-style (K&R) definition preserved
 * from the original source.
 */
void
sctp_input(i_pak, off)
	struct mbuf *i_pak;
	int off;

{
#ifdef SCTP_MBUF_LOGGING
	struct mbuf *mat;

#endif
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_inpcb *inp = NULL;

	uint32_t check, calc_check;
	struct sctp_nets *net;
	struct sctp_tcb *stcb = NULL;
	struct sctp_chunkhdr *ch;
	int refcount_up = 0;	/* set when lookup left an inp ref we must drop */
	int length, mlen, offset;


	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	mlen = SCTP_HEADER_LEN(i_pak);
	iphlen = off;
	m = SCTP_HEADER_TO_CHAIN(i_pak);

	net = NULL;
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);


#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
		mat = m;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_INPUT);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
#ifdef SCTP_PACKET_LOGGING
	if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
		sctp_packet_log(m, mlen);
#endif
	/*
	 * Must take out the iphlen, since mlen expects this (only effect lb
	 * case)
	 */
	mlen -= iphlen;

	/*
	 * Get IP, SCTP, and first chunk header together in first mbuf.
	 */
	ip = mtod(m, struct ip *);
	offset = iphlen + sizeof(*sh) + sizeof(*ch);
	if (SCTP_BUF_LEN(m) < offset) {
		if ((m = m_pullup(m, offset)) == 0) {
			/* m_pullup freed the chain on failure */
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
		ip = mtod(m, struct ip *);
	}
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "sctp_input() length:%d iphlen:%d\n", mlen, iphlen);

	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		goto bad;
	}
	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
		/*
		 * We only look at broadcast if its a front state, All
		 * others we will not have a tcb for anyway.
		 */
		goto bad;
	}
	/* validate SCTP checksum */
	check = sh->checksum;	/* save incoming checksum */
	if ((check == 0) && (sctp_no_csum_on_loopback) &&
	    ((ip->ip_src.s_addr == ip->ip_dst.s_addr) ||
	    (SCTP_IS_IT_LOOPBACK(m)))
	    ) {
		/* Loopback optimization: zero checksum accepted if enabled. */
		goto sctp_skip_csum_4;
	}
	sh->checksum = 0;	/* prepare for calc */
	calc_check = sctp_calculate_sum(m, &mlen, iphlen);
	if (calc_check != check) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
		    calc_check, check, m, mlen, iphlen);

		/*
		 * Bad checksum: still look the association up so we can
		 * report a packet-dropped event to the peer.
		 */
		stcb = sctp_findassociation_addr(m, iphlen,
		    offset - sizeof(*ch),
		    sh, ch, &inp, &net,
		    vrf_id);
		if ((inp) && (stcb)) {
			sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED);
		} else if ((inp != NULL) && (stcb == NULL)) {
			refcount_up = 1;
		}
		SCTP_STAT_INCR(sctps_badsum);
		SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
		goto bad;
	}
	sh->checksum = calc_check;
sctp_skip_csum_4:
	/* destination port of 0 is illegal, based on RFC2960. */
	if (sh->dest_port == 0) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	/* validate mbuf chain length with IP payload length */
	if (mlen < (ip->ip_len - iphlen)) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	/*
	 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
	 * IP/SCTP/first chunk header...
	 */
	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
	    sh, ch, &inp, &net, vrf_id);
	/* inp's ref-count increased && stcb locked */
	if (inp == NULL) {
		/* No endpoint at all: out-of-the-blue packet. */
		struct sctp_init_chunk *init_chk, chunk_buf;

		SCTP_STAT_INCR(sctps_noport);
#ifdef ICMP_BANDLIM
		/*
		 * we use the bandwidth limiting to protect against sending
		 * too many ABORTS all at once. In this case these count the
		 * same as an ICMP message.
		 */
		if (badport_bandlim(0) < 0)
			goto bad;
#endif				/* ICMP_BANDLIM */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Sending a ABORT from packet entry!\n");
		if (ch->chunk_type == SCTP_INITIATION) {
			/*
			 * we do a trick here to get the INIT tag, dig in
			 * and get the tag from the INIT and put it in the
			 * common header.
			 */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    iphlen + sizeof(*sh), sizeof(*init_chk),
			    (uint8_t *) & chunk_buf);
			if (init_chk != NULL)
				sh->v_tag = init_chk->init.initiate_tag;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			/* OOTB SHUTDOWN-ACK gets a SHUTDOWN-COMPLETE, not ABORT. */
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
			goto bad;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
			/* OOTB SHUTDOWN-COMPLETE is silently discarded. */
			goto bad;
		}
		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
			/* Never ABORT an ABORT; everything else gets one. */
			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id);
		goto bad;
	} else if (stcb == NULL) {
		refcount_up = 1;
	}
#ifdef IPSEC
	/*
	 * I very much doubt any of the IPSEC stuff will work but I have no
	 * idea, so I will leave it in place.
	 */
	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
		ipsec4stat.in_polvio++;
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
#endif				/* IPSEC */

	/*
	 * common chunk processing
	 */
	/*
	 * NOTE(review): ip->ip_len is assumed here to have been adjusted by
	 * ip_input() (host order, header length removed) — confirm against
	 * the ip_input() of this kernel version.
	 */
	length = ip->ip_len + iphlen;
	offset -= sizeof(struct sctp_chunkhdr);

	ecn_bits = ip->ip_tos;

	/* sa_ignore NO_NULL_CHK */
	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
	    inp, stcb, net, ecn_bits, vrf_id);
	/* inp's ref-count reduced && stcb unlocked */
	if (m) {
		sctp_m_freem(m);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	return;
bad:
	/* Error path: release stcb lock, drop inp ref, free the mbuf chain. */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_DECR_REF(inp);
	}
	if (m) {
		sctp_m_freem(m);
	}
	return;
}