/* sctp_input.c revision 171158 */
/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 171158 2007-07-02 19:22:22Z rrs $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>



/*
 * Stop the COOKIE or INIT retransmit timer on every net of the
 * association. Caller must hold the tcb lock (asserted below).
 */
static void
sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
{
    struct sctp_nets *net;

    /*
     * This now not only stops all cookie timers it also stops any INIT
     * timers as well. This will make sure that the timers are stopped
     * in all collision cases.
     */
    SCTP_TCB_LOCK_ASSERT(stcb);
    TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
        if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
            sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
                stcb->sctp_ep,
                stcb,
                net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
        } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
            sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
                stcb->sctp_ep,
                stcb,
                net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
        }
    }
}

/*
 * INIT handler. Validates the incoming INIT chunk and, on success,
 * replies with an INIT-ACK carrying a cookie. On validation failure an
 * ABORT is sent; when an existing tcb was aborted, *abort_no_unlock is
 * set so the caller skips unlocking that tcb again.
 */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
    struct sctp_init *init;
    struct mbuf *op_err;
    uint32_t init_limit;

    SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
        stcb);
    op_err = NULL;
    init = &cp->init;
    /* First are we accepting? */
    if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) {
        SCTPDBG(SCTP_DEBUG_INPUT2,
            "sctp_handle_init: Abort, so_qlimit:%d\n",
            inp->sctp_socket->so_qlimit);
        /*
         * FIX ME ?? What about TCP model and we have a
         * match/restart case?
         */
        sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
            vrf_id);
        if (stcb)
            *abort_no_unlock = 1;
        return;
    }
    if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
        /* Invalid length */
        op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
        sctp_abort_association(inp, stcb, m, iphlen, sh, op_err,
            vrf_id);
        if (stcb)
            *abort_no_unlock = 1;
        return;
    }
    /* validate parameters */
    if (init->initiate_tag == 0) {
        /* protocol error...
send abort */ 118 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 119 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 120 vrf_id); 121 if (stcb) 122 *abort_no_unlock = 1; 123 return; 124 } 125 if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) { 126 /* invalid parameter... send abort */ 127 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 128 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 129 vrf_id); 130 return; 131 } 132 if (init->num_inbound_streams == 0) { 133 /* protocol error... send abort */ 134 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 135 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 136 vrf_id); 137 if (stcb) 138 *abort_no_unlock = 1; 139 return; 140 } 141 if (init->num_outbound_streams == 0) { 142 /* protocol error... send abort */ 143 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 144 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 145 vrf_id); 146 if (stcb) 147 *abort_no_unlock = 1; 148 return; 149 } 150 init_limit = offset + ntohs(cp->ch.chunk_length); 151 if (sctp_validate_init_auth_params(m, offset + sizeof(*cp), 152 init_limit)) { 153 /* auth parameter(s) error... send abort */ 154 sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id); 155 if (stcb) 156 *abort_no_unlock = 1; 157 return; 158 } 159 /* send an INIT-ACK w/cookie */ 160 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n"); 161 sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id); 162} 163 164/* 165 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error 166 */ 167 168int 169sctp_is_there_unsent_data(struct sctp_tcb *stcb) 170{ 171 int unsent_data = 0; 172 struct sctp_stream_queue_pending *sp; 173 struct sctp_stream_out *strq; 174 struct sctp_association *asoc; 175 176 /* 177 * This function returns the number of streams that have true unsent 178 * data on them. 
Note that as it looks through it will clean up any 179 * places that have old data that has been sent but left at top of 180 * stream queue. 181 */ 182 asoc = &stcb->asoc; 183 SCTP_TCB_SEND_LOCK(stcb); 184 if (!TAILQ_EMPTY(&asoc->out_wheel)) { 185 /* Check to see if some data queued */ 186 TAILQ_FOREACH(strq, &asoc->out_wheel, next_spoke) { 187 /* sa_ignore FREED_MEMORY */ 188 is_there_another: 189 sp = TAILQ_FIRST(&strq->outqueue); 190 if (sp == NULL) { 191 continue; 192 } 193 if ((sp->msg_is_complete) && 194 (sp->length == 0) && 195 (sp->sender_all_done)) { 196 /* 197 * We are doing differed cleanup. Last time 198 * through when we took all the data the 199 * sender_all_done was not set. 200 */ 201 if (sp->put_last_out == 0) { 202 SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n"); 203 SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n", 204 sp->sender_all_done, 205 sp->length, 206 sp->msg_is_complete, 207 sp->put_last_out); 208 } 209 atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1); 210 TAILQ_REMOVE(&strq->outqueue, sp, next); 211 sctp_free_remote_addr(sp->net); 212 if (sp->data) { 213 sctp_m_freem(sp->data); 214 sp->data = NULL; 215 } 216 sctp_free_a_strmoq(stcb, sp); 217 goto is_there_another; 218 } else { 219 unsent_data++; 220 continue; 221 } 222 } 223 } 224 SCTP_TCB_SEND_UNLOCK(stcb); 225 return (unsent_data); 226} 227 228static int 229sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb, 230 struct sctp_nets *net) 231{ 232 struct sctp_init *init; 233 struct sctp_association *asoc; 234 struct sctp_nets *lnet; 235 unsigned int i; 236 237 init = &cp->init; 238 asoc = &stcb->asoc; 239 /* save off parameters */ 240 asoc->peer_vtag = ntohl(init->initiate_tag); 241 asoc->peers_rwnd = ntohl(init->a_rwnd); 242 if (TAILQ_FIRST(&asoc->nets)) { 243 /* update any ssthresh's that may have a default */ 244 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 245 lnet->ssthresh = asoc->peers_rwnd; 246 247 if (sctp_logging_level & 
(SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
                sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
            }
        }
    }
    SCTP_TCB_SEND_LOCK(stcb);
    if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
        unsigned int newcnt;
        struct sctp_stream_out *outs;
        struct sctp_stream_queue_pending *sp;

        /* cut back on number of streams */
        newcnt = ntohs(init->num_inbound_streams);
        /* This if is probably not needed but I am cautious */
        if (asoc->strmout) {
            /* First make sure no data chunks are trapped */
            for (i = newcnt; i < asoc->pre_open_streams; i++) {
                outs = &asoc->strmout[i];
                sp = TAILQ_FIRST(&outs->outqueue);
                while (sp) {
                    TAILQ_REMOVE(&outs->outqueue, sp,
                        next);
                    asoc->stream_queue_cnt--;
                    sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
                        stcb, SCTP_NOTIFY_DATAGRAM_UNSENT,
                        sp);
                    if (sp->data) {
                        sctp_m_freem(sp->data);
                        sp->data = NULL;
                    }
                    sctp_free_remote_addr(sp->net);
                    sp->net = NULL;
                    /* Free the chunk */
                    SCTP_PRINTF("sp:%p tcb:%p weird free case\n",
                        sp, stcb);

                    sctp_free_a_strmoq(stcb, sp);
                    /* sa_ignore FREED_MEMORY */
                    sp = TAILQ_FIRST(&outs->outqueue);
                }
            }
        }
        /* cut back the count and abandon the upper streams */
        asoc->pre_open_streams = newcnt;
    }
    SCTP_TCB_SEND_UNLOCK(stcb);
    asoc->streamoutcnt = asoc->pre_open_streams;
    /* init tsn's */
    asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;
    if (sctp_logging_level & SCTP_MAP_LOGGING_ENABLE) {
        sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
    }
    /* This is the next one we expect */
    asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

    asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
    asoc->cumulative_tsn = asoc->asconf_seq_in;
    asoc->last_echo_tsn = asoc->asconf_seq_in;
    asoc->advanced_peer_ack_point = asoc->last_acked_seq;
    /* open the requested streams */

    if (asoc->strmin != NULL) {
        /* Free the old ones */
        struct sctp_queued_to_read *ctl;

        for (i = 0; i < asoc->streamincnt; i++) {
            ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
            while (ctl) {
                TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
                sctp_free_remote_addr(ctl->whoFrom);
                ctl->whoFrom = NULL;
                sctp_m_freem(ctl->data);
                ctl->data = NULL;
                sctp_free_a_readq(stcb, ctl);
                ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue);
            }
        }
        SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
    }
    /* inbound stream count is capped at MAX_SCTP_STREAMS */
    asoc->streamincnt = ntohs(init->num_outbound_streams);
    if (asoc->streamincnt > MAX_SCTP_STREAMS) {
        asoc->streamincnt = MAX_SCTP_STREAMS;
    }
    SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
        sizeof(struct sctp_stream_in), SCTP_M_STRMI);
    if (asoc->strmin == NULL) {
        /* we didn't get memory for the streams! */
        SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
        return (-1);
    }
    for (i = 0; i < asoc->streamincnt; i++) {
        asoc->strmin[i].stream_no = i;
        asoc->strmin[i].last_sequence_delivered = 0xffff;
        /*
         * U-stream ranges will be set when the cookie is unpacked.
         * Or for the INIT sender they are un set (if pr-sctp not
         * supported) when the INIT-ACK arrives.
         */
        TAILQ_INIT(&asoc->strmin[i].inqueue);
        asoc->strmin[i].delivery_started = 0;
    }
    /*
     * load_address_from_init will put the addresses into the
     * association when the COOKIE is processed or the INIT-ACK is
     * processed. Both types of COOKIE's existing and new call this
     * routine. It will remove addresses that are no longer in the
     * association (for the restarting case where addresses are
     * removed). Up front when the INIT arrives we will discard it if it
     * is a restart and new addresses have been added.
     */
    /* sa_ignore MEMLEAK */
    return (0);
}

/*
 * INIT-ACK message processing/consumption returns value < 0 on error
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
    struct sctp_association *asoc;
    struct mbuf *op_err;
    int retval, abort_flag;
    uint32_t initack_limit;

    /* First verify that we have no illegal param's */
    abort_flag = 0;
    op_err = NULL;

    op_err = sctp_arethere_unrecognized_parameters(m,
        (offset + sizeof(struct sctp_init_chunk)),
        &abort_flag, (struct sctp_chunkhdr *)cp);
    if (abort_flag) {
        /* Send an abort and notify peer */
        sctp_abort_an_association(stcb->sctp_ep, stcb, SCTP_CAUSE_PROTOCOL_VIOLATION, op_err);
        *abort_no_unlock = 1;
        return (-1);
    }
    asoc = &stcb->asoc;
    /* process the peer's parameters in the INIT-ACK */
    retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net);
    if (retval < 0) {
        return (retval);
    }
    initack_limit = offset + ntohs(cp->ch.chunk_length);
    /* load all addresses */
    if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen,
        (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh,
        NULL))) {
        /* Huh, we should abort */
        SCTPDBG(SCTP_DEBUG_INPUT1,
            "Load addresses from INIT causes an abort %d\n",
            retval);
        sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
            NULL, 0);
        *abort_no_unlock = 1;
        return (-1);
    }
    stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
        stcb->asoc.local_hmacs);
    if (op_err) {
        sctp_queue_op_err(stcb, op_err);
        /* queuing will steal away the mbuf chain to the out queue */
        op_err = NULL;
    }
    /* extract the cookie and queue it to "echo" it back...
 */
    stcb->asoc.overall_error_count = 0;
    net->error_count = 0;

    /*
     * Cancel the INIT timer, We do this first before queueing the
     * cookie. We always cancel at the primary to assue that we are
     * canceling the timer started by the INIT which always goes to the
     * primary.
     */
    sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
        asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);

    /* calculate the RTO */
    net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered);

    retval = sctp_send_cookie_echo(m, offset, stcb, net);
    if (retval < 0) {
        /*
         * No cookie, we probably should send a op error. But in any
         * case if there is no cookie in the INIT-ACK, we can
         * abandon the peer, its broke.
         */
        if (retval == -3) {
            /* We abort with an error of missing mandatory param */
            op_err =
                sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM);
            if (op_err) {
                /*
                 * Expand beyond to include the mandatory
                 * param cookie
                 */
                struct sctp_inv_mandatory_param *mp;

                SCTP_BUF_LEN(op_err) =
                    sizeof(struct sctp_inv_mandatory_param);
                mp = mtod(op_err,
                    struct sctp_inv_mandatory_param *);
                /* Subtract the reserved param */
                mp->length =
                    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
                mp->num_param = htonl(1);
                mp->param = htons(SCTP_STATE_COOKIE);
                mp->resv = 0;
            }
            sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
                sh, op_err, 0);
            *abort_no_unlock = 1;
        }
        return (retval);
    }
    return (0);
}

/*
 * HEARTBEAT-ACK handler: match the echoed address back to one of our
 * nets, confirm/restore that destination and feed the echoed timestamp
 * into the RTO calculation.
 */
static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
    struct sockaddr_storage store;
    struct sockaddr_in *sin;
    struct sockaddr_in6 *sin6;
    struct sctp_nets *r_net;
    struct timeval tv;

    if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
        /* Invalid length */
        return;
    }
    sin = (struct sockaddr_in *)&store;
    sin6 = (struct sockaddr_in6 *)&store;

    memset(&store, 0, sizeof(store));
    if (cp->heartbeat.hb_info.addr_family == AF_INET &&
        cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
        sin->sin_family = cp->heartbeat.hb_info.addr_family;
        sin->sin_len = cp->heartbeat.hb_info.addr_len;
        sin->sin_port = stcb->rport;
        memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
            sizeof(sin->sin_addr));
    } else if (cp->heartbeat.hb_info.addr_family == AF_INET6 &&
        cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
        sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
        sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
        sin6->sin6_port = stcb->rport;
        memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
            sizeof(sin6->sin6_addr));
    } else {
        return;
    }
    r_net = sctp_findnet(stcb, (struct sockaddr *)sin);
    if (r_net == NULL) {
        SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
        return;
    }
    if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
        (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
        (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
        /*
         * If the its a HB and it's random value is correct when can
         * confirm the destination.
         */
        r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
        if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
            stcb->asoc.primary_destination = r_net;
            r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY;
            r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
            r_net = TAILQ_FIRST(&stcb->asoc.nets);
            if (r_net != stcb->asoc.primary_destination) {
                /*
                 * first one on the list is NOT the primary
                 * sctp_cmpaddr() is much more efficent if
                 * the primary is the first on the list,
                 * make it so.
                 */
                TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
                TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next);
            }
        }
        sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
            stcb, 0, (void *)r_net);
    }
    r_net->error_count = 0;
    r_net->hb_responded = 1;
    tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
    tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
    if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
        r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
        r_net->dest_state |= SCTP_ADDR_REACHABLE;
        sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
            SCTP_HEARTBEAT_SUCCESS, (void *)r_net);
        /* now was it the primary? if so restore */
        if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
            (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net);
        }
    }
    /* Now lets do a RTO with this */
    r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv);
}

/*
 * ABORT handler: tear down the association named by stcb. The tcb is
 * freed before return, so callers must not touch it afterwards.
 */
static void
sctp_handle_abort(struct sctp_abort_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
    SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
    if (stcb == NULL)
        return;

    /* stop any receive timers */
    sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5);
    /* notify user of the abort and clean up...
 */
    sctp_abort_notification(stcb, 0);
    /* free the tcb */
    SCTP_STAT_INCR_COUNTER32(sctps_aborted);
    if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
        (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
        SCTP_STAT_DECR_GAUGE32(sctps_currestab);
    }
#ifdef SCTP_ASOCLOG_OF_TSNS
    sctp_print_out_track_log(stcb);
#endif
    sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
    SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}

/*
 * SHUTDOWN handler: process the peer's cumulative ack, notify the ULP,
 * move to SHUTDOWN-RECEIVED, and if no data remains queued reply with a
 * SHUTDOWN-ACK and enter SHUTDOWN-ACK-SENT.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
    struct sctp_association *asoc;
    int some_on_streamwheel;

    SCTPDBG(SCTP_DEBUG_INPUT2,
        "sctp_handle_shutdown: handling SHUTDOWN\n");
    if (stcb == NULL)
        return;
    asoc = &stcb->asoc;
    if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
        (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
        return;
    }
    if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
        /* Shutdown NOT the expected size */
        return;
    } else {
        sctp_update_acked(stcb, cp, net, abort_flag);
    }
    if (asoc->control_pdapi) {
        /*
         * With a normal shutdown we assume the end of last record.
         */
        SCTP_INP_READ_LOCK(stcb->sctp_ep);
        asoc->control_pdapi->end_added = 1;
        asoc->control_pdapi->pdapi_aborted = 1;
        asoc->control_pdapi = NULL;
        SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
        sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
    }
    /* goto SHUTDOWN_RECEIVED state to block new requests */
    if (stcb->sctp_socket) {
        if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
            (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
            (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
            asoc->state = SCTP_STATE_SHUTDOWN_RECEIVED;
            /*
             * notify upper layer that peer has initiated a
             * shutdown
             */
            sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL);

            /* reset time */
            (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
        }
    }
    if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
        /*
         * stop the shutdown timer, since we WILL move to
         * SHUTDOWN-ACK-SENT.
         */
        sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_7);
    }
    /* Now is there unsent data on a stream somewhere? */
    some_on_streamwheel = sctp_is_there_unsent_data(stcb);

    if (!TAILQ_EMPTY(&asoc->send_queue) ||
        !TAILQ_EMPTY(&asoc->sent_queue) ||
        some_on_streamwheel) {
        /* By returning we will push more data out */
        return;
    } else {
        /* no outstanding data to send, so move on... */
        /* send SHUTDOWN-ACK */
        sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination);
        /* move to SHUTDOWN-ACK-SENT state */
        if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
            (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
            SCTP_STAT_DECR_GAUGE32(sctps_currestab);
        }
        asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT;

        /* start SHUTDOWN timer */
        sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
            stcb, net);
    }
}

/*
 * SHUTDOWN-ACK handler: reply with SHUTDOWN-COMPLETE and free the tcb.
 */
static void
sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
    struct sctp_association *asoc;

    SCTPDBG(SCTP_DEBUG_INPUT2,
        "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n");
    if (stcb == NULL)
        return;

    asoc = &stcb->asoc;
    /* process according to association state */
    if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) &&
        (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) {
        /* unexpected SHUTDOWN-ACK... so ignore... */
        SCTP_TCB_UNLOCK(stcb);
        return;
    }
    if (asoc->control_pdapi) {
        /*
         * With a normal shutdown we assume the end of last record.
         */
        SCTP_INP_READ_LOCK(stcb->sctp_ep);
        asoc->control_pdapi->end_added = 1;
        asoc->control_pdapi->pdapi_aborted = 1;
        asoc->control_pdapi = NULL;
        SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
        sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
    }
    /* are the queues empty? */
    if (!TAILQ_EMPTY(&asoc->send_queue) ||
        !TAILQ_EMPTY(&asoc->sent_queue) ||
        !TAILQ_EMPTY(&asoc->out_wheel)) {
        sctp_report_all_outbound(stcb, 0);
    }
    /* stop the timer */
    sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
    /* send SHUTDOWN-COMPLETE */
    sctp_send_shutdown_complete(stcb, net);
    /* notify upper layer protocol */
    if (stcb->sctp_socket) {
        sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL);
        if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
            (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
            /* Set the connected flag to disconnected */
            stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0;
        }
    }
    SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
    /* free the TCB but first save off the ep */
    sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
        SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
}

/*
 * Skip past the param header and then we will find the chunk that caused the
 * problem. There are two possiblities ASCONF or FWD-TSN other than that and
 * our peer must be broken.
 */
static void
sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
    struct sctp_nets *net)
{
    struct sctp_chunkhdr *chk;

    chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
    switch (chk->chunk_type) {
    case SCTP_ASCONF_ACK:
    case SCTP_ASCONF:
        sctp_asconf_cleanup(stcb, net);
        break;
    case SCTP_FORWARD_CUM_TSN:
        stcb->asoc.peer_supports_prsctp = 0;
        break;
    default:
        SCTPDBG(SCTP_DEBUG_INPUT2,
            "Peer does not support chunk type %d(%x)??\n",
            chk->chunk_type, (uint32_t) chk->chunk_type);
        break;
    }
}

/*
 * Skip past the param header and then we will find the param that caused the
 * problem. There are a number of param's in a ASCONF OR the prsctp param
 * these will turn of specific features.
 */
static void
sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
{
    struct sctp_paramhdr *pbad;

    pbad = phdr + 1;
    switch (ntohs(pbad->param_type)) {
        /* pr-sctp draft */
    case SCTP_PRSCTP_SUPPORTED:
        stcb->asoc.peer_supports_prsctp = 0;
        break;
    case SCTP_SUPPORTED_CHUNK_EXT:
        break;
        /* draft-ietf-tsvwg-addip-sctp */
    case SCTP_ECN_NONCE_SUPPORTED:
        stcb->asoc.peer_supports_ecn_nonce = 0;
        stcb->asoc.ecn_nonce_allowed = 0;
        stcb->asoc.ecn_allowed = 0;
        break;
    case SCTP_ADD_IP_ADDRESS:
    case SCTP_DEL_IP_ADDRESS:
    case SCTP_SET_PRIM_ADDR:
        stcb->asoc.peer_supports_asconf = 0;
        break;
    case SCTP_SUCCESS_REPORT:
    case SCTP_ERROR_CAUSE_IND:
        SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n");
        SCTPDBG(SCTP_DEBUG_INPUT2,
            "Turning off ASCONF to this strange peer\n");
        stcb->asoc.peer_supports_asconf = 0;
        break;
    default:
        SCTPDBG(SCTP_DEBUG_INPUT2,
            "Peer does not support param type %d(%x)??\n",
            pbad->param_type, (uint32_t) pbad->param_type);
        break;
    }
}

/*
 * OPERATION-ERROR handler: walk every error cause in the chunk and
 * react per cause. Returns 0 normally, -1 when the association was
 * freed (stale-cookie limit exceeded).
 */
static int
sctp_handle_error(struct sctp_chunkhdr *ch,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int chklen;
    struct sctp_paramhdr *phdr;
    uint16_t error_type;
    uint16_t error_len;
    struct sctp_association *asoc;

    int adjust;

    /* parse through all of the errors and process */
    asoc = &stcb->asoc;
    phdr = (struct sctp_paramhdr *)((caddr_t)ch +
        sizeof(struct sctp_chunkhdr));
    chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
    while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
        /* Process an Error Cause */
        error_type = ntohs(phdr->param_type);
        error_len = ntohs(phdr->param_length);
        if ((error_len > chklen) || (error_len == 0)) {
            /* invalid param length for this param */
            SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
                chklen, error_len);
            return (0);
        }
        switch (error_type) {
        case SCTP_CAUSE_INVALID_STREAM:
        case SCTP_CAUSE_MISSING_PARAM:
        case SCTP_CAUSE_INVALID_PARAM:
        case SCTP_CAUSE_NO_USER_DATA:
            SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n",
                error_type);
            break;
        case SCTP_CAUSE_STALE_COOKIE:
            /*
             * We only act if we have echoed a cookie and are
             * waiting.
             */
            if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
                int *p;

                p = (int *)((caddr_t)phdr + sizeof(*phdr));
                /* Save the time doubled */
                asoc->cookie_preserve_req = ntohl(*p) << 1;
                asoc->stale_cookie_count++;
                if (asoc->stale_cookie_count >
                    asoc->max_init_times) {
                    sctp_abort_notification(stcb, 0);
                    /* now free the asoc */
                    sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
                    return (-1);
                }
                /* blast back to INIT state */
                asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
                asoc->state |= SCTP_STATE_COOKIE_WAIT;

                sctp_stop_all_cookie_timers(stcb);
                sctp_send_initiate(stcb->sctp_ep, stcb);
            }
            break;
        case SCTP_CAUSE_UNRESOLVABLE_ADDR:
            /*
             * Nothing we can do here, we don't do hostname
             * addresses so if the peer does not like my IPv6
             * (or IPv4 for that matter) it does not matter. If
             * they don't support that type of address, they can
             * NOT possibly get that packet type... i.e. with no
             * IPv6 you can't recieve a IPv6 packet. so we can
             * safely ignore this one. If we ever added support
             * for HOSTNAME Addresses, then we would need to do
             * something here.
             */
            break;
        case SCTP_CAUSE_UNRECOG_CHUNK:
            sctp_process_unrecog_chunk(stcb, phdr, net);
            break;
        case SCTP_CAUSE_UNRECOG_PARAM:
            sctp_process_unrecog_param(stcb, phdr);
            break;
        case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
            /*
             * We ignore this since the timer will drive out a
             * new cookie anyway and there timer will drive us
             * to send a SHUTDOWN_COMPLETE. We can't send one
             * here since we don't have their tag.
             */
            break;
        case SCTP_CAUSE_DELETING_LAST_ADDR:
        case SCTP_CAUSE_RESOURCE_SHORTAGE:
        case SCTP_CAUSE_DELETING_SRC_ADDR:
            /*
             * We should NOT get these here, but in a
             * ASCONF-ACK.
             */
            SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
                error_type);
            break;
        case SCTP_CAUSE_OUT_OF_RESC:
            /*
             * And what, pray tell do we do with the fact that
             * the peer is out of resources? Not really sure we
             * could do anything but abort. I suspect this
             * should have came WITH an abort instead of in a
             * OP-ERROR.
 */
            break;
        default:
            SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n",
                error_type);
            break;
        }
        adjust = SCTP_SIZE32(error_len);
        chklen -= adjust;
        phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust);
    }
    return (0);
}

/*
 * INIT-ACK handler: validate the chunk, then act on it only in
 * COOKIE-WAIT state (moving to COOKIE-ECHOED); any other state
 * discards it. Returns < 0 on error.
 */
static int
sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id)
{
    struct sctp_init_ack *init_ack;
    int *state;
    struct mbuf *op_err;

    SCTPDBG(SCTP_DEBUG_INPUT2,
        "sctp_handle_init_ack: handling INIT-ACK\n");

    if (stcb == NULL) {
        SCTPDBG(SCTP_DEBUG_INPUT2,
            "sctp_handle_init_ack: TCB is null\n");
        return (-1);
    }
    if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) {
        /* Invalid length */
        op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
        sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
            op_err, 0);
        *abort_no_unlock = 1;
        return (-1);
    }
    init_ack = &cp->init;
    /* validate parameters */
    if (init_ack->initiate_tag == 0) {
        /* protocol error... send an abort */
        op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
        sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
            op_err, 0);
        *abort_no_unlock = 1;
        return (-1);
    }
    if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) {
        /* protocol error... send an abort */
        op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
        sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
            op_err, 0);
        *abort_no_unlock = 1;
        return (-1);
    }
    if (init_ack->num_inbound_streams == 0) {
        /* protocol error... send an abort */
        op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
        sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
            op_err, 0);
        *abort_no_unlock = 1;
        return (-1);
    }
    if (init_ack->num_outbound_streams == 0) {
        /* protocol error... send an abort */
        op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM);
        sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh,
            op_err, 0);
        *abort_no_unlock = 1;
        return (-1);
    }
    /* process according to association state... */
    state = &stcb->asoc.state;
    switch (*state & SCTP_STATE_MASK) {
    case SCTP_STATE_COOKIE_WAIT:
        /* this is the expected state for this chunk */
        /* process the INIT-ACK parameters */
        if (stcb->asoc.primary_destination->dest_state &
            SCTP_ADDR_UNCONFIRMED) {
            /*
             * The primary is where we sent the INIT, we can
             * always consider it confirmed when the INIT-ACK is
             * returned. Do this before we load addresses
             * though.
             */
            stcb->asoc.primary_destination->dest_state &=
                ~SCTP_ADDR_UNCONFIRMED;
            sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
                stcb, 0, (void *)stcb->asoc.primary_destination);
        }
        if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb,
            net, abort_no_unlock, vrf_id) < 0) {
            /* error in parsing parameters */
            return (-1);
        }
        /* update our state */
        SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n");
        if (*state & SCTP_STATE_SHUTDOWN_PENDING) {
            *state = SCTP_STATE_COOKIE_ECHOED |
                SCTP_STATE_SHUTDOWN_PENDING;
        } else {
            *state = SCTP_STATE_COOKIE_ECHOED;
        }

        /* reset the RTO calc */
        stcb->asoc.overall_error_count = 0;
        (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
        /*
         * collapse the init timer back in case of a exponential
         * backoff
         */
        sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep,
            stcb, net);
        /*
         * the send at the end of the inbound data processing will
         * cause the cookie to be sent
         */
        break;
    case SCTP_STATE_SHUTDOWN_SENT:
        /* incorrect state... discard */
        break;
    case SCTP_STATE_COOKIE_ECHOED:
        /* incorrect state... discard */
        break;
    case SCTP_STATE_OPEN:
        /* incorrect state... discard */
        break;
    case SCTP_STATE_EMPTY:
    case SCTP_STATE_INUSE:
    default:
        /* incorrect state...
discard */ 1025 return (-1); 1026 break; 1027 } 1028 SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n"); 1029 return (0); 1030} 1031 1032 1033/* 1034 * handle a state cookie for an existing association m: input packet mbuf 1035 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a 1036 * "split" mbuf and the cookie signature does not exist offset: offset into 1037 * mbuf to the cookie-echo chunk 1038 */ 1039static struct sctp_tcb * 1040sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, 1041 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1042 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, 1043 struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id, 1044 uint32_t vrf_id) 1045{ 1046 struct sctp_association *asoc; 1047 struct sctp_init_chunk *init_cp, init_buf; 1048 struct sctp_init_ack_chunk *initack_cp, initack_buf; 1049 int chk_length; 1050 int init_offset, initack_offset, i; 1051 int retval; 1052 int spec_flag = 0; 1053 uint32_t how_indx; 1054 1055 /* I know that the TCB is non-NULL from the caller */ 1056 asoc = &stcb->asoc; 1057 for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) { 1058 if (asoc->cookie_how[how_indx] == 0) 1059 break; 1060 } 1061 if (how_indx < sizeof(asoc->cookie_how)) { 1062 asoc->cookie_how[how_indx] = 1; 1063 } 1064 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 1065 /* SHUTDOWN came in after sending INIT-ACK */ 1066 struct mbuf *op_err; 1067 struct sctp_paramhdr *ph; 1068 1069 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 1070 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1071 0, M_DONTWAIT, 1, MT_DATA); 1072 if (op_err == NULL) { 1073 /* FOOBAR */ 1074 return (NULL); 1075 } 1076 /* pre-reserve some space */ 1077 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 1078 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 1079 SCTP_BUF_RESV_UF(op_err, sizeof(struct 
sctp_chunkhdr)); 1080 /* Set the len */ 1081 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr); 1082 ph = mtod(op_err, struct sctp_paramhdr *); 1083 ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN); 1084 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 1085 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag, 1086 vrf_id); 1087 if (how_indx < sizeof(asoc->cookie_how)) 1088 asoc->cookie_how[how_indx] = 2; 1089 return (NULL); 1090 } 1091 /* 1092 * find and validate the INIT chunk in the cookie (peer's info) the 1093 * INIT should start after the cookie-echo header struct (chunk 1094 * header, state cookie header struct) 1095 */ 1096 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk); 1097 1098 init_cp = (struct sctp_init_chunk *) 1099 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1100 (uint8_t *) & init_buf); 1101 if (init_cp == NULL) { 1102 /* could not pull a INIT chunk in cookie */ 1103 return (NULL); 1104 } 1105 chk_length = ntohs(init_cp->ch.chunk_length); 1106 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1107 return (NULL); 1108 } 1109 /* 1110 * find and validate the INIT-ACK chunk in the cookie (my info) the 1111 * INIT-ACK follows the INIT chunk 1112 */ 1113 initack_offset = init_offset + SCTP_SIZE32(chk_length); 1114 initack_cp = (struct sctp_init_ack_chunk *) 1115 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1116 (uint8_t *) & initack_buf); 1117 if (initack_cp == NULL) { 1118 /* could not pull INIT-ACK chunk in cookie */ 1119 return (NULL); 1120 } 1121 chk_length = ntohs(initack_cp->ch.chunk_length); 1122 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1123 return (NULL); 1124 } 1125 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1126 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) { 1127 /* 1128 * case D in Section 5.2.4 Table 2: MMAA process accordingly 1129 * to get into the OPEN state 1130 */ 1131 if (ntohl(initack_cp->init.initial_tsn) != 
asoc->init_seq_number) { 1132#ifdef INVARIANTS 1133 panic("Case D and non-match seq?"); 1134#else 1135 SCTP_PRINTF("Case D, seq non-match %x vs %x?\n", 1136 ntohl(initack_cp->init.initial_tsn), 1137 asoc->init_seq_number); 1138#endif 1139 } 1140 switch SCTP_GET_STATE 1141 (asoc) { 1142 case SCTP_STATE_COOKIE_WAIT: 1143 case SCTP_STATE_COOKIE_ECHOED: 1144 /* 1145 * INIT was sent but got a COOKIE_ECHO with the 1146 * correct tags... just accept it...but we must 1147 * process the init so that we can make sure we have 1148 * the right seq no's. 1149 */ 1150 /* First we must process the INIT !! */ 1151 retval = sctp_process_init(init_cp, stcb, net); 1152 if (retval < 0) { 1153 if (how_indx < sizeof(asoc->cookie_how)) 1154 asoc->cookie_how[how_indx] = 3; 1155 return (NULL); 1156 } 1157 /* we have already processed the INIT so no problem */ 1158 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, 1159 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_11); 1160 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12); 1161 /* update current state */ 1162 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1163 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1164 else 1165 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1166 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1167 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING; 1168 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1169 stcb->sctp_ep, stcb, asoc->primary_destination); 1170 1171 } else { 1172 /* if ok, move to OPEN state */ 1173 asoc->state = SCTP_STATE_OPEN; 1174 } 1175 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1176 sctp_stop_all_cookie_timers(stcb); 1177 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1178 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1179 (inp->sctp_socket->so_qlimit == 0) 1180 ) { 1181 /* 1182 * Here is where collision would go if we 1183 * did a connect() and instead got a 1184 * init/init-ack/cookie done before the 1185 * init-ack 
came back.. 1186 */ 1187 stcb->sctp_ep->sctp_flags |= 1188 SCTP_PCB_FLAGS_CONNECTED; 1189 soisconnected(stcb->sctp_ep->sctp_socket); 1190 } 1191 /* notify upper layer */ 1192 *notification = SCTP_NOTIFY_ASSOC_UP; 1193 /* 1194 * since we did not send a HB make sure we don't 1195 * double things 1196 */ 1197 net->hb_responded = 1; 1198 net->RTO = sctp_calculate_rto(stcb, asoc, net, 1199 &cookie->time_entered); 1200 1201 if (stcb->asoc.sctp_autoclose_ticks && 1202 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) { 1203 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 1204 inp, stcb, NULL); 1205 } 1206 break; 1207 default: 1208 /* 1209 * we're in the OPEN state (or beyond), so peer must 1210 * have simply lost the COOKIE-ACK 1211 */ 1212 break; 1213 } /* end switch */ 1214 sctp_stop_all_cookie_timers(stcb); 1215 /* 1216 * We ignore the return code here.. not sure if we should 1217 * somehow abort.. but we do have an existing asoc. This 1218 * really should not fail. 1219 */ 1220 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1221 init_offset + sizeof(struct sctp_init_chunk), 1222 initack_offset, sh, init_src)) { 1223 if (how_indx < sizeof(asoc->cookie_how)) 1224 asoc->cookie_how[how_indx] = 4; 1225 return (NULL); 1226 } 1227 /* respond with a COOKIE-ACK */ 1228 sctp_toss_old_cookies(stcb, asoc); 1229 sctp_send_cookie_ack(stcb); 1230 if (how_indx < sizeof(asoc->cookie_how)) 1231 asoc->cookie_how[how_indx] = 5; 1232 return (stcb); 1233 } 1234 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1235 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag && 1236 cookie->tie_tag_my_vtag == 0 && 1237 cookie->tie_tag_peer_vtag == 0) { 1238 /* 1239 * case C in Section 5.2.4 Table 2: XMOO silently discard 1240 */ 1241 if (how_indx < sizeof(asoc->cookie_how)) 1242 asoc->cookie_how[how_indx] = 6; 1243 return (NULL); 1244 } 1245 if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag && 1246 (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag || 1247 
init_cp->init.initiate_tag == 0)) { 1248 /* 1249 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info 1250 * should be ok, re-accept peer info 1251 */ 1252 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1253 /* 1254 * Extension of case C. If we hit this, then the 1255 * random number generator returned the same vtag 1256 * when we first sent our INIT-ACK and when we later 1257 * sent our INIT. The side with the seq numbers that 1258 * are different will be the one that normnally 1259 * would have hit case C. This in effect "extends" 1260 * our vtags in this collision case to be 64 bits. 1261 * The same collision could occur aka you get both 1262 * vtag and seq number the same twice in a row.. but 1263 * is much less likely. If it did happen then we 1264 * would proceed through and bring up the assoc.. we 1265 * may end up with the wrong stream setup however.. 1266 * which would be bad.. but there is no way to 1267 * tell.. until we send on a stream that does not 1268 * exist :-) 1269 */ 1270 if (how_indx < sizeof(asoc->cookie_how)) 1271 asoc->cookie_how[how_indx] = 7; 1272 1273 return (NULL); 1274 } 1275 if (how_indx < sizeof(asoc->cookie_how)) 1276 asoc->cookie_how[how_indx] = 8; 1277 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13); 1278 sctp_stop_all_cookie_timers(stcb); 1279 /* 1280 * since we did not send a HB make sure we don't double 1281 * things 1282 */ 1283 net->hb_responded = 1; 1284 if (stcb->asoc.sctp_autoclose_ticks && 1285 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1286 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1287 NULL); 1288 } 1289 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1290 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1291 1292 /* Note last_cwr_tsn? where is this used? 
*/ 1293 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1294 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) { 1295 /* 1296 * Ok the peer probably discarded our data (if we 1297 * echoed a cookie+data). So anything on the 1298 * sent_queue should be marked for retransmit, we 1299 * may not get something to kick us so it COULD 1300 * still take a timeout to move these.. but it can't 1301 * hurt to mark them. 1302 */ 1303 struct sctp_tmit_chunk *chk; 1304 1305 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1306 if (chk->sent < SCTP_DATAGRAM_RESEND) { 1307 chk->sent = SCTP_DATAGRAM_RESEND; 1308 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1309 spec_flag++; 1310 } 1311 } 1312 1313 } 1314 /* process the INIT info (peer's info) */ 1315 retval = sctp_process_init(init_cp, stcb, net); 1316 if (retval < 0) { 1317 if (how_indx < sizeof(asoc->cookie_how)) 1318 asoc->cookie_how[how_indx] = 9; 1319 return (NULL); 1320 } 1321 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1322 init_offset + sizeof(struct sctp_init_chunk), 1323 initack_offset, sh, init_src)) { 1324 if (how_indx < sizeof(asoc->cookie_how)) 1325 asoc->cookie_how[how_indx] = 10; 1326 return (NULL); 1327 } 1328 if ((asoc->state & SCTP_STATE_COOKIE_WAIT) || 1329 (asoc->state & SCTP_STATE_COOKIE_ECHOED)) { 1330 *notification = SCTP_NOTIFY_ASSOC_UP; 1331 1332 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1333 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1334 (inp->sctp_socket->so_qlimit == 0)) { 1335 stcb->sctp_ep->sctp_flags |= 1336 SCTP_PCB_FLAGS_CONNECTED; 1337 soisconnected(stcb->sctp_ep->sctp_socket); 1338 } 1339 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1340 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1341 else 1342 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1343 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1344 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1345 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1346 
SCTP_STAT_INCR_COUNTER32(sctps_restartestab); 1347 } else { 1348 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1349 } 1350 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1351 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING; 1352 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1353 stcb->sctp_ep, stcb, asoc->primary_destination); 1354 1355 } else { 1356 asoc->state = SCTP_STATE_OPEN; 1357 } 1358 sctp_stop_all_cookie_timers(stcb); 1359 sctp_toss_old_cookies(stcb, asoc); 1360 sctp_send_cookie_ack(stcb); 1361 if (spec_flag) { 1362 /* 1363 * only if we have retrans set do we do this. What 1364 * this call does is get only the COOKIE-ACK out and 1365 * then when we return the normal call to 1366 * sctp_chunk_output will get the retrans out behind 1367 * this. 1368 */ 1369 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK); 1370 } 1371 if (how_indx < sizeof(asoc->cookie_how)) 1372 asoc->cookie_how[how_indx] = 11; 1373 1374 return (stcb); 1375 } 1376 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1377 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) && 1378 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce && 1379 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce && 1380 cookie->tie_tag_peer_vtag != 0) { 1381 struct sctpasochead *head; 1382 1383 /* 1384 * case A in Section 5.2.4 Table 2: XXMM (peer restarted) 1385 */ 1386 /* temp code */ 1387 if (how_indx < sizeof(asoc->cookie_how)) 1388 asoc->cookie_how[how_indx] = 12; 1389 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14); 1390 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15); 1391 1392 *sac_assoc_id = sctp_get_associd(stcb); 1393 /* notify upper layer */ 1394 *notification = SCTP_NOTIFY_ASSOC_RESTART; 1395 atomic_add_int(&stcb->asoc.refcnt, 1); 1396 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) && 1397 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 1398 (SCTP_GET_STATE(asoc) != 
SCTP_STATE_SHUTDOWN_SENT)) { 1399 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1400 } 1401 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1402 SCTP_STAT_INCR_GAUGE32(sctps_restartestab); 1403 } else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1404 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab); 1405 } 1406 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1407 asoc->state = SCTP_STATE_OPEN | 1408 SCTP_STATE_SHUTDOWN_PENDING; 1409 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1410 stcb->sctp_ep, stcb, asoc->primary_destination); 1411 1412 } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) { 1413 /* move to OPEN state, if not in SHUTDOWN_SENT */ 1414 asoc->state = SCTP_STATE_OPEN; 1415 } 1416 asoc->pre_open_streams = 1417 ntohs(initack_cp->init.num_outbound_streams); 1418 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1419 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; 1420 1421 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1422 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1423 1424 asoc->str_reset_seq_in = asoc->init_seq_number; 1425 1426 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1427 if (asoc->mapping_array) { 1428 memset(asoc->mapping_array, 0, 1429 asoc->mapping_array_size); 1430 } 1431 SCTP_TCB_UNLOCK(stcb); 1432 SCTP_INP_INFO_WLOCK(); 1433 SCTP_INP_WLOCK(stcb->sctp_ep); 1434 SCTP_TCB_LOCK(stcb); 1435 atomic_add_int(&stcb->asoc.refcnt, -1); 1436 /* send up all the data */ 1437 SCTP_TCB_SEND_LOCK(stcb); 1438 1439 sctp_report_all_outbound(stcb, 1); 1440 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1441 stcb->asoc.strmout[i].stream_no = i; 1442 stcb->asoc.strmout[i].next_sequence_sent = 0; 1443 stcb->asoc.strmout[i].last_msg_incomplete = 0; 1444 } 1445 /* process the INIT-ACK info (my info) */ 1446 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1447 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1448 1449 /* pull from vtag hash */ 1450 
LIST_REMOVE(stcb, sctp_asocs); 1451 /* re-insert to new vtag position */ 1452 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, 1453 sctppcbinfo.hashasocmark)]; 1454 /* 1455 * put it in the bucket in the vtag hash of assoc's for the 1456 * system 1457 */ 1458 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 1459 1460 /* Is this the first restart? */ 1461 if (stcb->asoc.in_restart_hash == 0) { 1462 /* Ok add it to assoc_id vtag hash */ 1463 head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id, 1464 sctppcbinfo.hashrestartmark)]; 1465 LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash); 1466 stcb->asoc.in_restart_hash = 1; 1467 } 1468 /* process the INIT info (peer's info) */ 1469 SCTP_TCB_SEND_UNLOCK(stcb); 1470 SCTP_INP_WUNLOCK(stcb->sctp_ep); 1471 SCTP_INP_INFO_WUNLOCK(); 1472 1473 retval = sctp_process_init(init_cp, stcb, net); 1474 if (retval < 0) { 1475 if (how_indx < sizeof(asoc->cookie_how)) 1476 asoc->cookie_how[how_indx] = 13; 1477 1478 return (NULL); 1479 } 1480 /* 1481 * since we did not send a HB make sure we don't double 1482 * things 1483 */ 1484 net->hb_responded = 1; 1485 1486 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1487 init_offset + sizeof(struct sctp_init_chunk), 1488 initack_offset, sh, init_src)) { 1489 if (how_indx < sizeof(asoc->cookie_how)) 1490 asoc->cookie_how[how_indx] = 14; 1491 1492 return (NULL); 1493 } 1494 /* respond with a COOKIE-ACK */ 1495 sctp_stop_all_cookie_timers(stcb); 1496 sctp_toss_old_cookies(stcb, asoc); 1497 sctp_send_cookie_ack(stcb); 1498 if (how_indx < sizeof(asoc->cookie_how)) 1499 asoc->cookie_how[how_indx] = 15; 1500 1501 return (stcb); 1502 } 1503 if (how_indx < sizeof(asoc->cookie_how)) 1504 asoc->cookie_how[how_indx] = 16; 1505 /* all other cases... 
 */
	return (NULL);
}


/*
 * handle a state cookie for a new association m: input packet mbuf chain--
 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf
 * and the cookie signature does not exist offset: offset into mbuf to the
 * cookie-echo chunk length: length of the cookie chunk to: where the init
 * was from returns a new TCB
 */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint32_t vrf_id)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sockaddr_storage sa_store;
	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6;
	struct sctp_association *asoc;
	int chk_length;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;
	/* NOTE(review): old_tag is saved below but never read afterwards
	 * in this function -- looks like dead state; confirm before
	 * removing. */
	uint32_t old_tag;
	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];

	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(init_cp->ch.chunk_length);
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	initack_offset = init_offset + SCTP_SIZE32(chk_length);
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	chk_length = ntohs(initack_cp->ch.chunk_length);
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value.  This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and popluate
	 */
	stcb = sctp_aloc_assoc(inp, init_src, 0, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id);
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);

		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id);
		return (NULL);
	}
	/* get the correct sctp_nets */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->ipv4_local_scope = cookie->ipv4_scope;
	asoc->site_scope = cookie->site_scope;
	asoc->local_scope = cookie->local_scope;
	asoc->loopback_scope = cookie->loopback_scope;

	if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) {
		struct mbuf *op_err;

		/*
		 * Houston we have a problem. The EP changed while the
		 * cookie was in flight. Only recourse is to abort the
		 * association.
		 *
		 * NOTE(review): the abort is sent with a NULL stcb, so the
		 * TCB just allocated above does not appear to be freed on
		 * this path (later FreeBSD adds an sctp_free_assoc() call
		 * here) -- confirm whether this leaks the assoc.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    sh, op_err, vrf_id);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		return (NULL);
	}
	/* process the INIT-ACK info (my info): adopt my tag/window/TSNs
	 * from the INIT-ACK that was embedded in the cookie */
	old_tag = asoc->my_vtag;
	asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->last_cwr_tsn = asoc->init_seq_number - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/* process the INIT info (peer's info) */
	if (netp)
		retval = sctp_process_init(init_cp, stcb, *netp);
	else
		retval = 0;
	if (retval < 0) {
		/* INIT processing failed: tear the new assoc back down */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		return (NULL);
	}
	/* load all addresses */
	if (sctp_load_addresses_from_init(stcb, m, iphlen,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh,
	    init_src)) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		auth = (struct sctp_auth_chunk *)
		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
			/* auth HMAC failed, dump the assoc and packet */
			SCTPDBG(SCTP_DEBUG_AUTH1,
			    "COOKIE-ECHO: AUTH failed\n");
			sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
			return (NULL);
		} else {
			/* remaining chunks checked... good to go */
			stcb->asoc.authenticated = 1;
		}
	}
	/* update current state */
	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
		asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
		    stcb->sctp_ep, stcb, asoc->primary_destination);
	} else {
		asoc->state = SCTP_STATE_OPEN;
	}
	sctp_stop_all_cookie_timers(stcb);
	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
	SCTP_STAT_INCR_GAUGE32(sctps_currestab);

	/*
	 * if we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to get added to the peer (eg. addresses
	 * changed while cookie echo in flight).  This needs to be done
	 * after we go to the OPEN state to do the correct asconf
	 * processing.  else, make sure we have the correct addresses in our
	 * lists
	 */

	/* warning, we re-use sin, sin6, sa_store here! */
	/* pull in local_address (our "from" address) */
	if (cookie->laddr_type == SCTP_IPV4_ADDRESS) {
		/* source addr is IPv4 */
		sin = (struct sockaddr_in *)initack_src;
		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(struct sockaddr_in);
		sin->sin_addr.s_addr = cookie->laddress[0];
	} else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) {
		/* source addr is IPv6 */
		sin6 = (struct sockaddr_in6 *)initack_src;
		memset(sin6, 0, sizeof(*sin6));
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(struct sockaddr_in6);
		sin6->sin6_scope_id = cookie->scope_id;
		memcpy(&sin6->sin6_addr, cookie->laddress,
		    sizeof(sin6->sin6_addr));
	} else {
		/* unknown local address type in the cookie: drop the assoc */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
		atomic_add_int(&stcb->asoc.refcnt, -1);
		return (NULL);
	}

	sctp_check_address_list(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
	    initack_src, cookie->local_scope, cookie->site_scope,
	    cookie->ipv4_scope, cookie->loopback_scope);


	/* set up to notify upper layer */
	*notification = SCTP_NOTIFY_ASSOC_UP;
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (inp->sctp_socket->so_qlimit == 0)) {
		/*
		 * This is an endpoint that called connect() how it got a
		 * cookie that is NEW is a bit of a mystery. It must be that
		 * the INIT was sent, but before it got there.. a complete
		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
		 * should have went to the other code.. not here.. oh well..
		 * a bit of protection is worth having.. 
1752 */ 1753 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 1754 soisconnected(stcb->sctp_ep->sctp_socket); 1755 } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 1756 (inp->sctp_socket->so_qlimit)) { 1757 /* 1758 * We don't want to do anything with this one. Since it is 1759 * the listening guy. The timer will get started for 1760 * accepted connections in the caller. 1761 */ 1762 ; 1763 } 1764 /* since we did not send a HB make sure we don't double things */ 1765 if ((netp) && (*netp)) 1766 (*netp)->hb_responded = 1; 1767 1768 if (stcb->asoc.sctp_autoclose_ticks && 1769 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1770 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL); 1771 } 1772 /* respond with a COOKIE-ACK */ 1773 /* calculate the RTT */ 1774 if ((netp) && (*netp)) { 1775 (*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp, 1776 &cookie->time_entered); 1777 } 1778 sctp_send_cookie_ack(stcb); 1779 return (stcb); 1780} 1781 1782 1783/* 1784 * handles a COOKIE-ECHO message stcb: modified to either a new or left as 1785 * existing (non-NULL) TCB 1786 */ 1787static struct mbuf * 1788sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 1789 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 1790 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 1791 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 1792 struct sctp_tcb **locked_tcb, uint32_t vrf_id) 1793{ 1794 struct sctp_state_cookie *cookie; 1795 struct sockaddr_in6 sin6; 1796 struct sockaddr_in sin; 1797 struct sctp_tcb *l_stcb = *stcb; 1798 struct sctp_inpcb *l_inp; 1799 struct sockaddr *to; 1800 sctp_assoc_t sac_restart_id; 1801 struct sctp_pcb *ep; 1802 struct mbuf *m_sig; 1803 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 1804 uint8_t *sig; 1805 uint8_t cookie_ok = 0; 1806 unsigned int size_of_pkt, sig_offset, cookie_offset; 1807 unsigned int cookie_len; 1808 struct timeval now; 1809 struct 
timeval time_expires; 1810 struct sockaddr_storage dest_store; 1811 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store; 1812 struct ip *iph; 1813 int notification = 0; 1814 struct sctp_nets *netl; 1815 int had_a_existing_tcb = 0; 1816 1817 SCTPDBG(SCTP_DEBUG_INPUT2, 1818 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 1819 1820 if (inp_p == NULL) { 1821 return (NULL); 1822 } 1823 /* First get the destination address setup too. */ 1824 iph = mtod(m, struct ip *); 1825 if (iph->ip_v == IPVERSION) { 1826 /* its IPv4 */ 1827 struct sockaddr_in *lsin; 1828 1829 lsin = (struct sockaddr_in *)(localep_sa); 1830 memset(lsin, 0, sizeof(*lsin)); 1831 lsin->sin_family = AF_INET; 1832 lsin->sin_len = sizeof(*lsin); 1833 lsin->sin_port = sh->dest_port; 1834 lsin->sin_addr.s_addr = iph->ip_dst.s_addr; 1835 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph); 1836 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 1837 /* its IPv6 */ 1838 struct ip6_hdr *ip6; 1839 struct sockaddr_in6 *lsin6; 1840 1841 lsin6 = (struct sockaddr_in6 *)(localep_sa); 1842 memset(lsin6, 0, sizeof(*lsin6)); 1843 lsin6->sin6_family = AF_INET6; 1844 lsin6->sin6_len = sizeof(struct sockaddr_in6); 1845 ip6 = mtod(m, struct ip6_hdr *); 1846 lsin6->sin6_port = sh->dest_port; 1847 lsin6->sin6_addr = ip6->ip6_dst; 1848 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen; 1849 } else { 1850 return (NULL); 1851 } 1852 1853 cookie = &cp->cookie; 1854 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 1855 cookie_len = ntohs(cp->ch.chunk_length); 1856 1857 if ((cookie->peerport != sh->src_port) && 1858 (cookie->myport != sh->dest_port) && 1859 (cookie->my_vtag != sh->v_tag)) { 1860 /* 1861 * invalid ports or bad tag. Note that we always leave the 1862 * v_tag in the header in network order and when we stored 1863 * it in the my_vtag slot we also left it in network order. 
1864 * This maintains the match even though it may be in the 1865 * opposite byte order of the machine :-> 1866 */ 1867 return (NULL); 1868 } 1869 if (cookie_len > size_of_pkt || 1870 cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 1871 sizeof(struct sctp_init_chunk) + 1872 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 1873 /* cookie too long! or too small */ 1874 return (NULL); 1875 } 1876 /* 1877 * split off the signature into its own mbuf (since it should not be 1878 * calculated in the sctp_hmac_m() call). 1879 */ 1880 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 1881 if (sig_offset > size_of_pkt) { 1882 /* packet not correct size! */ 1883 /* XXX this may already be accounted for earlier... */ 1884 return (NULL); 1885 } 1886 m_sig = m_split(m, sig_offset, M_DONTWAIT); 1887 if (m_sig == NULL) { 1888 /* out of memory or ?? */ 1889 return (NULL); 1890 } 1891 /* 1892 * compute the signature/digest for the cookie 1893 */ 1894 ep = &(*inp_p)->sctp_ep; 1895 l_inp = *inp_p; 1896 if (l_stcb) { 1897 SCTP_TCB_UNLOCK(l_stcb); 1898 } 1899 SCTP_INP_RLOCK(l_inp); 1900 if (l_stcb) { 1901 SCTP_TCB_LOCK(l_stcb); 1902 } 1903 /* which cookie is it? 
*/ 1904 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 1905 (ep->current_secret_number != ep->last_secret_number)) { 1906 /* it's the old cookie */ 1907 (void)sctp_hmac_m(SCTP_HMAC, 1908 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 1909 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 1910 } else { 1911 /* it's the current cookie */ 1912 (void)sctp_hmac_m(SCTP_HMAC, 1913 (uint8_t *) ep->secret_key[(int)ep->current_secret_number], 1914 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 1915 } 1916 /* get the signature */ 1917 SCTP_INP_RUNLOCK(l_inp); 1918 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 1919 if (sig == NULL) { 1920 /* couldn't find signature */ 1921 sctp_m_freem(m_sig); 1922 return (NULL); 1923 } 1924 /* compare the received digest with the computed digest */ 1925 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 1926 /* try the old cookie? */ 1927 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 1928 (ep->current_secret_number != ep->last_secret_number)) { 1929 /* compute digest with old */ 1930 (void)sctp_hmac_m(SCTP_HMAC, 1931 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 1932 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 1933 /* compare */ 1934 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 1935 cookie_ok = 1; 1936 } 1937 } else { 1938 cookie_ok = 1; 1939 } 1940 1941 /* 1942 * Now before we continue we must reconstruct our mbuf so that 1943 * normal processing of any other chunks will work. 
1944 */ 1945 { 1946 struct mbuf *m_at; 1947 1948 m_at = m; 1949 while (SCTP_BUF_NEXT(m_at) != NULL) { 1950 m_at = SCTP_BUF_NEXT(m_at); 1951 } 1952 SCTP_BUF_NEXT(m_at) = m_sig; 1953 } 1954 1955 if (cookie_ok == 0) { 1956 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 1957 SCTPDBG(SCTP_DEBUG_INPUT2, 1958 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 1959 (uint32_t) offset, cookie_offset, sig_offset); 1960 return (NULL); 1961 } 1962 /* 1963 * check the cookie timestamps to be sure it's not stale 1964 */ 1965 (void)SCTP_GETTIME_TIMEVAL(&now); 1966 /* Expire time is in Ticks, so we convert to seconds */ 1967 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); 1968 time_expires.tv_usec = cookie->time_entered.tv_usec; 1969 if (timevalcmp(&now, &time_expires, >)) { 1970 /* cookie is stale! */ 1971 struct mbuf *op_err; 1972 struct sctp_stale_cookie_msg *scm; 1973 uint32_t tim; 1974 1975 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg), 1976 0, M_DONTWAIT, 1, MT_DATA); 1977 if (op_err == NULL) { 1978 /* FOOBAR */ 1979 return (NULL); 1980 } 1981 /* pre-reserve some space */ 1982 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 1983 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 1984 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1985 1986 /* Set the len */ 1987 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg); 1988 scm = mtod(op_err, struct sctp_stale_cookie_msg *); 1989 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE); 1990 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) + 1991 (sizeof(uint32_t)))); 1992 /* seconds to usec */ 1993 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 1994 /* add in usec */ 1995 if (tim == 0) 1996 tim = now.tv_usec - cookie->time_entered.tv_usec; 1997 scm->time_usec = htonl(tim); 1998 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag, 1999 vrf_id); 2000 return (NULL); 2001 } 2002 /* 2003 * Now we 
must see with the lookup address if we have an existing 2004 * asoc. This will only happen if we were in the COOKIE-WAIT state 2005 * and a INIT collided with us and somewhere the peer sent the 2006 * cookie on another address besides the single address our assoc 2007 * had for him. In this case we will have one of the tie-tags set at 2008 * least AND the address field in the cookie can be used to look it 2009 * up. 2010 */ 2011 to = NULL; 2012 if (cookie->addr_type == SCTP_IPV6_ADDRESS) { 2013 memset(&sin6, 0, sizeof(sin6)); 2014 sin6.sin6_family = AF_INET6; 2015 sin6.sin6_len = sizeof(sin6); 2016 sin6.sin6_port = sh->src_port; 2017 sin6.sin6_scope_id = cookie->scope_id; 2018 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 2019 sizeof(sin6.sin6_addr.s6_addr)); 2020 to = (struct sockaddr *)&sin6; 2021 } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) { 2022 memset(&sin, 0, sizeof(sin)); 2023 sin.sin_family = AF_INET; 2024 sin.sin_len = sizeof(sin); 2025 sin.sin_port = sh->src_port; 2026 sin.sin_addr.s_addr = cookie->address[0]; 2027 to = (struct sockaddr *)&sin; 2028 } else { 2029 /* This should not happen */ 2030 return (NULL); 2031 } 2032 if ((*stcb == NULL) && to) { 2033 /* Yep, lets check */ 2034 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL); 2035 if (*stcb == NULL) { 2036 /* 2037 * We should have only got back the same inp. If we 2038 * got back a different ep we have a problem. The 2039 * original findep got back l_inp and now 2040 */ 2041 if (l_inp != *inp_p) { 2042 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2043 } 2044 } else { 2045 if (*locked_tcb == NULL) { 2046 /* 2047 * In this case we found the assoc only 2048 * after we locked the create lock. This 2049 * means we are in a colliding case and we 2050 * must make sure that we unlock the tcb if 2051 * its one of the cases where we throw away 2052 * the incoming packets. 
2053 */ 2054 *locked_tcb = *stcb; 2055 2056 /* 2057 * We must also increment the inp ref count 2058 * since the ref_count flags was set when we 2059 * did not find the TCB, now we found it 2060 * which reduces the refcount.. we must 2061 * raise it back out to balance it all :-) 2062 */ 2063 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2064 if ((*stcb)->sctp_ep != l_inp) { 2065 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n", 2066 (*stcb)->sctp_ep, l_inp); 2067 } 2068 } 2069 } 2070 } 2071 if (to == NULL) 2072 return (NULL); 2073 2074 cookie_len -= SCTP_SIGNATURE_SIZE; 2075 if (*stcb == NULL) { 2076 /* this is the "normal" case... get a new TCB */ 2077 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie, 2078 cookie_len, *inp_p, netp, to, ¬ification, 2079 auth_skipped, auth_offset, auth_len, vrf_id); 2080 } else { 2081 /* this is abnormal... cookie-echo on existing TCB */ 2082 had_a_existing_tcb = 1; 2083 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh, 2084 cookie, cookie_len, *inp_p, *stcb, *netp, to, 2085 ¬ification, &sac_restart_id, vrf_id); 2086 } 2087 2088 if (*stcb == NULL) { 2089 /* still no TCB... must be bad cookie-echo */ 2090 return (NULL); 2091 } 2092 /* 2093 * Ok, we built an association so confirm the address we sent the 2094 * INIT-ACK to. 2095 */ 2096 netl = sctp_findnet(*stcb, to); 2097 /* 2098 * This code should in theory NOT run but 2099 */ 2100 if (netl == NULL) { 2101 /* TSNH! Huh, why do I need to add this address here? 
*/ 2102 int ret; 2103 2104 ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE, 2105 SCTP_IN_COOKIE_PROC); 2106 netl = sctp_findnet(*stcb, to); 2107 } 2108 if (netl) { 2109 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2110 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2111 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2112 netl); 2113 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2114 (*stcb), 0, (void *)netl); 2115 } 2116 } 2117 if (*stcb) { 2118 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p, 2119 *stcb, NULL); 2120 } 2121 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2122 if (!had_a_existing_tcb || 2123 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2124 /* 2125 * If we have a NEW cookie or the connect never 2126 * reached the connected state during collision we 2127 * must do the TCP accept thing. 2128 */ 2129 struct socket *so, *oso; 2130 struct sctp_inpcb *inp; 2131 2132 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2133 /* 2134 * For a restart we will keep the same 2135 * socket, no need to do anything. I THINK!! 2136 */ 2137 sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id); 2138 return (m); 2139 } 2140 oso = (*inp_p)->sctp_socket; 2141 /* 2142 * We do this to keep the sockets side happy durin 2143 * the sonewcon ONLY. 
2144 */ 2145 NET_LOCK_GIANT(); 2146 SCTP_TCB_UNLOCK((*stcb)); 2147 so = sonewconn(oso, 0 2148 ); 2149 NET_UNLOCK_GIANT(); 2150 SCTP_INP_WLOCK((*stcb)->sctp_ep); 2151 SCTP_TCB_LOCK((*stcb)); 2152 SCTP_INP_WUNLOCK((*stcb)->sctp_ep); 2153 if (so == NULL) { 2154 struct mbuf *op_err; 2155 2156 /* Too many sockets */ 2157 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2158 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2159 sctp_abort_association(*inp_p, NULL, m, iphlen, 2160 sh, op_err, vrf_id); 2161 sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); 2162 return (NULL); 2163 } 2164 inp = (struct sctp_inpcb *)so->so_pcb; 2165 SCTP_INP_INCR_REF(inp); 2166 /* 2167 * We add the unbound flag here so that if we get an 2168 * soabort() before we get the move_pcb done, we 2169 * will properly cleanup. 2170 */ 2171 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2172 SCTP_PCB_FLAGS_CONNECTED | 2173 SCTP_PCB_FLAGS_IN_TCPPOOL | 2174 SCTP_PCB_FLAGS_UNBOUND | 2175 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 2176 SCTP_PCB_FLAGS_DONT_WAKE); 2177 inp->sctp_features = (*inp_p)->sctp_features; 2178 inp->sctp_socket = so; 2179 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 2180 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 2181 inp->sctp_context = (*inp_p)->sctp_context; 2182 inp->inp_starting_point_for_iterator = NULL; 2183 /* 2184 * copy in the authentication parameters from the 2185 * original endpoint 2186 */ 2187 if (inp->sctp_ep.local_hmacs) 2188 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2189 inp->sctp_ep.local_hmacs = 2190 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 2191 if (inp->sctp_ep.local_auth_chunks) 2192 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2193 inp->sctp_ep.local_auth_chunks = 2194 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 2195 (void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys, 2196 &inp->sctp_ep.shared_keys); 2197 2198 /* 
2199 * Now we must move it from one hash table to 2200 * another and get the tcb in the right place. 2201 */ 2202 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 2203 2204 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2205 SCTP_TCB_UNLOCK((*stcb)); 2206 2207 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT); 2208 SCTP_TCB_LOCK((*stcb)); 2209 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2210 2211 2212 /* 2213 * now we must check to see if we were aborted while 2214 * the move was going on and the lock/unlock 2215 * happened. 2216 */ 2217 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 2218 /* 2219 * yep it was, we leave the assoc attached 2220 * to the socket since the sctp_inpcb_free() 2221 * call will send an abort for us. 2222 */ 2223 SCTP_INP_DECR_REF(inp); 2224 return (NULL); 2225 } 2226 SCTP_INP_DECR_REF(inp); 2227 /* Switch over to the new guy */ 2228 *inp_p = inp; 2229 sctp_ulp_notify(notification, *stcb, 0, NULL); 2230 2231 /* 2232 * Pull it from the incomplete queue and wake the 2233 * guy 2234 */ 2235 soisconnected(so); 2236 return (m); 2237 } 2238 } 2239 if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 2240 sctp_ulp_notify(notification, *stcb, 0, NULL); 2241 } 2242 return (m); 2243} 2244 2245static void 2246sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp, 2247 struct sctp_tcb *stcb, struct sctp_nets *net) 2248{ 2249 /* cp must not be used, others call this without a c-ack :-) */ 2250 struct sctp_association *asoc; 2251 2252 SCTPDBG(SCTP_DEBUG_INPUT2, 2253 "sctp_handle_cookie_ack: handling COOKIE-ACK\n"); 2254 if (stcb == NULL) 2255 return; 2256 2257 asoc = &stcb->asoc; 2258 2259 sctp_stop_all_cookie_timers(stcb); 2260 /* process according to association state */ 2261 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 2262 /* state change only needed when I am in right state */ 2263 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 2264 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 2265 asoc->state 
	    = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING;
			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
			    stcb->sctp_ep, stcb, asoc->primary_destination);

		} else {
			asoc->state = SCTP_STATE_OPEN;
		}
		/* update RTO */
		SCTP_STAT_INCR_COUNTER32(sctps_activeestab);
		SCTP_STAT_INCR_GAUGE32(sctps_currestab);
		if (asoc->overall_error_count == 0) {
			/* only sample RTO when there were no retransmits */
			net->RTO = sctp_calculate_rto(stcb, asoc, net,
			    &asoc->time_entered);
		}
		(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL);
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
			/* one-to-one style socket: mark it connected now */
			stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
			soisconnected(stcb->sctp_ep->sctp_socket);
		}
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep,
		    stcb, net);
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (stcb->asoc.sctp_autoclose_ticks &&
		    sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) {
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE,
			    stcb->sctp_ep, stcb, NULL);
		}
		/*
		 * set ASCONF timer if ASCONFs are pending and allowed (eg.
		 * addresses changed when init/cookie echo in flight)
		 */
		if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) &&
		    (stcb->asoc.peer_supports_asconf) &&
		    (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) {
			sctp_timer_start(SCTP_TIMER_TYPE_ASCONF,
			    stcb->sctp_ep, stcb,
			    stcb->asoc.primary_destination);
		}
	}
	/* Toss the cookie if I can */
	sctp_toss_old_cookies(stcb, asoc);
	if (!TAILQ_EMPTY(&asoc->sent_queue)) {
		/* Restart the timer if we have pending data */
		struct sctp_tmit_chunk *chk;

		chk = TAILQ_FIRST(&asoc->sent_queue);
		if (chk) {
			sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
			    stcb, chk->whoTo);
		}
	}
}

/*
 * Handle a received ECN-Echo: note the TSN it reports, locate the net it
 * was sent on, halve cwnd at most once per RTT, and always answer with a
 * CWR chunk.
 */
static void
sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp,
    struct sctp_tcb *stcb)
{
	struct sctp_nets *net;
	struct sctp_tmit_chunk *lchk;
	uint32_t tsn;

	/* malformed chunk length: ignore it */
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) {
		return;
	}
	SCTP_STAT_INCR(sctps_recvecne);
	tsn = ntohl(cp->tsn);
	/* ECN Nonce stuff: need a resync and disable the nonce sum check */
	/* Also we make sure we disable the nonce_wait */
	lchk = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (lchk == NULL) {
		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
	} else {
		stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq;
	}
	stcb->asoc.nonce_wait_for_ecne = 0;
	stcb->asoc.nonce_sum_check = 0;

	/* Find where it was sent, if possible */
	net = NULL;
	lchk = TAILQ_FIRST(&stcb->asoc.sent_queue);
	while (lchk) {
		if (lchk->rec.data.TSN_seq == tsn) {
			net = lchk->whoTo;
			break;
		}
		/* sent_queue is TSN ordered; past the spot means not found */
		if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ))
			break;
		lchk = TAILQ_NEXT(lchk, sctp_next);
	}
	if (net == NULL)
		/* default is we use the primary */
		net = stcb->asoc.primary_destination;

	if (compare_with_wrap(tsn, stcb->asoc.last_cwr_tsn, MAX_TSN)) {
		int old_cwnd;

		old_cwnd = net->cwnd;
		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
		net->ssthresh = net->cwnd / 2;
		if (net->ssthresh < net->mtu) {
			net->ssthresh = net->mtu;
			/* here back off the timer as well, to slow us down */
			net->RTO <<= 1;
		}
		net->cwnd = net->ssthresh;
		if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
		}
		/*
		 * we reduce once every RTT. So we will only lower cwnd at
		 * the next sending seq i.e. the resync_tsn.
		 */
		stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn;
	}
	/*
	 * We always send a CWR this way if our previous one was lost our
	 * peer will get an update, or if it is not time again to reduce we
	 * still get the cwr to the peer.
	 */
	sctp_send_cwr(stcb, net, tsn);
}

static void
sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb)
{
	/*
	 * Here we get a CWR from the peer. We must look in the outqueue and
	 * make sure that we have a covered ECNE in teh control chunk part.
	 * If so remove it.
	 */
	struct sctp_tmit_chunk *chk;
	struct sctp_ecne_chunk *ecne;

	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) {
			continue;
		}
		/*
		 * Look for and remove if it is the right TSN. Since there
		 * is only ONE ECNE on the control queue at any one time we
		 * don't need to worry about more than one!
2413 */ 2414 ecne = mtod(chk->data, struct sctp_ecne_chunk *); 2415 if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn), 2416 MAX_TSN) || (cp->tsn == ecne->tsn)) { 2417 /* this covers this ECNE, we can remove it */ 2418 stcb->asoc.ecn_echo_cnt_onq--; 2419 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, 2420 sctp_next); 2421 if (chk->data) { 2422 sctp_m_freem(chk->data); 2423 chk->data = NULL; 2424 } 2425 stcb->asoc.ctrl_queue_cnt--; 2426 sctp_free_a_chunk(stcb, chk); 2427 break; 2428 } 2429 } 2430} 2431 2432static void 2433sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp, 2434 struct sctp_tcb *stcb, struct sctp_nets *net) 2435{ 2436 struct sctp_association *asoc; 2437 2438 SCTPDBG(SCTP_DEBUG_INPUT2, 2439 "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n"); 2440 if (stcb == NULL) 2441 return; 2442 2443 asoc = &stcb->asoc; 2444 /* process according to association state */ 2445 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) { 2446 /* unexpected SHUTDOWN-COMPLETE... so ignore... */ 2447 SCTP_TCB_UNLOCK(stcb); 2448 return; 2449 } 2450 /* notify upper layer protocol */ 2451 if (stcb->sctp_socket) { 2452 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL); 2453 /* are the queues empty? 
they should be */ 2454 if (!TAILQ_EMPTY(&asoc->send_queue) || 2455 !TAILQ_EMPTY(&asoc->sent_queue) || 2456 !TAILQ_EMPTY(&asoc->out_wheel)) { 2457 sctp_report_all_outbound(stcb, 0); 2458 } 2459 } 2460 /* stop the timer */ 2461 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_21); 2462 SCTP_STAT_INCR_COUNTER32(sctps_shutdown); 2463 /* free the TCB */ 2464 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22); 2465 return; 2466} 2467 2468static int 2469process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc, 2470 struct sctp_nets *net, uint8_t flg) 2471{ 2472 switch (desc->chunk_type) { 2473 case SCTP_DATA: 2474 /* find the tsn to resend (possibly */ 2475 { 2476 uint32_t tsn; 2477 struct sctp_tmit_chunk *tp1; 2478 2479 tsn = ntohl(desc->tsn_ifany); 2480 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2481 while (tp1) { 2482 if (tp1->rec.data.TSN_seq == tsn) { 2483 /* found it */ 2484 break; 2485 } 2486 if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn, 2487 MAX_TSN)) { 2488 /* not found */ 2489 tp1 = NULL; 2490 break; 2491 } 2492 tp1 = TAILQ_NEXT(tp1, sctp_next); 2493 } 2494 if (tp1 == NULL) { 2495 /* 2496 * Do it the other way , aka without paying 2497 * attention to queue seq order. 
2498 */ 2499 SCTP_STAT_INCR(sctps_pdrpdnfnd); 2500 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2501 while (tp1) { 2502 if (tp1->rec.data.TSN_seq == tsn) { 2503 /* found it */ 2504 break; 2505 } 2506 tp1 = TAILQ_NEXT(tp1, sctp_next); 2507 } 2508 } 2509 if (tp1 == NULL) { 2510 SCTP_STAT_INCR(sctps_pdrptsnnf); 2511 } 2512 if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) { 2513 uint8_t *ddp; 2514 2515 if ((stcb->asoc.peers_rwnd == 0) && 2516 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) { 2517 SCTP_STAT_INCR(sctps_pdrpdiwnp); 2518 return (0); 2519 } 2520 if (stcb->asoc.peers_rwnd == 0 && 2521 (flg & SCTP_FROM_MIDDLE_BOX)) { 2522 SCTP_STAT_INCR(sctps_pdrpdizrw); 2523 return (0); 2524 } 2525 ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+ 2526 sizeof(struct sctp_data_chunk)); 2527 { 2528 unsigned int iii; 2529 2530 for (iii = 0; iii < sizeof(desc->data_bytes); 2531 iii++) { 2532 if (ddp[iii] != desc->data_bytes[iii]) { 2533 SCTP_STAT_INCR(sctps_pdrpbadd); 2534 return (-1); 2535 } 2536 } 2537 } 2538 /* 2539 * We zero out the nonce so resync not 2540 * needed 2541 */ 2542 tp1->rec.data.ect_nonce = 0; 2543 2544 if (tp1->do_rtt) { 2545 /* 2546 * this guy had a RTO calculation 2547 * pending on it, cancel it 2548 */ 2549 tp1->do_rtt = 0; 2550 } 2551 SCTP_STAT_INCR(sctps_pdrpmark); 2552 if (tp1->sent != SCTP_DATAGRAM_RESEND) 2553 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2554 tp1->sent = SCTP_DATAGRAM_RESEND; 2555 /* 2556 * mark it as if we were doing a FR, since 2557 * we will be getting gap ack reports behind 2558 * the info from the router. 2559 */ 2560 tp1->rec.data.doing_fast_retransmit = 1; 2561 /* 2562 * mark the tsn with what sequences can 2563 * cause a new FR. 
2564 */ 2565 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 2566 tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 2567 } else { 2568 tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 2569 } 2570 2571 /* restart the timer */ 2572 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2573 stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23); 2574 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2575 stcb, tp1->whoTo); 2576 2577 /* fix counts and things */ 2578 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) { 2579 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP, 2580 tp1->whoTo->flight_size, 2581 tp1->book_size, 2582 (uintptr_t) stcb, 2583 tp1->rec.data.TSN_seq); 2584 } 2585 sctp_flight_size_decrease(tp1); 2586 sctp_total_flight_decrease(stcb, tp1); 2587 } { 2588 /* audit code */ 2589 unsigned int audit; 2590 2591 audit = 0; 2592 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 2593 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2594 audit++; 2595 } 2596 TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue, 2597 sctp_next) { 2598 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2599 audit++; 2600 } 2601 if (audit != stcb->asoc.sent_queue_retran_cnt) { 2602 SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n", 2603 audit, stcb->asoc.sent_queue_retran_cnt); 2604#ifndef SCTP_AUDITING_ENABLED 2605 stcb->asoc.sent_queue_retran_cnt = audit; 2606#endif 2607 } 2608 } 2609 } 2610 break; 2611 case SCTP_ASCONF: 2612 { 2613 struct sctp_tmit_chunk *asconf; 2614 2615 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue, 2616 sctp_next) { 2617 if (asconf->rec.chunk_id.id == SCTP_ASCONF) { 2618 break; 2619 } 2620 } 2621 if (asconf) { 2622 if (asconf->sent != SCTP_DATAGRAM_RESEND) 2623 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2624 asconf->sent = SCTP_DATAGRAM_RESEND; 2625 asconf->snd_count--; 2626 } 2627 } 2628 break; 2629 case SCTP_INITIATION: 2630 /* resend the INIT */ 2631 stcb->asoc.dropped_special_cnt++; 2632 if 
(stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) { 2633 /* 2634 * If we can get it in, in a few attempts we do 2635 * this, otherwise we let the timer fire. 2636 */ 2637 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, 2638 stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24); 2639 sctp_send_initiate(stcb->sctp_ep, stcb); 2640 } 2641 break; 2642 case SCTP_SELECTIVE_ACK: 2643 /* resend the sack */ 2644 sctp_send_sack(stcb); 2645 break; 2646 case SCTP_HEARTBEAT_REQUEST: 2647 /* resend a demand HB */ 2648 (void)sctp_send_hb(stcb, 1, net); 2649 break; 2650 case SCTP_SHUTDOWN: 2651 sctp_send_shutdown(stcb, net); 2652 break; 2653 case SCTP_SHUTDOWN_ACK: 2654 sctp_send_shutdown_ack(stcb, net); 2655 break; 2656 case SCTP_COOKIE_ECHO: 2657 { 2658 struct sctp_tmit_chunk *cookie; 2659 2660 cookie = NULL; 2661 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, 2662 sctp_next) { 2663 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 2664 break; 2665 } 2666 } 2667 if (cookie) { 2668 if (cookie->sent != SCTP_DATAGRAM_RESEND) 2669 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2670 cookie->sent = SCTP_DATAGRAM_RESEND; 2671 sctp_stop_all_cookie_timers(stcb); 2672 } 2673 } 2674 break; 2675 case SCTP_COOKIE_ACK: 2676 sctp_send_cookie_ack(stcb); 2677 break; 2678 case SCTP_ASCONF_ACK: 2679 /* resend last asconf ack */ 2680 sctp_send_asconf_ack(stcb, 1); 2681 break; 2682 case SCTP_FORWARD_CUM_TSN: 2683 send_forward_tsn(stcb, &stcb->asoc); 2684 break; 2685 /* can't do anything with these */ 2686 case SCTP_PACKET_DROPPED: 2687 case SCTP_INITIATION_ACK: /* this should not happen */ 2688 case SCTP_HEARTBEAT_ACK: 2689 case SCTP_ABORT_ASSOCIATION: 2690 case SCTP_OPERATION_ERROR: 2691 case SCTP_SHUTDOWN_COMPLETE: 2692 case SCTP_ECN_ECHO: 2693 case SCTP_ECN_CWR: 2694 default: 2695 break; 2696 } 2697 return (0); 2698} 2699 2700void 2701sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 2702{ 2703 int i; 2704 uint16_t temp; 2705 2706 /* 2707 * We 
	 * set things to 0xffff since this is the last delivered sequence
	 * and we will be sending in 0 after the reset.
	 */

	if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			temp = ntohs(list[i]);
			/* ignore stream numbers we don't have */
			if (temp >= stcb->asoc.streamincnt) {
				continue;
			}
			stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff;
		}
	} else {
		list = NULL;
		for (i = 0; i < stcb->asoc.streamincnt; i++) {
			stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
		}
	}
	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list);
}

/*
 * Reset the outbound stream sequence numbers for the listed streams (or
 * all of them when number_entries is 0) and notify the ULP.
 */
static void
sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list)
{
	int i;

	if (number_entries == 0) {
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			stcb->asoc.strmout[i].next_sequence_sent = 0;
		}
	} else if (number_entries) {
		for (i = 0; i < number_entries; i++) {
			uint16_t temp;

			temp = ntohs(list[i]);
			if (temp >= stcb->asoc.streamoutcnt) {
				/* no such stream */
				continue;
			}
			stcb->asoc.strmout[temp].next_sequence_sent = 0;
		}
	}
	sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list);
}


/*
 * Locate the outstanding stream-reset request with the given sequence
 * number inside the queued stream-reset chunk (at most two requests can be
 * packed in one chunk).  Optionally hands back the chunk pointer through
 * bchk.  Returns NULL when no matching request is outstanding.
 */
struct sctp_stream_reset_out_request *
sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk)
{
	struct sctp_association *asoc;
	struct sctp_stream_reset_out_req *req;
	struct sctp_stream_reset_out_request *r;
	struct sctp_tmit_chunk *chk;
	int len, clen;

	asoc = &stcb->asoc;
	if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) {
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	if (stcb->asoc.str_reset == NULL) {
		asoc->stream_reset_outstanding = 0;
		return (NULL);
	}
	chk = stcb->asoc.str_reset;
	if (chk->data == NULL) {
		return (NULL);
	}
	if (bchk) {
		/* he wants a copy of the chk pointer */
		*bchk = chk;
	}
	clen = chk->send_size;
	req = mtod(chk->data, struct sctp_stream_reset_out_req *);
	r = &req->sr_req;
	if (ntohl(r->request_seq) == seq) {
		/* found it */
		return (r);
	}
	len = SCTP_SIZE32(ntohs(r->ph.param_length));
	if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) {
		/* move to the next one, there can only be a max of two */
		r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len);
		if (ntohl(r->request_seq) == seq) {
			return (r);
		}
	}
	/* that seq is not here */
	return (NULL);
}

/*
 * Remove the queued stream-reset chunk (if any): stop its timer, unlink it
 * from the control queue, and free it.
 */
static void
sctp_clean_up_stream_reset(struct sctp_tcb *stcb)
{
	struct sctp_association *asoc;
	struct sctp_tmit_chunk *chk = stcb->asoc.str_reset;

	if (stcb->asoc.str_reset == NULL) {
		return;
	}
	asoc = &stcb->asoc;

	sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25);
	TAILQ_REMOVE(&asoc->control_send_queue,
	    chk,
	    sctp_next);
	if (chk->data) {
		sctp_m_freem(chk->data);
		chk->data = NULL;
	}
	asoc->ctrl_queue_cnt--;
	sctp_free_a_chunk(stcb, chk);
	stcb->asoc.str_reset = NULL;
}


/*
 * Process the peer's response to one of our stream-reset requests.
 * Returns 1 when forward-TSN processing aborted the association, else 0.
 */
static int
sctp_handle_stream_reset_response(struct sctp_tcb *stcb,
    uint32_t seq, uint32_t action,
    struct sctp_stream_reset_response *respin)
{
	uint16_t type;
	int lparm_len;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_reset_out_request *srparam;
	int number_entries;

	if (asoc->stream_reset_outstanding == 0) {
		/* duplicate */
		return (0);
	}
	if (seq == stcb->asoc.str_reset_seq_out) {
		srparam = sctp_find_stream_reset(stcb, seq, &chk);
		if (srparam) {
			stcb->asoc.str_reset_seq_out++;
			type = ntohs(srparam->ph.param_type);
			lparm_len = ntohs(srparam->ph.param_length);
			if (type == SCTP_STR_RESET_OUT_REQUEST) {
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t);
				asoc->stream_reset_out_is_outstanding = 0;
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					/* do it */
					sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams);
				} else {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams);
				}
			} else if (type == SCTP_STR_RESET_IN_REQUEST) {
				/* Answered my request */
				number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t);
				if (asoc->stream_reset_outstanding)
					asoc->stream_reset_outstanding--;
				if (action != SCTP_STREAM_RESET_PERFORMED) {
					sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams);
				}
			} else if (type == SCTP_STR_RESET_TSN_REQUEST) {
				/**
				 * a) Adopt the new in tsn.
				 * b) reset the map
				 * c) Adopt the new out-tsn
				 */
				struct sctp_stream_reset_response_tsn *resp;
				struct sctp_forward_tsn_chunk fwdtsn;
				int abort_flag = 0;

				if (respin == NULL) {
					/* huh ? */
					return (0);
				}
				if (action == SCTP_STREAM_RESET_PERFORMED) {
					resp = (struct sctp_stream_reset_response_tsn *)respin;
					asoc->stream_reset_outstanding--;
					/* fake a FWD-TSN to advance the cum-ack point */
					fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk));
					fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN;
					fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1);
					sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0);
					if (abort_flag) {
						return (1);
					}
					/* adopt the peer's new TSN base and clear the map */
					stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1);
					stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map;
					stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn);
					memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
					stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn);
					stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn;

					sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL);
					sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL);

				}
			}
			/* get rid of the request and get the request flags */
			if (asoc->stream_reset_outstanding == 0) {
				sctp_clean_up_stream_reset(stcb);
			}
		}
	}
	return (0);
}

/*
 * Process a peer's request that WE reset our outgoing streams; the result
 * (performed / denied / try-later) is appended to chk as a response
 * parameter.
 */
static void
sctp_handle_str_reset_request_in(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_in_request *req, int trunc)
{
	uint32_t seq;
	int len, i;
	int number_entries;
	uint16_t temp;

	/*
	 * peer wants me to send a str-reset to him for my outgoing seq's if
	 * seq_in is right.
	 */
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);
	if (asoc->str_reset_seq_in == seq) {
		if (trunc) {
			/* Can't do it, since they exceeded our buffer size */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED;
			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		} else if (stcb->asoc.stream_reset_out_is_outstanding == 0) {
			len = ntohs(req->ph.param_length);
			number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t));
			/* convert the requested stream list to host order in place */
			for (i = 0; i < number_entries; i++) {
				temp = ntohs(req->list_of_streams[i]);
				req->list_of_streams[i] = temp;
			}
			/* move the reset action back one */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED;
			sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams,
			    asoc->str_reset_seq_out,
			    seq, (asoc->sending_seq - 1));
			asoc->stream_reset_out_is_outstanding = 1;
			asoc->str_reset = chk;
			sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo);
			stcb->asoc.stream_reset_outstanding++;
		} else {
			/* Can't do it, since we have sent one out */
			asoc->last_reset_action[1] = asoc->last_reset_action[0];
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER;
			sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		}
		asoc->str_reset_seq_in++;
	} else if (asoc->str_reset_seq_in - 1 == seq) {
		/* retransmission of the previous request: replay its result */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if (asoc->str_reset_seq_in - 2 == seq) {
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO);
	}
}

static int
sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb,
struct sctp_tmit_chunk *chk, 2968 struct sctp_stream_reset_tsn_request *req) 2969{ 2970 /* reset all in and out and update the tsn */ 2971 /* 2972 * A) reset my str-seq's on in and out. B) Select a receive next, 2973 * and set cum-ack to it. Also process this selected number as a 2974 * fwd-tsn as well. C) set in the response my next sending seq. 2975 */ 2976 struct sctp_forward_tsn_chunk fwdtsn; 2977 struct sctp_association *asoc = &stcb->asoc; 2978 int abort_flag = 0; 2979 uint32_t seq; 2980 2981 seq = ntohl(req->request_seq); 2982 if (asoc->str_reset_seq_in == seq) { 2983 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 2984 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 2985 fwdtsn.ch.chunk_flags = 0; 2986 fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1); 2987 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 2988 if (abort_flag) { 2989 return (1); 2990 } 2991 stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA; 2992 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 2993 stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1; 2994 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 2995 atomic_add_int(&stcb->asoc.sending_seq, 1); 2996 /* save off historical data for retrans */ 2997 stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0]; 2998 stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq; 2999 stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0]; 3000 stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn; 3001 3002 sctp_add_stream_reset_result_tsn(chk, 3003 ntohl(req->request_seq), 3004 SCTP_STREAM_RESET_PERFORMED, 3005 stcb->asoc.sending_seq, 3006 stcb->asoc.mapping_array_base_tsn); 3007 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 3008 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 3009 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0]; 3010 stcb->asoc.last_reset_action[0] = 
SCTP_STREAM_RESET_PERFORMED; 3011 3012 asoc->str_reset_seq_in++; 3013 } else if (asoc->str_reset_seq_in - 1 == seq) { 3014 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0], 3015 stcb->asoc.last_sending_seq[0], 3016 stcb->asoc.last_base_tsnsent[0] 3017 ); 3018 } else if (asoc->str_reset_seq_in - 2 == seq) { 3019 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1], 3020 stcb->asoc.last_sending_seq[1], 3021 stcb->asoc.last_base_tsnsent[1] 3022 ); 3023 } else { 3024 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3025 } 3026 return (0); 3027} 3028 3029static void 3030sctp_handle_str_reset_request_out(struct sctp_tcb *stcb, 3031 struct sctp_tmit_chunk *chk, 3032 struct sctp_stream_reset_out_request *req, int trunc) 3033{ 3034 uint32_t seq, tsn; 3035 int number_entries, len; 3036 struct sctp_association *asoc = &stcb->asoc; 3037 3038 seq = ntohl(req->request_seq); 3039 3040 /* now if its not a duplicate we process it */ 3041 if (asoc->str_reset_seq_in == seq) { 3042 len = ntohs(req->ph.param_length); 3043 number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t)); 3044 /* 3045 * the sender is resetting, handle the list issue.. we must 3046 * a) verify if we can do the reset, if so no problem b) If 3047 * we can't do the reset we must copy the request. c) queue 3048 * it, and setup the data in processor to trigger it off 3049 * when needed and dequeue all the queued data. 
3050 */ 3051 tsn = ntohl(req->send_reset_at_tsn); 3052 3053 /* move the reset action back one */ 3054 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3055 if (trunc) { 3056 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED); 3057 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3058 } else if ((tsn == asoc->cumulative_tsn) || 3059 (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) { 3060 /* we can do it now */ 3061 sctp_reset_in_stream(stcb, number_entries, req->list_of_streams); 3062 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3063 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3064 } else { 3065 /* 3066 * we must queue it up and thus wait for the TSN's 3067 * to arrive that are at or before tsn 3068 */ 3069 struct sctp_stream_reset_list *liste; 3070 int siz; 3071 3072 siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t)); 3073 SCTP_MALLOC(liste, struct sctp_stream_reset_list *, 3074 siz, SCTP_M_STRESET); 3075 if (liste == NULL) { 3076 /* gak out of memory */ 3077 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED); 3078 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3079 return; 3080 } 3081 liste->tsn = tsn; 3082 liste->number_entries = number_entries; 3083 memcpy(&liste->req, req, 3084 (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t)))); 3085 TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp); 3086 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3087 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3088 } 3089 asoc->str_reset_seq_in++; 3090 } else if ((asoc->str_reset_seq_in - 1) == seq) { 3091 /* 3092 * one seq back, just echo back last action since my 3093 * response was lost. 
3094 */ 3095 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3096 } else if ((asoc->str_reset_seq_in - 2) == seq) { 3097 /* 3098 * two seq back, just echo back last action since my 3099 * response was lost. 3100 */ 3101 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3102 } else { 3103 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3104 } 3105} 3106 3107#ifdef __GNUC__ 3108__attribute__((noinline)) 3109#endif 3110 static int 3111 sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset, 3112 struct sctp_stream_reset_out_req *sr_req) 3113{ 3114 int chk_length, param_len, ptype; 3115 struct sctp_paramhdr pstore; 3116 uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE]; 3117 3118 uint32_t seq; 3119 int num_req = 0; 3120 int trunc = 0; 3121 struct sctp_tmit_chunk *chk; 3122 struct sctp_chunkhdr *ch; 3123 struct sctp_paramhdr *ph; 3124 int ret_code = 0; 3125 int num_param = 0; 3126 3127 /* now it may be a reset or a reset-response */ 3128 chk_length = ntohs(sr_req->ch.chunk_length); 3129 3130 /* setup for adding the response */ 3131 sctp_alloc_a_chunk(stcb, chk); 3132 if (chk == NULL) { 3133 return (ret_code); 3134 } 3135 chk->rec.chunk_id.id = SCTP_STREAM_RESET; 3136 chk->rec.chunk_id.can_take_data = 0; 3137 chk->asoc = &stcb->asoc; 3138 chk->no_fr_allowed = 0; 3139 chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr); 3140 chk->book_size_scale = 0; 3141 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 3142 if (chk->data == NULL) { 3143strres_nochunk: 3144 if (chk->data) { 3145 sctp_m_freem(chk->data); 3146 chk->data = NULL; 3147 } 3148 sctp_free_a_chunk(stcb, chk); 3149 return (ret_code); 3150 } 3151 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 3152 3153 /* setup chunk parameters */ 3154 chk->sent = SCTP_DATAGRAM_UNSENT; 3155 chk->snd_count = 0; 3156 chk->whoTo = stcb->asoc.primary_destination; 3157 atomic_add_int(&chk->whoTo->ref_count, 1); 3158 3159 ch = 
mtod(chk->data, struct sctp_chunkhdr *); 3160 ch->chunk_type = SCTP_STREAM_RESET; 3161 ch->chunk_flags = 0; 3162 ch->chunk_length = htons(chk->send_size); 3163 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 3164 offset += sizeof(struct sctp_chunkhdr); 3165 while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) { 3166 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore); 3167 if (ph == NULL) 3168 break; 3169 param_len = ntohs(ph->param_length); 3170 if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) { 3171 /* bad param */ 3172 break; 3173 } 3174 ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)), 3175 (uint8_t *) & cstore); 3176 ptype = ntohs(ph->param_type); 3177 num_param++; 3178 if (param_len > (int)sizeof(cstore)) { 3179 trunc = 1; 3180 } else { 3181 trunc = 0; 3182 } 3183 3184 if (num_param > SCTP_MAX_RESET_PARAMS) { 3185 /* hit the max of parameters already sorry.. 
*/ 3186 break; 3187 } 3188 if (ptype == SCTP_STR_RESET_OUT_REQUEST) { 3189 struct sctp_stream_reset_out_request *req_out; 3190 3191 req_out = (struct sctp_stream_reset_out_request *)ph; 3192 num_req++; 3193 if (stcb->asoc.stream_reset_outstanding) { 3194 seq = ntohl(req_out->response_seq); 3195 if (seq == stcb->asoc.str_reset_seq_out) { 3196 /* implicit ack */ 3197 (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL); 3198 } 3199 } 3200 sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc); 3201 } else if (ptype == SCTP_STR_RESET_IN_REQUEST) { 3202 struct sctp_stream_reset_in_request *req_in; 3203 3204 num_req++; 3205 3206 3207 req_in = (struct sctp_stream_reset_in_request *)ph; 3208 3209 sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc); 3210 } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) { 3211 struct sctp_stream_reset_tsn_request *req_tsn; 3212 3213 num_req++; 3214 req_tsn = (struct sctp_stream_reset_tsn_request *)ph; 3215 3216 if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) { 3217 ret_code = 1; 3218 goto strres_nochunk; 3219 } 3220 /* no more */ 3221 break; 3222 } else if (ptype == SCTP_STR_RESET_RESPONSE) { 3223 struct sctp_stream_reset_response *resp; 3224 uint32_t result; 3225 3226 resp = (struct sctp_stream_reset_response *)ph; 3227 seq = ntohl(resp->response_seq); 3228 result = ntohl(resp->result); 3229 if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) { 3230 ret_code = 1; 3231 goto strres_nochunk; 3232 } 3233 } else { 3234 break; 3235 } 3236 offset += SCTP_SIZE32(param_len); 3237 chk_length -= SCTP_SIZE32(param_len); 3238 } 3239 if (num_req == 0) { 3240 /* we have no response free the stuff */ 3241 goto strres_nochunk; 3242 } 3243 /* ok we have a chunk to link in */ 3244 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, 3245 chk, 3246 sctp_next); 3247 stcb->asoc.ctrl_queue_cnt++; 3248 return (ret_code); 3249} 3250 3251/* 3252 * Handle a router or endpoints report of a packet loss, 
there are two ways 3253 * to handle this, either we get the whole packet and must disect it 3254 * ourselves (possibly with truncation and or corruption) or it is a summary 3255 * from a middle box that did the disectting for us. 3256 */ 3257static void 3258sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp, 3259 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit) 3260{ 3261 uint32_t bottle_bw, on_queue; 3262 uint16_t trunc_len; 3263 unsigned int chlen; 3264 unsigned int at; 3265 struct sctp_chunk_desc desc; 3266 struct sctp_chunkhdr *ch; 3267 3268 chlen = ntohs(cp->ch.chunk_length); 3269 chlen -= sizeof(struct sctp_pktdrop_chunk); 3270 /* XXX possible chlen underflow */ 3271 if (chlen == 0) { 3272 ch = NULL; 3273 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) 3274 SCTP_STAT_INCR(sctps_pdrpbwrpt); 3275 } else { 3276 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr)); 3277 chlen -= sizeof(struct sctphdr); 3278 /* XXX possible chlen underflow */ 3279 memset(&desc, 0, sizeof(desc)); 3280 } 3281 trunc_len = (uint16_t) ntohs(cp->trunc_len); 3282 if (trunc_len > limit) { 3283 trunc_len = limit; 3284 } 3285 /* now the chunks themselves */ 3286 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) { 3287 desc.chunk_type = ch->chunk_type; 3288 /* get amount we need to move */ 3289 at = ntohs(ch->chunk_length); 3290 if (at < sizeof(struct sctp_chunkhdr)) { 3291 /* corrupt chunk, maybe at the end? */ 3292 SCTP_STAT_INCR(sctps_pdrpcrupt); 3293 break; 3294 } 3295 if (trunc_len == 0) { 3296 /* we are supposed to have all of it */ 3297 if (at > chlen) { 3298 /* corrupt skip it */ 3299 SCTP_STAT_INCR(sctps_pdrpcrupt); 3300 break; 3301 } 3302 } else { 3303 /* is there enough of it left ? 
*/ 3304 if (desc.chunk_type == SCTP_DATA) { 3305 if (chlen < (sizeof(struct sctp_data_chunk) + 3306 sizeof(desc.data_bytes))) { 3307 break; 3308 } 3309 } else { 3310 if (chlen < sizeof(struct sctp_chunkhdr)) { 3311 break; 3312 } 3313 } 3314 } 3315 if (desc.chunk_type == SCTP_DATA) { 3316 /* can we get out the tsn? */ 3317 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 3318 SCTP_STAT_INCR(sctps_pdrpmbda); 3319 3320 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) { 3321 /* yep */ 3322 struct sctp_data_chunk *dcp; 3323 uint8_t *ddp; 3324 unsigned int iii; 3325 3326 dcp = (struct sctp_data_chunk *)ch; 3327 ddp = (uint8_t *) (dcp + 1); 3328 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) { 3329 desc.data_bytes[iii] = ddp[iii]; 3330 } 3331 desc.tsn_ifany = dcp->dp.tsn; 3332 } else { 3333 /* nope we are done. */ 3334 SCTP_STAT_INCR(sctps_pdrpnedat); 3335 break; 3336 } 3337 } else { 3338 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 3339 SCTP_STAT_INCR(sctps_pdrpmbct); 3340 } 3341 3342 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) { 3343 SCTP_STAT_INCR(sctps_pdrppdbrk); 3344 break; 3345 } 3346 if (SCTP_SIZE32(at) > chlen) { 3347 break; 3348 } 3349 chlen -= SCTP_SIZE32(at); 3350 if (chlen < sizeof(struct sctp_chunkhdr)) { 3351 /* done, none left */ 3352 break; 3353 } 3354 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at)); 3355 } 3356 /* Now update any rwnd --- possibly */ 3357 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) { 3358 /* From a peer, we get a rwnd report */ 3359 uint32_t a_rwnd; 3360 3361 SCTP_STAT_INCR(sctps_pdrpfehos); 3362 3363 bottle_bw = ntohl(cp->bottle_bw); 3364 on_queue = ntohl(cp->current_onq); 3365 if (bottle_bw && on_queue) { 3366 /* a rwnd report is in here */ 3367 if (bottle_bw > on_queue) 3368 a_rwnd = bottle_bw - on_queue; 3369 else 3370 a_rwnd = 0; 3371 3372 if (a_rwnd == 0) 3373 stcb->asoc.peers_rwnd = 0; 3374 else { 3375 if (a_rwnd > stcb->asoc.total_flight) { 3376 
stcb->asoc.peers_rwnd = 3377 a_rwnd - stcb->asoc.total_flight; 3378 } else { 3379 stcb->asoc.peers_rwnd = 0; 3380 } 3381 if (stcb->asoc.peers_rwnd < 3382 stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3383 /* SWS sender side engages */ 3384 stcb->asoc.peers_rwnd = 0; 3385 } 3386 } 3387 } 3388 } else { 3389 SCTP_STAT_INCR(sctps_pdrpfmbox); 3390 } 3391 3392 /* now middle boxes in sat networks get a cwnd bump */ 3393 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) && 3394 (stcb->asoc.sat_t3_loss_recovery == 0) && 3395 (stcb->asoc.sat_network)) { 3396 /* 3397 * This is debateable but for sat networks it makes sense 3398 * Note if a T3 timer has went off, we will prohibit any 3399 * changes to cwnd until we exit the t3 loss recovery. 3400 */ 3401 uint32_t bw_avail; 3402 int rtt, incr; 3403 3404 int old_cwnd = net->cwnd; 3405 3406 /* need real RTT for this calc */ 3407 rtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 3408 /* get bottle neck bw */ 3409 bottle_bw = ntohl(cp->bottle_bw); 3410 /* and whats on queue */ 3411 on_queue = ntohl(cp->current_onq); 3412 /* 3413 * adjust the on-queue if our flight is more it could be 3414 * that the router has not yet gotten data "in-flight" to it 3415 */ 3416 if (on_queue < net->flight_size) 3417 on_queue = net->flight_size; 3418 3419 /* calculate the available space */ 3420 bw_avail = (bottle_bw * rtt) / 1000; 3421 if (bw_avail > bottle_bw) { 3422 /* 3423 * Cap the growth to no more than the bottle neck. 3424 * This can happen as RTT slides up due to queues. 3425 * It also means if you have more than a 1 second 3426 * RTT with a empty queue you will be limited to the 3427 * bottle_bw per second no matter if other points 3428 * have 1/2 the RTT and you could get more out... 3429 */ 3430 bw_avail = bottle_bw; 3431 } 3432 if (on_queue > bw_avail) { 3433 /* 3434 * No room for anything else don't allow anything 3435 * else to be "added to the fire". 
3436 */ 3437 int seg_inflight, seg_onqueue, my_portion; 3438 3439 net->partial_bytes_acked = 0; 3440 3441 /* how much are we over queue size? */ 3442 incr = on_queue - bw_avail; 3443 if (stcb->asoc.seen_a_sack_this_pkt) { 3444 /* 3445 * undo any cwnd adjustment that the sack 3446 * might have made 3447 */ 3448 net->cwnd = net->prev_cwnd; 3449 } 3450 /* Now how much of that is mine? */ 3451 seg_inflight = net->flight_size / net->mtu; 3452 seg_onqueue = on_queue / net->mtu; 3453 my_portion = (incr * seg_inflight) / seg_onqueue; 3454 3455 /* Have I made an adjustment already */ 3456 if (net->cwnd > net->flight_size) { 3457 /* 3458 * for this flight I made an adjustment we 3459 * need to decrease the portion by a share 3460 * our previous adjustment. 3461 */ 3462 int diff_adj; 3463 3464 diff_adj = net->cwnd - net->flight_size; 3465 if (diff_adj > my_portion) 3466 my_portion = 0; 3467 else 3468 my_portion -= diff_adj; 3469 } 3470 /* 3471 * back down to the previous cwnd (assume we have 3472 * had a sack before this packet). minus what ever 3473 * portion of the overage is my fault. 3474 */ 3475 net->cwnd -= my_portion; 3476 3477 /* we will NOT back down more than 1 MTU */ 3478 if (net->cwnd <= net->mtu) { 3479 net->cwnd = net->mtu; 3480 } 3481 /* force into CA */ 3482 net->ssthresh = net->cwnd - 1; 3483 } else { 3484 /* 3485 * Take 1/4 of the space left or max burst up .. 3486 * whichever is less. 
3487 */ 3488 incr = min((bw_avail - on_queue) >> 2, 3489 stcb->asoc.max_burst * net->mtu); 3490 net->cwnd += incr; 3491 } 3492 if (net->cwnd > bw_avail) { 3493 /* We can't exceed the pipe size */ 3494 net->cwnd = bw_avail; 3495 } 3496 if (net->cwnd < net->mtu) { 3497 /* We always have 1 MTU */ 3498 net->cwnd = net->mtu; 3499 } 3500 if (net->cwnd - old_cwnd != 0) { 3501 /* log only changes */ 3502 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) { 3503 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), 3504 SCTP_CWND_LOG_FROM_SAT); 3505 } 3506 } 3507 } 3508} 3509 3510/* 3511 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to 3512 * still contain IP/SCTP header - stcb: is the tcb found for this packet - 3513 * offset: offset into the mbuf chain to first chunkhdr - length: is the 3514 * length of the complete packet outputs: - length: modified to remaining 3515 * length after control processing - netp: modified to new sctp_nets after 3516 * cookie-echo processing - return NULL to discard the packet (ie. no asoc, 3517 * bad packet,...) otherwise return the tcb for this packet 3518 */ 3519#ifdef __GNUC__ 3520__attribute__((noinline)) 3521#endif 3522 static struct sctp_tcb * 3523 sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, 3524 struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp, 3525 struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen, 3526 uint32_t vrf_id) 3527{ 3528 struct sctp_association *asoc; 3529 uint32_t vtag_in; 3530 int num_chunks = 0; /* number of control chunks processed */ 3531 uint32_t chk_length; 3532 int ret; 3533 int abort_no_unlock = 0; 3534 3535 /* 3536 * How big should this be, and should it be alloc'd? Lets try the 3537 * d-mtu-ceiling for now (2k) and that should hopefully work ... 3538 * until we get into jumbo grams and such.. 
3539 */ 3540 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE]; 3541 struct sctp_tcb *locked_tcb = stcb; 3542 int got_auth = 0; 3543 uint32_t auth_offset = 0, auth_len = 0; 3544 int auth_skipped = 0; 3545 3546 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n", 3547 iphlen, *offset, length, stcb); 3548 3549 /* validate chunk header length... */ 3550 if (ntohs(ch->chunk_length) < sizeof(*ch)) { 3551 SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n", 3552 ntohs(ch->chunk_length)); 3553 if (locked_tcb) { 3554 SCTP_TCB_UNLOCK(locked_tcb); 3555 } 3556 return (NULL); 3557 } 3558 /* 3559 * validate the verification tag 3560 */ 3561 vtag_in = ntohl(sh->v_tag); 3562 3563 if (locked_tcb) { 3564 SCTP_TCB_LOCK_ASSERT(locked_tcb); 3565 } 3566 if (ch->chunk_type == SCTP_INITIATION) { 3567 SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n", 3568 ntohs(ch->chunk_length), vtag_in); 3569 if (vtag_in != 0) { 3570 /* protocol error- silently discard... */ 3571 SCTP_STAT_INCR(sctps_badvtag); 3572 if (locked_tcb) { 3573 SCTP_TCB_UNLOCK(locked_tcb); 3574 } 3575 return (NULL); 3576 } 3577 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) { 3578 /* 3579 * If there is no stcb, skip the AUTH chunk and process 3580 * later after a stcb is found (to validate the lookup was 3581 * valid. 
3582 */ 3583 if ((ch->chunk_type == SCTP_AUTHENTICATION) && 3584 (stcb == NULL) && !sctp_auth_disable) { 3585 /* save this chunk for later processing */ 3586 auth_skipped = 1; 3587 auth_offset = *offset; 3588 auth_len = ntohs(ch->chunk_length); 3589 3590 /* (temporarily) move past this chunk */ 3591 *offset += SCTP_SIZE32(auth_len); 3592 if (*offset >= length) { 3593 /* no more data left in the mbuf chain */ 3594 *offset = length; 3595 if (locked_tcb) { 3596 SCTP_TCB_UNLOCK(locked_tcb); 3597 } 3598 return (NULL); 3599 } 3600 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 3601 sizeof(struct sctp_chunkhdr), chunk_buf); 3602 } 3603 if (ch == NULL) { 3604 /* Help */ 3605 *offset = length; 3606 if (locked_tcb) { 3607 SCTP_TCB_UNLOCK(locked_tcb); 3608 } 3609 return (NULL); 3610 } 3611 if (ch->chunk_type == SCTP_COOKIE_ECHO) { 3612 goto process_control_chunks; 3613 } 3614 /* 3615 * first check if it's an ASCONF with an unknown src addr we 3616 * need to look inside to find the association 3617 */ 3618 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) { 3619 /* inp's refcount may be reduced */ 3620 SCTP_INP_INCR_REF(inp); 3621 3622 stcb = sctp_findassociation_ep_asconf(m, iphlen, 3623 *offset, sh, &inp, netp); 3624 if (stcb == NULL) { 3625 /* 3626 * reduce inp's refcount if not reduced in 3627 * sctp_findassociation_ep_asconf(). 
3628 */ 3629 SCTP_INP_DECR_REF(inp); 3630 } 3631 /* now go back and verify any auth chunk to be sure */ 3632 if (auth_skipped && (stcb != NULL)) { 3633 struct sctp_auth_chunk *auth; 3634 3635 auth = (struct sctp_auth_chunk *) 3636 sctp_m_getptr(m, auth_offset, 3637 auth_len, chunk_buf); 3638 got_auth = 1; 3639 auth_skipped = 0; 3640 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, 3641 auth_offset)) { 3642 /* auth HMAC failed so dump it */ 3643 *offset = length; 3644 if (locked_tcb) { 3645 SCTP_TCB_UNLOCK(locked_tcb); 3646 } 3647 return (NULL); 3648 } else { 3649 /* remaining chunks are HMAC checked */ 3650 stcb->asoc.authenticated = 1; 3651 } 3652 } 3653 } 3654 if (stcb == NULL) { 3655 /* no association, so it's out of the blue... */ 3656 sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL, 3657 vrf_id); 3658 *offset = length; 3659 if (locked_tcb) { 3660 SCTP_TCB_UNLOCK(locked_tcb); 3661 } 3662 return (NULL); 3663 } 3664 asoc = &stcb->asoc; 3665 /* ABORT and SHUTDOWN can use either v_tag... */ 3666 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) || 3667 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) || 3668 (ch->chunk_type == SCTP_PACKET_DROPPED)) { 3669 if ((vtag_in == asoc->my_vtag) || 3670 ((ch->chunk_flags & SCTP_HAD_NO_TCB) && 3671 (vtag_in == asoc->peer_vtag))) { 3672 /* this is valid */ 3673 } else { 3674 /* drop this packet... 
*/ 3675 SCTP_STAT_INCR(sctps_badvtag); 3676 if (locked_tcb) { 3677 SCTP_TCB_UNLOCK(locked_tcb); 3678 } 3679 return (NULL); 3680 } 3681 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 3682 if (vtag_in != asoc->my_vtag) { 3683 /* 3684 * this could be a stale SHUTDOWN-ACK or the 3685 * peer never got the SHUTDOWN-COMPLETE and 3686 * is still hung; we have started a new asoc 3687 * but it won't complete until the shutdown 3688 * is completed 3689 */ 3690 if (locked_tcb) { 3691 SCTP_TCB_UNLOCK(locked_tcb); 3692 } 3693 sctp_handle_ootb(m, iphlen, *offset, sh, inp, 3694 NULL, vrf_id); 3695 return (NULL); 3696 } 3697 } else { 3698 /* for all other chunks, vtag must match */ 3699 if (vtag_in != asoc->my_vtag) { 3700 /* invalid vtag... */ 3701 SCTPDBG(SCTP_DEBUG_INPUT3, 3702 "invalid vtag: %xh, expect %xh\n", 3703 vtag_in, asoc->my_vtag); 3704 SCTP_STAT_INCR(sctps_badvtag); 3705 if (locked_tcb) { 3706 SCTP_TCB_UNLOCK(locked_tcb); 3707 } 3708 *offset = length; 3709 return (NULL); 3710 } 3711 } 3712 } /* end if !SCTP_COOKIE_ECHO */ 3713 /* 3714 * process all control chunks... 3715 */ 3716 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) || 3717 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) && 3718 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) { 3719 /* implied cookie-ack.. 
we must have lost the ack */ 3720 stcb->asoc.overall_error_count = 0; 3721 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, 3722 *netp); 3723 } 3724process_control_chunks: 3725 while (IS_SCTP_CONTROL(ch)) { 3726 /* validate chunk length */ 3727 chk_length = ntohs(ch->chunk_length); 3728 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n", 3729 ch->chunk_type, chk_length); 3730 SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length); 3731 if (chk_length < sizeof(*ch) || 3732 (*offset + (int)chk_length) > length) { 3733 *offset = length; 3734 if (locked_tcb) { 3735 SCTP_TCB_UNLOCK(locked_tcb); 3736 } 3737 return (NULL); 3738 } 3739 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks); 3740 /* 3741 * INIT-ACK only gets the init ack "header" portion only 3742 * because we don't have to process the peer's COOKIE. All 3743 * others get a complete chunk. 3744 */ 3745 if ((ch->chunk_type == SCTP_INITIATION_ACK) || 3746 (ch->chunk_type == SCTP_INITIATION)) { 3747 /* get an init-ack chunk */ 3748 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 3749 sizeof(struct sctp_init_ack_chunk), chunk_buf); 3750 if (ch == NULL) { 3751 *offset = length; 3752 if (locked_tcb) { 3753 SCTP_TCB_UNLOCK(locked_tcb); 3754 } 3755 return (NULL); 3756 } 3757 } else { 3758 /* For cookies and all other chunks. */ 3759 if (chk_length > sizeof(chunk_buf)) { 3760 /* 3761 * use just the size of the chunk buffer so 3762 * the front part of our chunks fit in 3763 * contiguous space up to the chunk buffer 3764 * size (508 bytes). For chunks that need to 3765 * get more than that they must use the 3766 * sctp_m_getptr() function or other means 3767 * (e.g. know how to parse mbuf chains). 3768 * Cookies do this already. 
3769 */ 3770 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 3771 (sizeof(chunk_buf) - 4), 3772 chunk_buf); 3773 if (ch == NULL) { 3774 *offset = length; 3775 if (locked_tcb) { 3776 SCTP_TCB_UNLOCK(locked_tcb); 3777 } 3778 return (NULL); 3779 } 3780 } else { 3781 /* We can fit it all */ 3782 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 3783 chk_length, chunk_buf); 3784 if (ch == NULL) { 3785 SCTP_PRINTF("sctp_process_control: Can't get the all data....\n"); 3786 *offset = length; 3787 if (locked_tcb) { 3788 SCTP_TCB_UNLOCK(locked_tcb); 3789 } 3790 return (NULL); 3791 } 3792 } 3793 } 3794 num_chunks++; 3795 /* Save off the last place we got a control from */ 3796 if (stcb != NULL) { 3797 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) { 3798 /* 3799 * allow last_control to be NULL if 3800 * ASCONF... ASCONF processing will find the 3801 * right net later 3802 */ 3803 if ((netp != NULL) && (*netp != NULL)) 3804 stcb->asoc.last_control_chunk_from = *netp; 3805 } 3806 } 3807#ifdef SCTP_AUDITING_ENABLED 3808 sctp_audit_log(0xB0, ch->chunk_type); 3809#endif 3810 3811 /* check to see if this chunk required auth, but isn't */ 3812 if ((stcb != NULL) && !sctp_auth_disable && 3813 sctp_auth_is_required_chunk(ch->chunk_type, 3814 stcb->asoc.local_auth_chunks) && 3815 !stcb->asoc.authenticated) { 3816 /* "silently" ignore */ 3817 SCTP_STAT_INCR(sctps_recvauthmissing); 3818 goto next_chunk; 3819 } 3820 switch (ch->chunk_type) { 3821 case SCTP_INITIATION: 3822 /* must be first and only chunk */ 3823 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n"); 3824 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3825 /* We are not interested anymore? 
*/ 3826 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 3827 /* 3828 * collision case where we are 3829 * sending to them too 3830 */ 3831 ; 3832 } else { 3833 if (locked_tcb) { 3834 SCTP_TCB_UNLOCK(locked_tcb); 3835 } 3836 *offset = length; 3837 return (NULL); 3838 } 3839 } 3840 if ((num_chunks > 1) || 3841 (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) { 3842 *offset = length; 3843 if (locked_tcb) { 3844 SCTP_TCB_UNLOCK(locked_tcb); 3845 } 3846 return (NULL); 3847 } 3848 if ((stcb != NULL) && 3849 (SCTP_GET_STATE(&stcb->asoc) == 3850 SCTP_STATE_SHUTDOWN_ACK_SENT)) { 3851 sctp_send_shutdown_ack(stcb, 3852 stcb->asoc.primary_destination); 3853 *offset = length; 3854 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC); 3855 if (locked_tcb) { 3856 SCTP_TCB_UNLOCK(locked_tcb); 3857 } 3858 return (NULL); 3859 } 3860 if (netp) { 3861 sctp_handle_init(m, iphlen, *offset, sh, 3862 (struct sctp_init_chunk *)ch, inp, 3863 stcb, *netp, &abort_no_unlock, vrf_id); 3864 } 3865 if (abort_no_unlock) 3866 return (NULL); 3867 3868 *offset = length; 3869 if (locked_tcb) { 3870 SCTP_TCB_UNLOCK(locked_tcb); 3871 } 3872 return (NULL); 3873 break; 3874 case SCTP_PAD_CHUNK: 3875 break; 3876 case SCTP_INITIATION_ACK: 3877 /* must be first and only chunk */ 3878 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n"); 3879 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3880 /* We are not interested anymore */ 3881 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 3882 ; 3883 } else { 3884 if (locked_tcb) { 3885 SCTP_TCB_UNLOCK(locked_tcb); 3886 } 3887 *offset = length; 3888 if (stcb) { 3889 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26); 3890 } 3891 return (NULL); 3892 } 3893 } 3894 if ((num_chunks > 1) || 3895 (sctp_strict_init && (length - *offset > (int)SCTP_SIZE32(chk_length)))) { 3896 *offset = length; 3897 if (locked_tcb) { 3898 SCTP_TCB_UNLOCK(locked_tcb); 3899 } 3900 return (NULL); 3901 } 3902 if ((netp) && 
(*netp)) { 3903 ret = sctp_handle_init_ack(m, iphlen, *offset, sh, 3904 (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id); 3905 } else { 3906 ret = -1; 3907 } 3908 /* 3909 * Special case, I must call the output routine to 3910 * get the cookie echoed 3911 */ 3912 if (abort_no_unlock) 3913 return (NULL); 3914 3915 if ((stcb) && ret == 0) 3916 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC); 3917 *offset = length; 3918 if (locked_tcb) { 3919 SCTP_TCB_UNLOCK(locked_tcb); 3920 } 3921 return (NULL); 3922 break; 3923 case SCTP_SELECTIVE_ACK: 3924 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n"); 3925 SCTP_STAT_INCR(sctps_recvsacks); 3926 { 3927 struct sctp_sack_chunk *sack; 3928 int abort_now = 0; 3929 uint32_t a_rwnd, cum_ack; 3930 uint16_t num_seg; 3931 int nonce_sum_flag; 3932 3933 if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) { 3934 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n"); 3935 *offset = length; 3936 if (locked_tcb) { 3937 SCTP_TCB_UNLOCK(locked_tcb); 3938 } 3939 return (NULL); 3940 } 3941 sack = (struct sctp_sack_chunk *)ch; 3942 nonce_sum_flag = ch->chunk_flags & SCTP_SACK_NONCE_SUM; 3943 cum_ack = ntohl(sack->sack.cum_tsn_ack); 3944 num_seg = ntohs(sack->sack.num_gap_ack_blks); 3945 a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd); 3946 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n", 3947 cum_ack, 3948 num_seg, 3949 a_rwnd 3950 ); 3951 stcb->asoc.seen_a_sack_this_pkt = 1; 3952 if ((stcb->asoc.pr_sctp_cnt == 0) && 3953 (num_seg == 0) && 3954 ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) || 3955 (cum_ack == stcb->asoc.last_acked_seq)) && 3956 (stcb->asoc.saw_sack_with_frags == 0) && 3957 (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) 3958 ) { 3959 /* 3960 * We have a SIMPLE sack having no 3961 * prior segments and data on sent 3962 * queue to be acked.. Use the 3963 * faster path sack processing. 
We 3964 * also allow window update sacks 3965 * with no missing segments to go 3966 * this way too. 3967 */ 3968 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag, 3969 &abort_now); 3970 } else { 3971 if (netp && *netp) 3972 sctp_handle_sack(m, *offset, 3973 sack, stcb, *netp, &abort_now, chk_length, a_rwnd); 3974 } 3975 if (abort_now) { 3976 /* ABORT signal from sack processing */ 3977 *offset = length; 3978 return (NULL); 3979 } 3980 } 3981 break; 3982 case SCTP_HEARTBEAT_REQUEST: 3983 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n"); 3984 if ((stcb) && netp && *netp) { 3985 SCTP_STAT_INCR(sctps_recvheartbeat); 3986 sctp_send_heartbeat_ack(stcb, m, *offset, 3987 chk_length, *netp); 3988 3989 /* He's alive so give him credit */ 3990 stcb->asoc.overall_error_count = 0; 3991 } 3992 break; 3993 case SCTP_HEARTBEAT_ACK: 3994 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n"); 3995 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) { 3996 /* Its not ours */ 3997 *offset = length; 3998 if (locked_tcb) { 3999 SCTP_TCB_UNLOCK(locked_tcb); 4000 } 4001 return (NULL); 4002 } 4003 /* He's alive so give him credit */ 4004 stcb->asoc.overall_error_count = 0; 4005 SCTP_STAT_INCR(sctps_recvheartbeatack); 4006 if (netp && *netp) 4007 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch, 4008 stcb, *netp); 4009 break; 4010 case SCTP_ABORT_ASSOCIATION: 4011 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n", 4012 stcb); 4013 if ((stcb) && netp && *netp) 4014 sctp_handle_abort((struct sctp_abort_chunk *)ch, 4015 stcb, *netp); 4016 *offset = length; 4017 return (NULL); 4018 break; 4019 case SCTP_SHUTDOWN: 4020 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n", 4021 stcb); 4022 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) { 4023 *offset = length; 4024 if (locked_tcb) { 4025 SCTP_TCB_UNLOCK(locked_tcb); 4026 } 4027 return (NULL); 4028 4029 } 4030 if (netp && *netp) { 4031 int abort_flag = 0; 4032 4033 
sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch, 4034 stcb, *netp, &abort_flag); 4035 if (abort_flag) { 4036 *offset = length; 4037 return (NULL); 4038 } 4039 } 4040 break; 4041 case SCTP_SHUTDOWN_ACK: 4042 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", stcb); 4043 if ((stcb) && (netp) && (*netp)) 4044 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp); 4045 *offset = length; 4046 return (NULL); 4047 break; 4048 4049 case SCTP_OPERATION_ERROR: 4050 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n"); 4051 if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) { 4052 4053 *offset = length; 4054 return (NULL); 4055 } 4056 break; 4057 case SCTP_COOKIE_ECHO: 4058 SCTPDBG(SCTP_DEBUG_INPUT3, 4059 "SCTP_COOKIE-ECHO, stcb %p\n", stcb); 4060 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4061 ; 4062 } else { 4063 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4064 /* We are not interested anymore */ 4065 *offset = length; 4066 return (NULL); 4067 } 4068 } 4069 /* 4070 * First are we accepting? We do this again here 4071 * sincen it is possible that a previous endpoint 4072 * WAS listening responded to a INIT-ACK and then 4073 * closed. We opened and bound.. and are now no 4074 * longer listening. 4075 */ 4076 if (inp->sctp_socket->so_qlimit == 0) { 4077 if ((stcb) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 4078 /* 4079 * special case, is this a retran'd 4080 * COOKIE-ECHO or a restarting assoc 4081 * that is a peeled off or 4082 * one-to-one style socket. 
4083 */ 4084 goto process_cookie_anyway; 4085 } 4086 sctp_abort_association(inp, stcb, m, iphlen, 4087 sh, NULL, vrf_id); 4088 *offset = length; 4089 return (NULL); 4090 } else if (inp->sctp_socket->so_qlimit) { 4091 /* we are accepting so check limits like TCP */ 4092 if (inp->sctp_socket->so_qlen > 4093 inp->sctp_socket->so_qlimit) { 4094 /* no space */ 4095 struct mbuf *oper; 4096 struct sctp_paramhdr *phdr; 4097 4098 if (sctp_abort_if_one_2_one_hits_limit) { 4099 oper = NULL; 4100 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 4101 0, M_DONTWAIT, 1, MT_DATA); 4102 if (oper) { 4103 SCTP_BUF_LEN(oper) = 4104 sizeof(struct sctp_paramhdr); 4105 phdr = mtod(oper, 4106 struct sctp_paramhdr *); 4107 phdr->param_type = 4108 htons(SCTP_CAUSE_OUT_OF_RESC); 4109 phdr->param_length = 4110 htons(sizeof(struct sctp_paramhdr)); 4111 } 4112 sctp_abort_association(inp, stcb, m, 4113 iphlen, sh, oper, vrf_id); 4114 } 4115 *offset = length; 4116 return (NULL); 4117 } 4118 } 4119 process_cookie_anyway: 4120 { 4121 struct mbuf *ret_buf; 4122 struct sctp_inpcb *linp; 4123 4124 if (stcb) { 4125 linp = NULL; 4126 } else { 4127 linp = inp; 4128 } 4129 4130 if (linp) { 4131 SCTP_ASOC_CREATE_LOCK(linp); 4132 } 4133 if (netp) { 4134 ret_buf = 4135 sctp_handle_cookie_echo(m, iphlen, 4136 *offset, sh, 4137 (struct sctp_cookie_echo_chunk *)ch, 4138 &inp, &stcb, netp, 4139 auth_skipped, 4140 auth_offset, 4141 auth_len, 4142 &locked_tcb, 4143 vrf_id); 4144 } else { 4145 ret_buf = NULL; 4146 } 4147 if (linp) { 4148 SCTP_ASOC_CREATE_UNLOCK(linp); 4149 } 4150 if (ret_buf == NULL) { 4151 if (locked_tcb) { 4152 SCTP_TCB_UNLOCK(locked_tcb); 4153 } 4154 SCTPDBG(SCTP_DEBUG_INPUT3, 4155 "GAK, null buffer\n"); 4156 auth_skipped = 0; 4157 *offset = length; 4158 return (NULL); 4159 } 4160 /* if AUTH skipped, see if it verified... 
*/ 4161 if (auth_skipped) { 4162 got_auth = 1; 4163 auth_skipped = 0; 4164 } 4165 if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) { 4166 /* 4167 * Restart the timer if we have 4168 * pending data 4169 */ 4170 struct sctp_tmit_chunk *chk; 4171 4172 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 4173 if (chk) { 4174 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4175 stcb->sctp_ep, stcb, 4176 chk->whoTo); 4177 } 4178 } 4179 } 4180 break; 4181 case SCTP_COOKIE_ACK: 4182 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", stcb); 4183 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) { 4184 if (locked_tcb) { 4185 SCTP_TCB_UNLOCK(locked_tcb); 4186 } 4187 return (NULL); 4188 } 4189 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4190 /* We are not interested anymore */ 4191 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4192 ; 4193 } else if (stcb) { 4194 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27); 4195 *offset = length; 4196 return (NULL); 4197 } 4198 } 4199 /* He's alive so give him credit */ 4200 if ((stcb) && netp && *netp) { 4201 stcb->asoc.overall_error_count = 0; 4202 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp); 4203 } 4204 break; 4205 case SCTP_ECN_ECHO: 4206 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n"); 4207 /* He's alive so give him credit */ 4208 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) { 4209 /* Its not ours */ 4210 if (locked_tcb) { 4211 SCTP_TCB_UNLOCK(locked_tcb); 4212 } 4213 *offset = length; 4214 return (NULL); 4215 } 4216 if (stcb) { 4217 stcb->asoc.overall_error_count = 0; 4218 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, 4219 stcb); 4220 } 4221 break; 4222 case SCTP_ECN_CWR: 4223 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n"); 4224 /* He's alive so give him credit */ 4225 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) { 4226 /* Its not ours */ 4227 if (locked_tcb) { 4228 SCTP_TCB_UNLOCK(locked_tcb); 4229 } 4230 
*offset = length; 4231 return (NULL); 4232 } 4233 if (stcb) { 4234 stcb->asoc.overall_error_count = 0; 4235 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb); 4236 } 4237 break; 4238 case SCTP_SHUTDOWN_COMPLETE: 4239 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", stcb); 4240 /* must be first and only chunk */ 4241 if ((num_chunks > 1) || 4242 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 4243 *offset = length; 4244 if (locked_tcb) { 4245 SCTP_TCB_UNLOCK(locked_tcb); 4246 } 4247 return (NULL); 4248 } 4249 if ((stcb) && netp && *netp) { 4250 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch, 4251 stcb, *netp); 4252 } 4253 *offset = length; 4254 return (NULL); 4255 break; 4256 case SCTP_ASCONF: 4257 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n"); 4258 /* He's alive so give him credit */ 4259 if (stcb) { 4260 stcb->asoc.overall_error_count = 0; 4261 sctp_handle_asconf(m, *offset, 4262 (struct sctp_asconf_chunk *)ch, stcb); 4263 } 4264 break; 4265 case SCTP_ASCONF_ACK: 4266 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n"); 4267 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) { 4268 /* Its not ours */ 4269 if (locked_tcb) { 4270 SCTP_TCB_UNLOCK(locked_tcb); 4271 } 4272 *offset = length; 4273 return (NULL); 4274 } 4275 if ((stcb) && netp && *netp) { 4276 /* He's alive so give him credit */ 4277 stcb->asoc.overall_error_count = 0; 4278 sctp_handle_asconf_ack(m, *offset, 4279 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp); 4280 } 4281 break; 4282 case SCTP_FORWARD_CUM_TSN: 4283 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n"); 4284 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) { 4285 /* Its not ours */ 4286 if (locked_tcb) { 4287 SCTP_TCB_UNLOCK(locked_tcb); 4288 } 4289 *offset = length; 4290 return (NULL); 4291 } 4292 /* He's alive so give him credit */ 4293 if (stcb) { 4294 int abort_flag = 0; 4295 4296 stcb->asoc.overall_error_count = 0; 4297 *fwd_tsn_seen = 1; 4298 if (inp->sctp_flags & 
SCTP_PCB_FLAGS_SOCKET_GONE) { 4299 /* We are not interested anymore */ 4300 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28); 4301 *offset = length; 4302 return (NULL); 4303 } 4304 sctp_handle_forward_tsn(stcb, 4305 (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset); 4306 if (abort_flag) { 4307 *offset = length; 4308 return (NULL); 4309 } else { 4310 stcb->asoc.overall_error_count = 0; 4311 } 4312 4313 } 4314 break; 4315 case SCTP_STREAM_RESET: 4316 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n"); 4317 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) { 4318 /* Its not ours */ 4319 if (locked_tcb) { 4320 SCTP_TCB_UNLOCK(locked_tcb); 4321 } 4322 *offset = length; 4323 return (NULL); 4324 } 4325 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4326 /* We are not interested anymore */ 4327 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29); 4328 *offset = length; 4329 return (NULL); 4330 } 4331 if (stcb->asoc.peer_supports_strreset == 0) { 4332 /* 4333 * hmm, peer should have announced this, but 4334 * we will turn it on since he is sending us 4335 * a stream reset. 
4336 */ 4337 stcb->asoc.peer_supports_strreset = 1; 4338 } 4339 if (sctp_handle_stream_reset(stcb, m, *offset, (struct sctp_stream_reset_out_req *)ch)) { 4340 /* stop processing */ 4341 *offset = length; 4342 return (NULL); 4343 } 4344 break; 4345 case SCTP_PACKET_DROPPED: 4346 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n"); 4347 /* re-get it all please */ 4348 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) { 4349 /* Its not ours */ 4350 if (locked_tcb) { 4351 SCTP_TCB_UNLOCK(locked_tcb); 4352 } 4353 *offset = length; 4354 return (NULL); 4355 } 4356 if (ch && (stcb) && netp && (*netp)) { 4357 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch, 4358 stcb, *netp, 4359 min(chk_length, (sizeof(chunk_buf) - 4))); 4360 4361 } 4362 break; 4363 4364 case SCTP_AUTHENTICATION: 4365 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n"); 4366 if (sctp_auth_disable) 4367 goto unknown_chunk; 4368 4369 if (stcb == NULL) { 4370 /* save the first AUTH for later processing */ 4371 if (auth_skipped == 0) { 4372 auth_offset = *offset; 4373 auth_len = chk_length; 4374 auth_skipped = 1; 4375 } 4376 /* skip this chunk (temporarily) */ 4377 goto next_chunk; 4378 } 4379 if ((chk_length < (sizeof(struct sctp_auth_chunk))) || 4380 (chk_length > (sizeof(struct sctp_auth_chunk) + 4381 SCTP_AUTH_DIGEST_LEN_MAX))) { 4382 /* Its not ours */ 4383 if (locked_tcb) { 4384 SCTP_TCB_UNLOCK(locked_tcb); 4385 } 4386 *offset = length; 4387 return (NULL); 4388 } 4389 if (got_auth == 1) { 4390 /* skip this chunk... it's already auth'd */ 4391 goto next_chunk; 4392 } 4393 got_auth = 1; 4394 if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, 4395 m, *offset)) { 4396 /* auth HMAC failed so dump the packet */ 4397 *offset = length; 4398 return (stcb); 4399 } else { 4400 /* remaining chunks are HMAC checked */ 4401 stcb->asoc.authenticated = 1; 4402 } 4403 break; 4404 4405 default: 4406 unknown_chunk: 4407 /* it's an unknown chunk! 
*/ 4408 if ((ch->chunk_type & 0x40) && (stcb != NULL)) { 4409 struct mbuf *mm; 4410 struct sctp_paramhdr *phd; 4411 4412 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 4413 0, M_DONTWAIT, 1, MT_DATA); 4414 if (mm) { 4415 phd = mtod(mm, struct sctp_paramhdr *); 4416 /* 4417 * We cheat and use param type since 4418 * we did not bother to define a 4419 * error cause struct. They are the 4420 * same basic format with different 4421 * names. 4422 */ 4423 phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK); 4424 phd->param_length = htons(chk_length + sizeof(*phd)); 4425 SCTP_BUF_LEN(mm) = sizeof(*phd); 4426 SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length), 4427 M_DONTWAIT); 4428 if (SCTP_BUF_NEXT(mm)) { 4429 sctp_queue_op_err(stcb, mm); 4430 } else { 4431 sctp_m_freem(mm); 4432 } 4433 } 4434 } 4435 if ((ch->chunk_type & 0x80) == 0) { 4436 /* discard this packet */ 4437 *offset = length; 4438 return (stcb); 4439 } /* else skip this bad chunk and continue... */ 4440 break; 4441 } /* switch (ch->chunk_type) */ 4442 4443 4444next_chunk: 4445 /* get the next chunk */ 4446 *offset += SCTP_SIZE32(chk_length); 4447 if (*offset >= length) { 4448 /* no more data left in the mbuf chain */ 4449 break; 4450 } 4451 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4452 sizeof(struct sctp_chunkhdr), chunk_buf); 4453 if (ch == NULL) { 4454 if (locked_tcb) { 4455 SCTP_TCB_UNLOCK(locked_tcb); 4456 } 4457 *offset = length; 4458 return (NULL); 4459 } 4460 } /* while */ 4461 return (stcb); 4462} 4463 4464 4465/* 4466 * Process the ECN bits we have something set so we must look to see if it is 4467 * ECN(0) or ECN(1) or CE 4468 */ 4469static void 4470sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net, 4471 uint8_t ecn_bits) 4472{ 4473 if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) { 4474 ; 4475 } else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) { 4476 /* 4477 * we only add to the nonce sum for ECT1, ECT0 does not 4478 * change the NS 
		 * bit (that we have yet to find a way to send
		 * it yet).
		 */

		/* ECN Nonce stuff */
		stcb->asoc.receiver_nonce_sum++;
		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;

		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	}
}

/*
 * ECN processing, part 2 (post-data): if the peer's packet carried the
 * Congestion Experienced (CE) mark on a TSN newer than the last one we
 * echoed, queue an ECN-Echo (ECNE) chunk toward the sender.  The peer's
 * CWR chunk later removes that queued ECNE.
 */
static void
sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint32_t high_tsn, uint8_t ecn_bits)
{
	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
		/*
		 * we possibly must notify the sender that a congestion
		 * window reduction is in order. We do this by adding a ECNE
		 * chunk to the output chunk queue. The incoming CWR will
		 * remove this chunk.
		 */
		if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
		    MAX_TSN)) {
			/* Yep, we need to add a ECNE */
			sctp_send_ecn_echo(stcb, net, high_tsn);
			stcb->asoc.last_echo_tsn = high_tsn;
		}
	}
}

/*
 * common input chunk processing (v4 and v6)
 *
 * NOTE(review): appears to be entered with stcb locked (when non-NULL) and
 * an inp reference held by the caller (see sctp_input()); on return the
 * stcb lock has been released on every path that did not free the
 * association — confirm against callers.
 */
void
sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits, uint32_t vrf_id)
{
	/*
	 * Control chunk processing
	 */
	uint32_t high_tsn;
	int fwd_tsn_seen = 0, data_processed = 0;
	struct mbuf *m = *mm;
	int abort_flag = 0;
	int un_sent;

	SCTP_STAT_INCR(sctps_recvdatagrams);
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 1);
	sctp_auditing(0, inp, stcb, net);
#endif

	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d\n",
	    m, iphlen, offset);

	if (stcb) {
		/* always clear this before beginning a packet */
		stcb->asoc.authenticated = 0;
		stcb->asoc.seen_a_sack_this_pkt = 0;
	}
	if (IS_SCTP_CONTROL(ch)) {
		/* process the control portion of the SCTP packet */
		/* sa_ignore NO_NULL_CHK */
		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
		    inp, stcb, &net, &fwd_tsn_seen, vrf_id);
		if (stcb) {
			/*
			 * This covers us if the cookie-echo was there and
			 * it changes our INP.
			 */
			inp = stcb->sctp_ep;
		}
	} else {
		/*
		 * no control chunks, so pre-process DATA chunks (these
		 * checks are taken care of by control processing)
		 */

		/*
		 * if DATA only packet, and auth is required, then punt...
		 * can't have authenticated without any AUTH (control)
		 * chunks
		 */
		if ((stcb != NULL) && !sctp_auth_disable &&
		    sctp_auth_is_required_chunk(SCTP_DATA,
		    stcb->asoc.local_auth_chunks)) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		if (stcb == NULL) {
			/* out of the blue DATA chunk */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id);
			return;
		}
		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
			/* v_tag mismatch! */
			SCTP_STAT_INCR(sctps_badvtag);
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
	}

	if (stcb == NULL) {
		/*
		 * no valid TCB for this packet, or we found it's a bad
		 * packet while processing control, or we're done with this
		 * packet (done or skip rest of data), so we drop it...
		 */
		return;
	}
	/*
	 * DATA chunk processing
	 */
	/* plow through the data chunks while length > offset */

	/*
	 * Rest should be DATA only. Check authentication state if AUTH for
	 * DATA is required.
	 */
	if ((length > offset) && (stcb != NULL) && !sctp_auth_disable &&
	    sctp_auth_is_required_chunk(SCTP_DATA,
	    stcb->asoc.local_auth_chunks) &&
	    !stcb->asoc.authenticated) {
		/* "silently" ignore: still try to flush queued output below */
		SCTP_STAT_INCR(sctps_recvauthmissing);
		SCTPDBG(SCTP_DEBUG_AUTH1,
		    "Data chunk requires AUTH, skipped\n");
		goto trigger_send;
	}
	if (length > offset) {
		int retval;

		/*
		 * First check to make sure our state is correct. We would
		 * not get here unless we really did have a tag, so we don't
		 * abort if this happens, just dump the chunk silently.
		 */
		switch (SCTP_GET_STATE(&stcb->asoc)) {
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * we consider data with valid tags in this state
			 * shows us the cookie-ack was lost. Imply it was
			 * there.
			 */
			stcb->asoc.overall_error_count = 0;
			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
			break;
		case SCTP_STATE_COOKIE_WAIT:
			/*
			 * We consider OOTB any data sent during asoc setup.
			 */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id);
			SCTP_TCB_UNLOCK(stcb);
			return;
			break;
		case SCTP_STATE_EMPTY:	/* should not happen */
		case SCTP_STATE_INUSE:	/* should not happen */
		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
		default:
			SCTP_TCB_UNLOCK(stcb);
			return;
			break;
		case SCTP_STATE_OPEN:
		case SCTP_STATE_SHUTDOWN_SENT:
			break;
		}
		/* take care of ECN, part 1. */
		if (stcb->asoc.ecn_allowed &&
		    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
			sctp_process_ecn_marked_a(stcb, net, ecn_bits);
		}
		/* plow through the data chunks while length > offset */
		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
		    inp, stcb, net, &high_tsn);
		if (retval == 2) {
			/*
			 * The association aborted, NO UNLOCK needed since
			 * the association is destroyed.
			 */
			return;
		}
		data_processed = 1;
		if (retval == 0) {
			/* take care of ecn part 2. */
			if (stcb->asoc.ecn_allowed &&
			    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
				sctp_process_ecn_marked_b(stcb, net, high_tsn,
				    ecn_bits);
			}
		}
		/*
		 * Anything important needs to have been m_copy'ed in
		 * process_data
		 */
	}
	if ((data_processed == 0) && (fwd_tsn_seen)) {
		/* FWD-TSN arrived without new data; force a SACK check now */
		int was_a_gap = 0;

		if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
		    stcb->asoc.cumulative_tsn, MAX_TSN)) {
			/* there was a gap before this data was processed */
			was_a_gap = 1;
		}
		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
		if (abort_flag) {
			/* Again, we aborted so NO UNLOCK needed */
			return;
		}
	}
	/* trigger send of any chunks in queue... */
trigger_send:
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 2);
	sctp_auditing(1, inp, stcb, net);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
	    stcb->asoc.peers_rwnd,
	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
	    stcb->asoc.total_flight);
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);

	/*
	 * Send if there is control traffic queued, or unsent data and either
	 * the peer window is open or we are at zero-window with nothing in
	 * flight (window-probe case).
	 */
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
	    ((un_sent) &&
	    (stcb->asoc.peers_rwnd > 0 ||
	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 3);
	sctp_auditing(2, inp, stcb, net);
#endif
	SCTP_TCB_UNLOCK(stcb);
	return;
}



/*
 * IPv4 input entry point (called from the IP stack with the packet and the
 * IP header length).  Old-style K&R definition kept as-is.
 */
void
sctp_input(i_pak, off)
	struct mbuf *i_pak;
	int off;

{
#ifdef SCTP_MBUF_LOGGING
	struct mbuf *mat;

#endif
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0;
	uint8_t ecn_bits;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_inpcb *inp = NULL;

	uint32_t check, calc_check;
	struct sctp_nets *net;
	struct sctp_tcb *stcb = NULL;
	struct sctp_chunkhdr *ch;
	/* set when sctp_findassociation_addr() took an inp ref we must drop */
	int refcount_up = 0;
	int length, mlen, offset;


	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	mlen = SCTP_HEADER_LEN(i_pak);
	iphlen = off;
	m = SCTP_HEADER_TO_CHAIN(i_pak);

	net = NULL;
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);


#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	if (sctp_logging_level & SCTP_MBUF_LOGGING_ENABLE) {
		mat = m;
		while (mat) {
			if (SCTP_BUF_IS_EXTENDED(mat)) {
				sctp_log_mb(mat, SCTP_MBUF_INPUT);
			}
			mat = SCTP_BUF_NEXT(mat);
		}
	}
#endif
#ifdef SCTP_PACKET_LOGGING
	if (sctp_logging_level & SCTP_LAST_PACKET_TRACING)
		sctp_packet_log(m, mlen);
#endif
	/*
	 * Must take out the iphlen, since mlen expects this (only effect lb
	 * case)
	 */
	mlen -= iphlen;

	/*
	 * Get IP, SCTP, and first chunk header together in first mbuf.
	 */
	ip = mtod(m, struct ip *);
	offset = iphlen + sizeof(*sh) + sizeof(*ch);
	if (SCTP_BUF_LEN(m) < offset) {
		/* m_pullup frees the chain on failure */
		if ((m = m_pullup(m, offset)) == 0) {
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
		ip = mtod(m, struct ip *);
	}
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "sctp_input() length:%d iphlen:%d\n", mlen, iphlen);

	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		goto bad;
	}
	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
		/*
		 * We only look at broadcast if its a front state, All
		 * others we will not have a tcb for anyway.
		 */
		goto bad;
	}
	/* validate SCTP checksum */
	if ((sctp_no_csum_on_loopback == 0) || !SCTP_IS_IT_LOOPBACK(m)) {
		/*
		 * we do NOT validate things from the loopback if the sysctl
		 * is set to 1.
		 */
		check = sh->checksum;	/* save incoming checksum */
		if ((check == 0) && (sctp_no_csum_on_loopback)) {
			/*
			 * special hook for where we got a local address
			 * somehow routed across a non IFT_LOOP type
			 * interface
			 */
			if (ip->ip_src.s_addr == ip->ip_dst.s_addr)
				goto sctp_skip_csum_4;
		}
		sh->checksum = 0;	/* prepare for calc */
		calc_check = sctp_calculate_sum(m, &mlen, iphlen);
		if (calc_check != check) {
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
			    calc_check, check, m, mlen, iphlen);

			/*
			 * Bad checksum: report a packet-drop to the peer if
			 * we can identify the association, then drop.
			 */
			stcb = sctp_findassociation_addr(m, iphlen,
			    offset - sizeof(*ch),
			    sh, ch, &inp, &net,
			    vrf_id);
			if ((inp) && (stcb)) {
				sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR);
			} else if ((inp != NULL) && (stcb == NULL)) {
				refcount_up = 1;
			}
			SCTP_STAT_INCR(sctps_badsum);
			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
			goto bad;
		}
		sh->checksum = calc_check;
	}
sctp_skip_csum_4:
	/* destination port of 0 is illegal, based on RFC2960. */
	if (sh->dest_port == 0) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	/*
	 * validate mbuf chain length with IP payload length
	 * (NOTE(review): assumes ip_len is already in host byte order here,
	 * as done by ip_input() on this branch — confirm)
	 */
	if (mlen < (ip->ip_len - iphlen)) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	/*
	 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
	 * IP/SCTP/first chunk header...
	 */
	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
	    sh, ch, &inp, &net, vrf_id);
	/* inp's ref-count increased && stcb locked */
	if (inp == NULL) {
		struct sctp_init_chunk *init_chk, chunk_buf;

		SCTP_STAT_INCR(sctps_noport);
#ifdef ICMP_BANDLIM
		/*
		 * we use the bandwidth limiting to protect against sending
		 * too many ABORTS all at once. In this case these count the
		 * same as an ICMP message.
		 */
		if (badport_bandlim(0) < 0)
			goto bad;
#endif				/* ICMP_BANDLIM */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Sending a ABORT from packet entry!\n");
		if (ch->chunk_type == SCTP_INITIATION) {
			/*
			 * we do a trick here to get the INIT tag, dig in
			 * and get the tag from the INIT and put it in the
			 * common header.
			 */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    iphlen + sizeof(*sh), sizeof(*init_chk),
			    (uint8_t *) & chunk_buf);
			if (init_chk != NULL)
				sh->v_tag = init_chk->init.initiate_tag;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id);
			goto bad;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
			goto bad;
		}
		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id);
		goto bad;
	} else if (stcb == NULL) {
		refcount_up = 1;
	}
#ifdef FAST_IPSEC
	/*
	 * I very much doubt any of the IPSEC stuff will work but I have no
	 * idea, so I will leave it in place.
	 */

	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
		ipsec4stat.in_polvio++;
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
#endif				/* IPSEC */

	/*
	 * common chunk processing
	 */
	length = ip->ip_len + iphlen;
	offset -= sizeof(struct sctp_chunkhdr);

	ecn_bits = ip->ip_tos;

	/* sa_ignore NO_NULL_CHK */
	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
	    inp, stcb, net, ecn_bits, vrf_id);
	/* inp's ref-count reduced && stcb unlocked */
	if (m) {
		sctp_m_freem(m);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_WLOCK(inp);
		SCTP_INP_DECR_REF(inp);
		SCTP_INP_WUNLOCK(inp);
	}
	return;
bad:
	/* error exit: release the stcb lock and any inp reference we hold */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_WLOCK(inp);
		SCTP_INP_DECR_REF(inp);
		SCTP_INP_WUNLOCK(inp);
	}
	if (m) {
		sctp_m_freem(m);
	}
	return;
}