sctp_input.c revision 169420
1/*- 2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
29 */ 30 31/* $KAME: sctp_input.c,v 1.27 2005/03/06 16:04:17 itojun Exp $ */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 169420 2007-05-09 13:30:06Z rrs $"); 35 36#include <netinet/sctp_os.h> 37#include <netinet/sctp_var.h> 38#include <netinet/sctp_sysctl.h> 39#include <netinet/sctp_pcb.h> 40#include <netinet/sctp_header.h> 41#include <netinet/sctputil.h> 42#include <netinet/sctp_output.h> 43#include <netinet/sctp_input.h> 44#include <netinet/sctp_auth.h> 45#include <netinet/sctp_indata.h> 46#include <netinet/sctp_asconf.h> 47 48 49 50 51static void 52sctp_stop_all_cookie_timers(struct sctp_tcb *stcb) 53{ 54 struct sctp_nets *net; 55 56 /* 57 * This now not only stops all cookie timers it also stops any INIT 58 * timers as well. This will make sure that the timers are stopped 59 * in all collision cases. 60 */ 61 SCTP_TCB_LOCK_ASSERT(stcb); 62 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 63 if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) { 64 sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE, 65 stcb->sctp_ep, 66 stcb, 67 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1); 68 } else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) { 69 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, 70 stcb->sctp_ep, 71 stcb, 72 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2); 73 } 74 } 75} 76 77/* INIT handler */ 78static void 79sctp_handle_init(struct mbuf *m, int iphlen, int offset, struct sctphdr *sh, 80 struct sctp_init_chunk *cp, struct sctp_inpcb *inp, struct sctp_tcb *stcb, 81 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, 82 uint32_t table_id) 83{ 84 struct sctp_init *init; 85 struct mbuf *op_err; 86 uint32_t init_limit; 87 88 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n", 89 stcb); 90 op_err = NULL; 91 init = &cp->init; 92 /* First are we accepting? 
*/ 93 if ((inp->sctp_socket->so_qlimit == 0) && (stcb == NULL)) { 94 SCTPDBG(SCTP_DEBUG_INPUT2, 95 "sctp_handle_init: Abort, so_qlimit:%d\n", 96 inp->sctp_socket->so_qlimit); 97 /* 98 * FIX ME ?? What about TCP model and we have a 99 * match/restart case? 100 */ 101 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 102 vrf_id, table_id); 103 if (stcb) 104 *abort_no_unlock = 1; 105 return; 106 } 107 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) { 108 /* Invalid length */ 109 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 110 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 111 vrf_id, table_id); 112 if (stcb) 113 *abort_no_unlock = 1; 114 return; 115 } 116 /* validate parameters */ 117 if (init->initiate_tag == 0) { 118 /* protocol error... send abort */ 119 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 120 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 121 vrf_id, table_id); 122 if (stcb) 123 *abort_no_unlock = 1; 124 return; 125 } 126 if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) { 127 /* invalid parameter... send abort */ 128 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 129 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 130 vrf_id, table_id); 131 return; 132 } 133 if (init->num_inbound_streams == 0) { 134 /* protocol error... send abort */ 135 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 136 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 137 vrf_id, table_id); 138 if (stcb) 139 *abort_no_unlock = 1; 140 return; 141 } 142 if (init->num_outbound_streams == 0) { 143 /* protocol error... 
send abort */ 144 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 145 sctp_abort_association(inp, stcb, m, iphlen, sh, op_err, 146 vrf_id, table_id); 147 if (stcb) 148 *abort_no_unlock = 1; 149 return; 150 } 151 init_limit = offset + ntohs(cp->ch.chunk_length); 152 if (sctp_validate_init_auth_params(m, offset + sizeof(*cp), 153 init_limit)) { 154 /* auth parameter(s) error... send abort */ 155 sctp_abort_association(inp, stcb, m, iphlen, sh, NULL, vrf_id, 156 table_id); 157 if (stcb) 158 *abort_no_unlock = 1; 159 return; 160 } 161 /* send an INIT-ACK w/cookie */ 162 SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n"); 163 sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, sh, cp, vrf_id, 164 table_id); 165} 166 167/* 168 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error 169 */ 170static int 171sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb, 172 struct sctp_nets *net) 173{ 174 struct sctp_init *init; 175 struct sctp_association *asoc; 176 struct sctp_nets *lnet; 177 unsigned int i; 178 179 init = &cp->init; 180 asoc = &stcb->asoc; 181 /* save off parameters */ 182 asoc->peer_vtag = ntohl(init->initiate_tag); 183 asoc->peers_rwnd = ntohl(init->a_rwnd); 184 if (TAILQ_FIRST(&asoc->nets)) { 185 /* update any ssthresh's that may have a default */ 186 TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) { 187 lnet->ssthresh = asoc->peers_rwnd; 188 189#if defined(SCTP_CWND_MONITOR) || defined(SCTP_CWND_LOGGING) 190 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION); 191#endif 192 193 } 194 } 195 SCTP_TCB_SEND_LOCK(stcb); 196 if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) { 197 unsigned int newcnt; 198 struct sctp_stream_out *outs; 199 struct sctp_stream_queue_pending *sp; 200 201 /* cut back on number of streams */ 202 newcnt = ntohs(init->num_inbound_streams); 203 /* This if is probably not needed but I am cautious */ 204 if (asoc->strmout) { 205 /* First make sure no data chunks are trapped */ 
206 for (i = newcnt; i < asoc->pre_open_streams; i++) { 207 outs = &asoc->strmout[i]; 208 sp = TAILQ_FIRST(&outs->outqueue); 209 while (sp) { 210 TAILQ_REMOVE(&outs->outqueue, sp, 211 next); 212 asoc->stream_queue_cnt--; 213 sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL, 214 stcb, SCTP_NOTIFY_DATAGRAM_UNSENT, 215 sp); 216 if (sp->data) { 217 sctp_m_freem(sp->data); 218 sp->data = NULL; 219 } 220 sctp_free_remote_addr(sp->net); 221 sp->net = NULL; 222 /* Free the chunk */ 223 SCTP_PRINTF("sp:%p tcb:%p weird free case\n", 224 sp, stcb); 225 226 sctp_free_a_strmoq(stcb, sp); 227 sp = TAILQ_FIRST(&outs->outqueue); 228 } 229 } 230 } 231 /* cut back the count and abandon the upper streams */ 232 asoc->pre_open_streams = newcnt; 233 } 234 SCTP_TCB_SEND_UNLOCK(stcb); 235 asoc->streamoutcnt = asoc->pre_open_streams; 236 /* init tsn's */ 237 asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1; 238#ifdef SCTP_MAP_LOGGING 239 sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 240#endif 241 /* This is the next one we expect */ 242 asoc->str_reset_seq_in = asoc->asconf_seq_in + 1; 243 244 asoc->mapping_array_base_tsn = ntohl(init->initial_tsn); 245 asoc->cumulative_tsn = asoc->asconf_seq_in; 246 asoc->last_echo_tsn = asoc->asconf_seq_in; 247 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 248 /* open the requested streams */ 249 if (asoc->strmin != NULL) { 250 /* Free the old ones */ 251 struct sctp_queued_to_read *ctl; 252 253 for (i = 0; i < asoc->streamincnt; i++) { 254 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); 255 while (ctl) { 256 TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next); 257 sctp_free_remote_addr(ctl->whoFrom); 258 sctp_m_freem(ctl->data); 259 ctl->data = NULL; 260 sctp_free_a_readq(stcb, ctl); 261 ctl = TAILQ_FIRST(&asoc->strmin[i].inqueue); 262 } 263 } 264 SCTP_FREE(asoc->strmin); 265 } 266 asoc->streamincnt = ntohs(init->num_outbound_streams); 267 if (asoc->streamincnt > MAX_SCTP_STREAMS) { 268 
asoc->streamincnt = MAX_SCTP_STREAMS; 269 } 270 SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt * 271 sizeof(struct sctp_stream_in), "StreamsIn"); 272 if (asoc->strmin == NULL) { 273 /* we didn't get memory for the streams! */ 274 SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n"); 275 return (-1); 276 } 277 for (i = 0; i < asoc->streamincnt; i++) { 278 asoc->strmin[i].stream_no = i; 279 asoc->strmin[i].last_sequence_delivered = 0xffff; 280 /* 281 * U-stream ranges will be set when the cookie is unpacked. 282 * Or for the INIT sender they are un set (if pr-sctp not 283 * supported) when the INIT-ACK arrives. 284 */ 285 TAILQ_INIT(&asoc->strmin[i].inqueue); 286 asoc->strmin[i].delivery_started = 0; 287 } 288 /* 289 * load_address_from_init will put the addresses into the 290 * association when the COOKIE is processed or the INIT-ACK is 291 * processed. Both types of COOKIE's existing and new call this 292 * routine. It will remove addresses that are no longer in the 293 * association (for the restarting case where addresses are 294 * removed). Up front when the INIT arrives we will discard it if it 295 * is a restart and new addresses have been added. 
296 */ 297 return (0); 298} 299 300/* 301 * INIT-ACK message processing/consumption returns value < 0 on error 302 */ 303static int 304sctp_process_init_ack(struct mbuf *m, int iphlen, int offset, 305 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 306 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, 307 uint32_t table_id) 308{ 309 struct sctp_association *asoc; 310 struct mbuf *op_err; 311 int retval, abort_flag; 312 uint32_t initack_limit; 313 314 /* First verify that we have no illegal param's */ 315 abort_flag = 0; 316 op_err = NULL; 317 318 op_err = sctp_arethere_unrecognized_parameters(m, 319 (offset + sizeof(struct sctp_init_chunk)), 320 &abort_flag, (struct sctp_chunkhdr *)cp); 321 if (abort_flag) { 322 /* Send an abort and notify peer */ 323 if (op_err != NULL) { 324 sctp_send_operr_to(m, iphlen, op_err, 325 cp->init.initiate_tag, vrf_id, 326 table_id); 327 } else { 328 /* 329 * Just notify (abort_assoc does this if we send an 330 * abort). 
331 */ 332 sctp_abort_notification(stcb, 0); 333 /* 334 * No sense in further INIT's since we will get the 335 * same param back 336 */ 337 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3); 338 *abort_no_unlock = 1; 339 } 340 return (-1); 341 } 342 asoc = &stcb->asoc; 343 /* process the peer's parameters in the INIT-ACK */ 344 retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb, net); 345 if (retval < 0) { 346 return (retval); 347 } 348 initack_limit = offset + ntohs(cp->ch.chunk_length); 349 /* load all addresses */ 350 if ((retval = sctp_load_addresses_from_init(stcb, m, iphlen, 351 (offset + sizeof(struct sctp_init_chunk)), initack_limit, sh, 352 NULL))) { 353 /* Huh, we should abort */ 354 SCTPDBG(SCTP_DEBUG_INPUT1, 355 "Load addresses from INIT causes an abort %d\n", 356 retval); 357 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 358 NULL, 0, 0); 359 *abort_no_unlock = 1; 360 return (-1); 361 } 362 stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs, 363 stcb->asoc.local_hmacs); 364 if (op_err) { 365 sctp_queue_op_err(stcb, op_err); 366 /* queuing will steal away the mbuf chain to the out queue */ 367 op_err = NULL; 368 } 369 /* extract the cookie and queue it to "echo" it back... */ 370 stcb->asoc.overall_error_count = 0; 371 net->error_count = 0; 372 373 /* 374 * Cancel the INIT timer, We do this first before queueing the 375 * cookie. We always cancel at the primary to assue that we are 376 * canceling the timer started by the INIT which always goes to the 377 * primary. 378 */ 379 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb, 380 asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4); 381 382 /* calculate the RTO */ 383 net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered); 384 385 retval = sctp_send_cookie_echo(m, offset, stcb, net); 386 if (retval < 0) { 387 /* 388 * No cookie, we probably should send a op error. 
But in any 389 * case if there is no cookie in the INIT-ACK, we can 390 * abandon the peer, its broke. 391 */ 392 if (retval == -3) { 393 /* We abort with an error of missing mandatory param */ 394 struct mbuf *op_err; 395 396 op_err = 397 sctp_generate_invmanparam(SCTP_CAUSE_MISSING_PARAM); 398 if (op_err) { 399 /* 400 * Expand beyond to include the mandatory 401 * param cookie 402 */ 403 struct sctp_inv_mandatory_param *mp; 404 405 SCTP_BUF_LEN(op_err) = 406 sizeof(struct sctp_inv_mandatory_param); 407 mp = mtod(op_err, 408 struct sctp_inv_mandatory_param *); 409 /* Subtract the reserved param */ 410 mp->length = 411 htons(sizeof(struct sctp_inv_mandatory_param) - 2); 412 mp->num_param = htonl(1); 413 mp->param = htons(SCTP_STATE_COOKIE); 414 mp->resv = 0; 415 } 416 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 417 sh, op_err, 0, 0); 418 *abort_no_unlock = 1; 419 } 420 return (retval); 421 } 422 return (0); 423} 424 425static void 426sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp, 427 struct sctp_tcb *stcb, struct sctp_nets *net) 428{ 429 struct sockaddr_storage store; 430 struct sockaddr_in *sin; 431 struct sockaddr_in6 *sin6; 432 struct sctp_nets *r_net; 433 struct timeval tv; 434 435 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) { 436 /* Invalid length */ 437 return; 438 } 439 sin = (struct sockaddr_in *)&store; 440 sin6 = (struct sockaddr_in6 *)&store; 441 442 memset(&store, 0, sizeof(store)); 443 if (cp->heartbeat.hb_info.addr_family == AF_INET && 444 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) { 445 sin->sin_family = cp->heartbeat.hb_info.addr_family; 446 sin->sin_len = cp->heartbeat.hb_info.addr_len; 447 sin->sin_port = stcb->rport; 448 memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address, 449 sizeof(sin->sin_addr)); 450 } else if (cp->heartbeat.hb_info.addr_family == AF_INET6 && 451 cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) { 452 sin6->sin6_family = 
cp->heartbeat.hb_info.addr_family; 453 sin6->sin6_len = cp->heartbeat.hb_info.addr_len; 454 sin6->sin6_port = stcb->rport; 455 memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address, 456 sizeof(sin6->sin6_addr)); 457 } else { 458 return; 459 } 460 r_net = sctp_findnet(stcb, (struct sockaddr *)sin); 461 if (r_net == NULL) { 462 SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n"); 463 return; 464 } 465 if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) && 466 (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) && 467 (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) { 468 /* 469 * If the its a HB and it's random value is correct when can 470 * confirm the destination. 471 */ 472 r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 473 if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) { 474 stcb->asoc.primary_destination = r_net; 475 r_net->dest_state &= ~SCTP_ADDR_WAS_PRIMARY; 476 r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 477 r_net = TAILQ_FIRST(&stcb->asoc.nets); 478 if (r_net != stcb->asoc.primary_destination) { 479 /* 480 * first one on the list is NOT the primary 481 * sctp_cmpaddr() is much more efficent if 482 * the primary is the first on the list, 483 * make it so. 484 */ 485 TAILQ_REMOVE(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); 486 TAILQ_INSERT_HEAD(&stcb->asoc.nets, stcb->asoc.primary_destination, sctp_next); 487 } 488 } 489 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 490 stcb, 0, (void *)r_net); 491 } 492 r_net->error_count = 0; 493 r_net->hb_responded = 1; 494 tv.tv_sec = cp->heartbeat.hb_info.time_value_1; 495 tv.tv_usec = cp->heartbeat.hb_info.time_value_2; 496 if (r_net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 497 r_net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE; 498 r_net->dest_state |= SCTP_ADDR_REACHABLE; 499 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 500 SCTP_HEARTBEAT_SUCCESS, (void *)r_net); 501 /* now was it the primary? 
if so restore */ 502 if (r_net->dest_state & SCTP_ADDR_WAS_PRIMARY) { 503 (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, r_net); 504 } 505 } 506 /* Now lets do a RTO with this */ 507 r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv); 508} 509 510static void 511sctp_handle_abort(struct sctp_abort_chunk *cp, 512 struct sctp_tcb *stcb, struct sctp_nets *net) 513{ 514 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n"); 515 if (stcb == NULL) 516 return; 517 518 /* stop any receive timers */ 519 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_5); 520 /* notify user of the abort and clean up... */ 521 sctp_abort_notification(stcb, 0); 522 /* free the tcb */ 523 SCTP_STAT_INCR_COUNTER32(sctps_aborted); 524 if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) || 525 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 526 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 527 } 528#ifdef SCTP_ASOCLOG_OF_TSNS 529 sctp_print_out_track_log(stcb); 530#endif 531 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6); 532 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n"); 533} 534 535static void 536sctp_handle_shutdown(struct sctp_shutdown_chunk *cp, 537 struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag) 538{ 539 struct sctp_association *asoc; 540 int some_on_streamwheel; 541 542 SCTPDBG(SCTP_DEBUG_INPUT2, 543 "sctp_handle_shutdown: handling SHUTDOWN\n"); 544 if (stcb == NULL) 545 return; 546 asoc = &stcb->asoc; 547 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 548 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 549 return; 550 } 551 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) { 552 /* Shutdown NOT the expected size */ 553 return; 554 } else { 555 sctp_update_acked(stcb, cp, net, abort_flag); 556 } 557 if (asoc->control_pdapi) { 558 /* 559 * With a normal shutdown we assume the end of last 
record. 560 */ 561 SCTP_INP_READ_LOCK(stcb->sctp_ep); 562 asoc->control_pdapi->end_added = 1; 563 asoc->control_pdapi->pdapi_aborted = 1; 564 asoc->control_pdapi = NULL; 565 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 566 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 567 } 568 /* goto SHUTDOWN_RECEIVED state to block new requests */ 569 if (stcb->sctp_socket) { 570 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 571 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) { 572 asoc->state = SCTP_STATE_SHUTDOWN_RECEIVED; 573 /* 574 * notify upper layer that peer has initiated a 575 * shutdown 576 */ 577 sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL); 578 579 /* reset time */ 580 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 581 } 582 } 583 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 584 /* 585 * stop the shutdown timer, since we WILL move to 586 * SHUTDOWN-ACK-SENT. 587 */ 588 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_7); 589 } 590 /* Now are we there yet? */ 591 some_on_streamwheel = 0; 592 if (!TAILQ_EMPTY(&asoc->out_wheel)) { 593 /* Check to see if some data queued */ 594 struct sctp_stream_out *outs; 595 596 TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) { 597 if (!TAILQ_EMPTY(&outs->outqueue)) { 598 some_on_streamwheel = 1; 599 break; 600 } 601 } 602 } 603 if (!TAILQ_EMPTY(&asoc->send_queue) || 604 !TAILQ_EMPTY(&asoc->sent_queue) || 605 some_on_streamwheel) { 606 /* By returning we will push more data out */ 607 return; 608 } else { 609 /* no outstanding data to send, so move on... 
*/ 610 /* send SHUTDOWN-ACK */ 611 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 612 /* move to SHUTDOWN-ACK-SENT state */ 613 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 614 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 615 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 616 } 617 asoc->state = SCTP_STATE_SHUTDOWN_ACK_SENT; 618 619 /* start SHUTDOWN timer */ 620 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, 621 stcb, net); 622 } 623} 624 625static void 626sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp, 627 struct sctp_tcb *stcb, struct sctp_nets *net) 628{ 629 struct sctp_association *asoc; 630 631 SCTPDBG(SCTP_DEBUG_INPUT2, 632 "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n"); 633 if (stcb == NULL) 634 return; 635 636 asoc = &stcb->asoc; 637 /* process according to association state */ 638 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 639 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 640 /* unexpected SHUTDOWN-ACK... so ignore... */ 641 SCTP_TCB_UNLOCK(stcb); 642 return; 643 } 644 if (asoc->control_pdapi) { 645 /* 646 * With a normal shutdown we assume the end of last record. 647 */ 648 SCTP_INP_READ_LOCK(stcb->sctp_ep); 649 asoc->control_pdapi->end_added = 1; 650 asoc->control_pdapi->pdapi_aborted = 1; 651 asoc->control_pdapi = NULL; 652 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 653 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 654 } 655 /* are the queues empty? 
*/ 656 if (!TAILQ_EMPTY(&asoc->send_queue) || 657 !TAILQ_EMPTY(&asoc->sent_queue) || 658 !TAILQ_EMPTY(&asoc->out_wheel)) { 659 sctp_report_all_outbound(stcb, 0); 660 } 661 /* stop the timer */ 662 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8); 663 /* send SHUTDOWN-COMPLETE */ 664 sctp_send_shutdown_complete(stcb, net); 665 /* notify upper layer protocol */ 666 if (stcb->sctp_socket) { 667 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL); 668 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 669 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 670 /* Set the connected flag to disconnected */ 671 stcb->sctp_ep->sctp_socket->so_snd.sb_cc = 0; 672 } 673 } 674 SCTP_STAT_INCR_COUNTER32(sctps_shutdown); 675 /* free the TCB but first save off the ep */ 676 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, 677 SCTP_FROM_SCTP_INPUT + SCTP_LOC_9); 678} 679 680/* 681 * Skip past the param header and then we will find the chunk that caused the 682 * problem. There are two possiblities ASCONF or FWD-TSN other than that and 683 * our peer must be broken. 684 */ 685static void 686sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr, 687 struct sctp_nets *net) 688{ 689 struct sctp_chunkhdr *chk; 690 691 chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr)); 692 switch (chk->chunk_type) { 693 case SCTP_ASCONF_ACK: 694 case SCTP_ASCONF: 695 sctp_asconf_cleanup(stcb, net); 696 break; 697 case SCTP_FORWARD_CUM_TSN: 698 stcb->asoc.peer_supports_prsctp = 0; 699 break; 700 default: 701 SCTPDBG(SCTP_DEBUG_INPUT2, 702 "Peer does not support chunk type %d(%x)??\n", 703 chk->chunk_type, (uint32_t) chk->chunk_type); 704 break; 705 } 706} 707 708/* 709 * Skip past the param header and then we will find the param that caused the 710 * problem. There are a number of param's in a ASCONF OR the prsctp param 711 * these will turn of specific features. 
712 */ 713static void 714sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr) 715{ 716 struct sctp_paramhdr *pbad; 717 718 pbad = phdr + 1; 719 switch (ntohs(pbad->param_type)) { 720 /* pr-sctp draft */ 721 case SCTP_PRSCTP_SUPPORTED: 722 stcb->asoc.peer_supports_prsctp = 0; 723 break; 724 case SCTP_SUPPORTED_CHUNK_EXT: 725 break; 726 /* draft-ietf-tsvwg-addip-sctp */ 727 case SCTP_ECN_NONCE_SUPPORTED: 728 stcb->asoc.peer_supports_ecn_nonce = 0; 729 stcb->asoc.ecn_nonce_allowed = 0; 730 stcb->asoc.ecn_allowed = 0; 731 break; 732 case SCTP_ADD_IP_ADDRESS: 733 case SCTP_DEL_IP_ADDRESS: 734 case SCTP_SET_PRIM_ADDR: 735 stcb->asoc.peer_supports_asconf = 0; 736 break; 737 case SCTP_SUCCESS_REPORT: 738 case SCTP_ERROR_CAUSE_IND: 739 SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success? or error cause?\n"); 740 SCTPDBG(SCTP_DEBUG_INPUT2, 741 "Turning off ASCONF to this strange peer\n"); 742 stcb->asoc.peer_supports_asconf = 0; 743 break; 744 default: 745 SCTPDBG(SCTP_DEBUG_INPUT2, 746 "Peer does not support param type %d(%x)??\n", 747 pbad->param_type, (uint32_t) pbad->param_type); 748 break; 749 } 750} 751 752static int 753sctp_handle_error(struct sctp_chunkhdr *ch, 754 struct sctp_tcb *stcb, struct sctp_nets *net) 755{ 756 int chklen; 757 struct sctp_paramhdr *phdr; 758 uint16_t error_type; 759 uint16_t error_len; 760 struct sctp_association *asoc; 761 762 int adjust; 763 764 /* parse through all of the errors and process */ 765 asoc = &stcb->asoc; 766 phdr = (struct sctp_paramhdr *)((caddr_t)ch + 767 sizeof(struct sctp_chunkhdr)); 768 chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr); 769 while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) { 770 /* Process an Error Cause */ 771 error_type = ntohs(phdr->param_type); 772 error_len = ntohs(phdr->param_length); 773 if ((error_len > chklen) || (error_len == 0)) { 774 /* invalid param length for this param */ 775 SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- 
chunk left:%d errorlen:%d\n", 776 chklen, error_len); 777 return (0); 778 } 779 switch (error_type) { 780 case SCTP_CAUSE_INVALID_STREAM: 781 case SCTP_CAUSE_MISSING_PARAM: 782 case SCTP_CAUSE_INVALID_PARAM: 783 case SCTP_CAUSE_NO_USER_DATA: 784 SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back? We have a bug :/ (or do they?)\n", 785 error_type); 786 break; 787 case SCTP_CAUSE_STALE_COOKIE: 788 /* 789 * We only act if we have echoed a cookie and are 790 * waiting. 791 */ 792 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 793 int *p; 794 795 p = (int *)((caddr_t)phdr + sizeof(*phdr)); 796 /* Save the time doubled */ 797 asoc->cookie_preserve_req = ntohl(*p) << 1; 798 asoc->stale_cookie_count++; 799 if (asoc->stale_cookie_count > 800 asoc->max_init_times) { 801 sctp_abort_notification(stcb, 0); 802 /* now free the asoc */ 803 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_10); 804 return (-1); 805 } 806 /* blast back to INIT state */ 807 asoc->state &= ~SCTP_STATE_COOKIE_ECHOED; 808 asoc->state |= SCTP_STATE_COOKIE_WAIT; 809 810 sctp_stop_all_cookie_timers(stcb); 811 sctp_send_initiate(stcb->sctp_ep, stcb); 812 } 813 break; 814 case SCTP_CAUSE_UNRESOLVABLE_ADDR: 815 /* 816 * Nothing we can do here, we don't do hostname 817 * addresses so if the peer does not like my IPv6 818 * (or IPv4 for that matter) it does not matter. If 819 * they don't support that type of address, they can 820 * NOT possibly get that packet type... i.e. with no 821 * IPv6 you can't recieve a IPv6 packet. so we can 822 * safely ignore this one. If we ever added support 823 * for HOSTNAME Addresses, then we would need to do 824 * something here. 
825 */ 826 break; 827 case SCTP_CAUSE_UNRECOG_CHUNK: 828 sctp_process_unrecog_chunk(stcb, phdr, net); 829 break; 830 case SCTP_CAUSE_UNRECOG_PARAM: 831 sctp_process_unrecog_param(stcb, phdr); 832 break; 833 case SCTP_CAUSE_COOKIE_IN_SHUTDOWN: 834 /* 835 * We ignore this since the timer will drive out a 836 * new cookie anyway and there timer will drive us 837 * to send a SHUTDOWN_COMPLETE. We can't send one 838 * here since we don't have their tag. 839 */ 840 break; 841 case SCTP_CAUSE_DELETING_LAST_ADDR: 842 case SCTP_CAUSE_RESOURCE_SHORTAGE: 843 case SCTP_CAUSE_DELETING_SRC_ADDR: 844 /* 845 * We should NOT get these here, but in a 846 * ASCONF-ACK. 847 */ 848 SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n", 849 error_type); 850 break; 851 case SCTP_CAUSE_OUT_OF_RESC: 852 /* 853 * And what, pray tell do we do with the fact that 854 * the peer is out of resources? Not really sure we 855 * could do anything but abort. I suspect this 856 * should have came WITH an abort instead of in a 857 * OP-ERROR. 
858 */ 859 break; 860 default: 861 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n", 862 error_type); 863 break; 864 } 865 adjust = SCTP_SIZE32(error_len); 866 chklen -= adjust; 867 phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust); 868 } 869 return (0); 870} 871 872static int 873sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, 874 struct sctphdr *sh, struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 875 struct sctp_nets *net, int *abort_no_unlock, uint32_t vrf_id, 876 uint32_t table_id) 877{ 878 struct sctp_init_ack *init_ack; 879 int *state; 880 struct mbuf *op_err; 881 882 SCTPDBG(SCTP_DEBUG_INPUT2, 883 "sctp_handle_init_ack: handling INIT-ACK\n"); 884 885 if (stcb == NULL) { 886 SCTPDBG(SCTP_DEBUG_INPUT2, 887 "sctp_handle_init_ack: TCB is null\n"); 888 return (-1); 889 } 890 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) { 891 /* Invalid length */ 892 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 893 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 894 op_err, 0, 0); 895 *abort_no_unlock = 1; 896 return (-1); 897 } 898 init_ack = &cp->init; 899 /* validate parameters */ 900 if (init_ack->initiate_tag == 0) { 901 /* protocol error... send an abort */ 902 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 903 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 904 op_err, 0, 0); 905 *abort_no_unlock = 1; 906 return (-1); 907 } 908 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) { 909 /* protocol error... send an abort */ 910 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 911 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 912 op_err, 0, 0); 913 *abort_no_unlock = 1; 914 return (-1); 915 } 916 if (init_ack->num_inbound_streams == 0) { 917 /* protocol error... 
send an abort */ 918 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 919 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 920 op_err, 0, 0); 921 *abort_no_unlock = 1; 922 return (-1); 923 } 924 if (init_ack->num_outbound_streams == 0) { 925 /* protocol error... send an abort */ 926 op_err = sctp_generate_invmanparam(SCTP_CAUSE_INVALID_PARAM); 927 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, sh, 928 op_err, 0, 0); 929 *abort_no_unlock = 1; 930 return (-1); 931 } 932 /* process according to association state... */ 933 state = &stcb->asoc.state; 934 switch (*state & SCTP_STATE_MASK) { 935 case SCTP_STATE_COOKIE_WAIT: 936 /* this is the expected state for this chunk */ 937 /* process the INIT-ACK parameters */ 938 if (stcb->asoc.primary_destination->dest_state & 939 SCTP_ADDR_UNCONFIRMED) { 940 /* 941 * The primary is where we sent the INIT, we can 942 * always consider it confirmed when the INIT-ACK is 943 * returned. Do this before we load addresses 944 * though. 
945 */ 946 stcb->asoc.primary_destination->dest_state &= 947 ~SCTP_ADDR_UNCONFIRMED; 948 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 949 stcb, 0, (void *)stcb->asoc.primary_destination); 950 } 951 if (sctp_process_init_ack(m, iphlen, offset, sh, cp, stcb, 952 net, abort_no_unlock, vrf_id, 953 table_id) < 0) { 954 /* error in parsing parameters */ 955 return (-1); 956 } 957 /* update our state */ 958 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n"); 959 if (*state & SCTP_STATE_SHUTDOWN_PENDING) { 960 *state = SCTP_STATE_COOKIE_ECHOED | 961 SCTP_STATE_SHUTDOWN_PENDING; 962 } else { 963 *state = SCTP_STATE_COOKIE_ECHOED; 964 } 965 966 /* reset the RTO calc */ 967 stcb->asoc.overall_error_count = 0; 968 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 969 /* 970 * collapse the init timer back in case of a exponential 971 * backoff 972 */ 973 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep, 974 stcb, net); 975 /* 976 * the send at the end of the inbound data processing will 977 * cause the cookie to be sent 978 */ 979 break; 980 case SCTP_STATE_SHUTDOWN_SENT: 981 /* incorrect state... discard */ 982 break; 983 case SCTP_STATE_COOKIE_ECHOED: 984 /* incorrect state... discard */ 985 break; 986 case SCTP_STATE_OPEN: 987 /* incorrect state... discard */ 988 break; 989 case SCTP_STATE_EMPTY: 990 case SCTP_STATE_INUSE: 991 default: 992 /* incorrect state... 
discard */ 993 return (-1); 994 break; 995 } 996 SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n"); 997 return (0); 998} 999 1000 1001/* 1002 * handle a state cookie for an existing association m: input packet mbuf 1003 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a 1004 * "split" mbuf and the cookie signature does not exist offset: offset into 1005 * mbuf to the cookie-echo chunk 1006 */ 1007static struct sctp_tcb * 1008sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, 1009 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1010 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net, 1011 struct sockaddr *init_src, int *notification, sctp_assoc_t * sac_assoc_id, 1012 uint32_t vrf_id, uint32_t table_id) 1013{ 1014 struct sctp_association *asoc; 1015 struct sctp_init_chunk *init_cp, init_buf; 1016 struct sctp_init_ack_chunk *initack_cp, initack_buf; 1017 int chk_length; 1018 int init_offset, initack_offset, i; 1019 int retval; 1020 int spec_flag = 0; 1021 int how_indx; 1022 1023 /* I know that the TCB is non-NULL from the caller */ 1024 asoc = &stcb->asoc; 1025 for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) { 1026 if (asoc->cookie_how[how_indx] == 0) 1027 break; 1028 } 1029 if (how_indx < sizeof(asoc->cookie_how)) { 1030 asoc->cookie_how[how_indx] = 1; 1031 } 1032 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 1033 /* SHUTDOWN came in after sending INIT-ACK */ 1034 struct mbuf *op_err; 1035 struct sctp_paramhdr *ph; 1036 1037 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 1038 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 1039 0, M_DONTWAIT, 1, MT_DATA); 1040 if (op_err == NULL) { 1041 /* FOOBAR */ 1042 return (NULL); 1043 } 1044 /* pre-reserve some space */ 1045 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 1046 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 1047 SCTP_BUF_RESV_UF(op_err, sizeof(struct 
sctp_chunkhdr)); 1048 /* Set the len */ 1049 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr); 1050 ph = mtod(op_err, struct sctp_paramhdr *); 1051 ph->param_type = htons(SCTP_CAUSE_COOKIE_IN_SHUTDOWN); 1052 ph->param_length = htons(sizeof(struct sctp_paramhdr)); 1053 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag, 1054 vrf_id, table_id); 1055 if (how_indx < sizeof(asoc->cookie_how)) 1056 asoc->cookie_how[how_indx] = 2; 1057 return (NULL); 1058 } 1059 /* 1060 * find and validate the INIT chunk in the cookie (peer's info) the 1061 * INIT should start after the cookie-echo header struct (chunk 1062 * header, state cookie header struct) 1063 */ 1064 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk); 1065 1066 init_cp = (struct sctp_init_chunk *) 1067 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1068 (uint8_t *) & init_buf); 1069 if (init_cp == NULL) { 1070 /* could not pull a INIT chunk in cookie */ 1071 return (NULL); 1072 } 1073 chk_length = ntohs(init_cp->ch.chunk_length); 1074 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1075 return (NULL); 1076 } 1077 /* 1078 * find and validate the INIT-ACK chunk in the cookie (my info) the 1079 * INIT-ACK follows the INIT chunk 1080 */ 1081 initack_offset = init_offset + SCTP_SIZE32(chk_length); 1082 initack_cp = (struct sctp_init_ack_chunk *) 1083 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1084 (uint8_t *) & initack_buf); 1085 if (initack_cp == NULL) { 1086 /* could not pull INIT-ACK chunk in cookie */ 1087 return (NULL); 1088 } 1089 chk_length = ntohs(initack_cp->ch.chunk_length); 1090 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1091 return (NULL); 1092 } 1093 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1094 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) { 1095 /* 1096 * case D in Section 5.2.4 Table 2: MMAA process accordingly 1097 * to get into the OPEN state 1098 */ 1099 if (ntohl(initack_cp->init.initial_tsn) != 
asoc->init_seq_number) { 1100#ifdef INVARIANTS 1101 panic("Case D and non-match seq?"); 1102#else 1103 SCTP_PRINTF("Case D, seq non-match %x vs %x?\n", 1104 ntohl(initack_cp->init.initial_tsn), 1105 asoc->init_seq_number); 1106#endif 1107 } 1108 switch SCTP_GET_STATE 1109 (asoc) { 1110 case SCTP_STATE_COOKIE_WAIT: 1111 case SCTP_STATE_COOKIE_ECHOED: 1112 /* 1113 * INIT was sent but got a COOKIE_ECHO with the 1114 * correct tags... just accept it...but we must 1115 * process the init so that we can make sure we have 1116 * the right seq no's. 1117 */ 1118 /* First we must process the INIT !! */ 1119 retval = sctp_process_init(init_cp, stcb, net); 1120 if (retval < 0) { 1121 if (how_indx < sizeof(asoc->cookie_how)) 1122 asoc->cookie_how[how_indx] = 3; 1123 return (NULL); 1124 } 1125 /* we have already processed the INIT so no problem */ 1126 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, 1127 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_11); 1128 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12); 1129 /* update current state */ 1130 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1131 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1132 else 1133 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1134 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1135 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING; 1136 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1137 stcb->sctp_ep, stcb, asoc->primary_destination); 1138 1139 } else { 1140 /* if ok, move to OPEN state */ 1141 asoc->state = SCTP_STATE_OPEN; 1142 } 1143 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1144 sctp_stop_all_cookie_timers(stcb); 1145 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1146 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1147 (inp->sctp_socket->so_qlimit == 0) 1148 ) { 1149 /* 1150 * Here is where collision would go if we 1151 * did a connect() and instead got a 1152 * init/init-ack/cookie done before the 1153 * init-ack 
came back.. 1154 */ 1155 stcb->sctp_ep->sctp_flags |= 1156 SCTP_PCB_FLAGS_CONNECTED; 1157 soisconnected(stcb->sctp_ep->sctp_socket); 1158 } 1159 /* notify upper layer */ 1160 *notification = SCTP_NOTIFY_ASSOC_UP; 1161 /* 1162 * since we did not send a HB make sure we don't 1163 * double things 1164 */ 1165 net->hb_responded = 1; 1166 1167 if (stcb->asoc.sctp_autoclose_ticks && 1168 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) { 1169 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 1170 inp, stcb, NULL); 1171 } 1172 break; 1173 default: 1174 /* 1175 * we're in the OPEN state (or beyond), so peer must 1176 * have simply lost the COOKIE-ACK 1177 */ 1178 break; 1179 } /* end switch */ 1180 sctp_stop_all_cookie_timers(stcb); 1181 /* 1182 * We ignore the return code here.. not sure if we should 1183 * somehow abort.. but we do have an existing asoc. This 1184 * really should not fail. 1185 */ 1186 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1187 init_offset + sizeof(struct sctp_init_chunk), 1188 initack_offset, sh, init_src)) { 1189 if (how_indx < sizeof(asoc->cookie_how)) 1190 asoc->cookie_how[how_indx] = 4; 1191 return (NULL); 1192 } 1193 /* respond with a COOKIE-ACK */ 1194 sctp_toss_old_cookies(stcb, asoc); 1195 sctp_send_cookie_ack(stcb); 1196 if (how_indx < sizeof(asoc->cookie_how)) 1197 asoc->cookie_how[how_indx] = 5; 1198 return (stcb); 1199 } 1200 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1201 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag && 1202 cookie->tie_tag_my_vtag == 0 && 1203 cookie->tie_tag_peer_vtag == 0) { 1204 /* 1205 * case C in Section 5.2.4 Table 2: XMOO silently discard 1206 */ 1207 if (how_indx < sizeof(asoc->cookie_how)) 1208 asoc->cookie_how[how_indx] = 6; 1209 return (NULL); 1210 } 1211 if (ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag && 1212 (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag || 1213 init_cp->init.initiate_tag == 0)) { 1214 /* 1215 * case B in Section 5.2.4 Table 2: MXAA or MOAA 
my info 1216 * should be ok, re-accept peer info 1217 */ 1218 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1219 /* 1220 * Extension of case C. If we hit this, then the 1221 * random number generator returned the same vtag 1222 * when we first sent our INIT-ACK and when we later 1223 * sent our INIT. The side with the seq numbers that 1224 * are different will be the one that normnally 1225 * would have hit case C. This in effect "extends" 1226 * our vtags in this collision case to be 64 bits. 1227 * The same collision could occur aka you get both 1228 * vtag and seq number the same twice in a row.. but 1229 * is much less likely. If it did happen then we 1230 * would proceed through and bring up the assoc.. we 1231 * may end up with the wrong stream setup however.. 1232 * which would be bad.. but there is no way to 1233 * tell.. until we send on a stream that does not 1234 * exist :-) 1235 */ 1236 if (how_indx < sizeof(asoc->cookie_how)) 1237 asoc->cookie_how[how_indx] = 7; 1238 1239 return (NULL); 1240 } 1241 if (how_indx < sizeof(asoc->cookie_how)) 1242 asoc->cookie_how[how_indx] = 8; 1243 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13); 1244 sctp_stop_all_cookie_timers(stcb); 1245 /* 1246 * since we did not send a HB make sure we don't double 1247 * things 1248 */ 1249 net->hb_responded = 1; 1250 if (stcb->asoc.sctp_autoclose_ticks && 1251 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1252 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1253 NULL); 1254 } 1255 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1256 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1257 1258 /* Note last_cwr_tsn? where is this used? */ 1259 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1260 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) { 1261 /* 1262 * Ok the peer probably discarded our data (if we 1263 * echoed a cookie+data). 
So anything on the 1264 * sent_queue should be marked for retransmit, we 1265 * may not get something to kick us so it COULD 1266 * still take a timeout to move these.. but it can't 1267 * hurt to mark them. 1268 */ 1269 struct sctp_tmit_chunk *chk; 1270 1271 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1272 if (chk->sent < SCTP_DATAGRAM_RESEND) { 1273 chk->sent = SCTP_DATAGRAM_RESEND; 1274 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1275 spec_flag++; 1276 } 1277 } 1278 1279 } 1280 /* process the INIT info (peer's info) */ 1281 retval = sctp_process_init(init_cp, stcb, net); 1282 if (retval < 0) { 1283 if (how_indx < sizeof(asoc->cookie_how)) 1284 asoc->cookie_how[how_indx] = 9; 1285 return (NULL); 1286 } 1287 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1288 init_offset + sizeof(struct sctp_init_chunk), 1289 initack_offset, sh, init_src)) { 1290 if (how_indx < sizeof(asoc->cookie_how)) 1291 asoc->cookie_how[how_indx] = 10; 1292 return (NULL); 1293 } 1294 if ((asoc->state & SCTP_STATE_COOKIE_WAIT) || 1295 (asoc->state & SCTP_STATE_COOKIE_ECHOED)) { 1296 *notification = SCTP_NOTIFY_ASSOC_UP; 1297 1298 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1299 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1300 (inp->sctp_socket->so_qlimit == 0)) { 1301 stcb->sctp_ep->sctp_flags |= 1302 SCTP_PCB_FLAGS_CONNECTED; 1303 soisconnected(stcb->sctp_ep->sctp_socket); 1304 } 1305 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1306 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1307 else 1308 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1309 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1310 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1311 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1312 SCTP_STAT_INCR_COUNTER32(sctps_restartestab); 1313 } else { 1314 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1315 } 1316 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1317 asoc->state = SCTP_STATE_OPEN | 
SCTP_STATE_SHUTDOWN_PENDING; 1318 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1319 stcb->sctp_ep, stcb, asoc->primary_destination); 1320 1321 } else { 1322 asoc->state = SCTP_STATE_OPEN; 1323 } 1324 sctp_stop_all_cookie_timers(stcb); 1325 sctp_toss_old_cookies(stcb, asoc); 1326 sctp_send_cookie_ack(stcb); 1327 if (spec_flag) { 1328 /* 1329 * only if we have retrans set do we do this. What 1330 * this call does is get only the COOKIE-ACK out and 1331 * then when we return the normal call to 1332 * sctp_chunk_output will get the retrans out behind 1333 * this. 1334 */ 1335 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK); 1336 } 1337 if (how_indx < sizeof(asoc->cookie_how)) 1338 asoc->cookie_how[how_indx] = 11; 1339 1340 return (stcb); 1341 } 1342 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1343 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) && 1344 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce && 1345 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce && 1346 cookie->tie_tag_peer_vtag != 0) { 1347 struct sctpasochead *head; 1348 1349 /* 1350 * case A in Section 5.2.4 Table 2: XXMM (peer restarted) 1351 */ 1352 /* temp code */ 1353 if (how_indx < sizeof(asoc->cookie_how)) 1354 asoc->cookie_how[how_indx] = 12; 1355 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14); 1356 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15); 1357 1358 *sac_assoc_id = sctp_get_associd(stcb); 1359 /* notify upper layer */ 1360 *notification = SCTP_NOTIFY_ASSOC_RESTART; 1361 atomic_add_int(&stcb->asoc.refcnt, 1); 1362 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) && 1363 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 1364 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) { 1365 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1366 } 1367 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1368 SCTP_STAT_INCR_GAUGE32(sctps_restartestab); 1369 } else if 
(SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1370 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab); 1371 } 1372 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1373 asoc->state = SCTP_STATE_OPEN | 1374 SCTP_STATE_SHUTDOWN_PENDING; 1375 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1376 stcb->sctp_ep, stcb, asoc->primary_destination); 1377 1378 } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) { 1379 /* move to OPEN state, if not in SHUTDOWN_SENT */ 1380 asoc->state = SCTP_STATE_OPEN; 1381 } 1382 asoc->pre_open_streams = 1383 ntohs(initack_cp->init.num_outbound_streams); 1384 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1385 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; 1386 1387 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1388 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1389 1390 asoc->str_reset_seq_in = asoc->init_seq_number; 1391 1392 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1393 if (asoc->mapping_array) 1394 memset(asoc->mapping_array, 0, 1395 asoc->mapping_array_size); 1396 SCTP_TCB_UNLOCK(stcb); 1397 SCTP_INP_INFO_WLOCK(); 1398 SCTP_INP_WLOCK(stcb->sctp_ep); 1399 SCTP_TCB_LOCK(stcb); 1400 atomic_add_int(&stcb->asoc.refcnt, -1); 1401 /* send up all the data */ 1402 SCTP_TCB_SEND_LOCK(stcb); 1403 1404 sctp_report_all_outbound(stcb, 1); 1405 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1406 stcb->asoc.strmout[i].stream_no = i; 1407 stcb->asoc.strmout[i].next_sequence_sent = 0; 1408 stcb->asoc.strmout[i].last_msg_incomplete = 0; 1409 } 1410 /* process the INIT-ACK info (my info) */ 1411 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1412 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1413 1414 /* pull from vtag hash */ 1415 LIST_REMOVE(stcb, sctp_asocs); 1416 /* re-insert to new vtag position */ 1417 head = &sctppcbinfo.sctp_asochash[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, 1418 sctppcbinfo.hashasocmark)]; 1419 /* 1420 * put it in the 
bucket in the vtag hash of assoc's for the 1421 * system 1422 */ 1423 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 1424 1425 /* Is this the first restart? */ 1426 if (stcb->asoc.in_restart_hash == 0) { 1427 /* Ok add it to assoc_id vtag hash */ 1428 head = &sctppcbinfo.sctp_restarthash[SCTP_PCBHASH_ASOC(stcb->asoc.assoc_id, 1429 sctppcbinfo.hashrestartmark)]; 1430 LIST_INSERT_HEAD(head, stcb, sctp_tcbrestarhash); 1431 stcb->asoc.in_restart_hash = 1; 1432 } 1433 /* process the INIT info (peer's info) */ 1434 SCTP_TCB_SEND_UNLOCK(stcb); 1435 SCTP_INP_WUNLOCK(stcb->sctp_ep); 1436 SCTP_INP_INFO_WUNLOCK(); 1437 1438 retval = sctp_process_init(init_cp, stcb, net); 1439 if (retval < 0) { 1440 if (how_indx < sizeof(asoc->cookie_how)) 1441 asoc->cookie_how[how_indx] = 13; 1442 1443 return (NULL); 1444 } 1445 /* 1446 * since we did not send a HB make sure we don't double 1447 * things 1448 */ 1449 net->hb_responded = 1; 1450 1451 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1452 init_offset + sizeof(struct sctp_init_chunk), 1453 initack_offset, sh, init_src)) { 1454 if (how_indx < sizeof(asoc->cookie_how)) 1455 asoc->cookie_how[how_indx] = 14; 1456 1457 return (NULL); 1458 } 1459 /* respond with a COOKIE-ACK */ 1460 sctp_stop_all_cookie_timers(stcb); 1461 sctp_toss_old_cookies(stcb, asoc); 1462 sctp_send_cookie_ack(stcb); 1463 if (how_indx < sizeof(asoc->cookie_how)) 1464 asoc->cookie_how[how_indx] = 15; 1465 1466 return (stcb); 1467 } 1468 if (how_indx < sizeof(asoc->cookie_how)) 1469 asoc->cookie_how[how_indx] = 16; 1470 /* all other cases... 
*/ 1471 return (NULL); 1472} 1473 1474 1475/* 1476 * handle a state cookie for a new association m: input packet mbuf chain-- 1477 * assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a "split" mbuf 1478 * and the cookie signature does not exist offset: offset into mbuf to the 1479 * cookie-echo chunk length: length of the cookie chunk to: where the init 1480 * was from returns a new TCB 1481 */ 1482static struct sctp_tcb * 1483sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, 1484 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1485 struct sctp_inpcb *inp, struct sctp_nets **netp, 1486 struct sockaddr *init_src, int *notification, 1487 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 1488 uint32_t vrf_id, uint32_t table_id) 1489{ 1490 struct sctp_tcb *stcb; 1491 struct sctp_init_chunk *init_cp, init_buf; 1492 struct sctp_init_ack_chunk *initack_cp, initack_buf; 1493 struct sockaddr_storage sa_store; 1494 struct sockaddr *initack_src = (struct sockaddr *)&sa_store; 1495 struct sockaddr_in *sin; 1496 struct sockaddr_in6 *sin6; 1497 struct sctp_association *asoc; 1498 int chk_length; 1499 int init_offset, initack_offset, initack_limit; 1500 int retval; 1501 int error = 0; 1502 uint32_t old_tag; 1503 uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE]; 1504 1505 /* 1506 * find and validate the INIT chunk in the cookie (peer's info) the 1507 * INIT should start after the cookie-echo header struct (chunk 1508 * header, state cookie header struct) 1509 */ 1510 init_offset = offset + sizeof(struct sctp_cookie_echo_chunk); 1511 init_cp = (struct sctp_init_chunk *) 1512 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1513 (uint8_t *) & init_buf); 1514 if (init_cp == NULL) { 1515 /* could not pull a INIT chunk in cookie */ 1516 SCTPDBG(SCTP_DEBUG_INPUT1, 1517 "process_cookie_new: could not pull INIT chunk hdr\n"); 1518 return (NULL); 1519 } 1520 chk_length = ntohs(init_cp->ch.chunk_length); 1521 if 
(init_cp->ch.chunk_type != SCTP_INITIATION) { 1522 SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n"); 1523 return (NULL); 1524 } 1525 initack_offset = init_offset + SCTP_SIZE32(chk_length); 1526 /* 1527 * find and validate the INIT-ACK chunk in the cookie (my info) the 1528 * INIT-ACK follows the INIT chunk 1529 */ 1530 initack_cp = (struct sctp_init_ack_chunk *) 1531 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1532 (uint8_t *) & initack_buf); 1533 if (initack_cp == NULL) { 1534 /* could not pull INIT-ACK chunk in cookie */ 1535 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n"); 1536 return (NULL); 1537 } 1538 chk_length = ntohs(initack_cp->ch.chunk_length); 1539 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1540 return (NULL); 1541 } 1542 /* 1543 * NOTE: We can't use the INIT_ACK's chk_length to determine the 1544 * "initack_limit" value. This is because the chk_length field 1545 * includes the length of the cookie, but the cookie is omitted when 1546 * the INIT and INIT_ACK are tacked onto the cookie... 1547 */ 1548 initack_limit = offset + cookie_len; 1549 1550 /* 1551 * now that we know the INIT/INIT-ACK are in place, create a new TCB 1552 * and popluate 1553 */ 1554 stcb = sctp_aloc_assoc(inp, init_src, 0, &error, 1555 ntohl(initack_cp->init.initiate_tag), vrf_id); 1556 if (stcb == NULL) { 1557 struct mbuf *op_err; 1558 1559 /* memory problem? 
*/ 1560 SCTPDBG(SCTP_DEBUG_INPUT1, 1561 "process_cookie_new: no room for another TCB!\n"); 1562 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1563 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 1564 sh, op_err, vrf_id, table_id); 1565 return (NULL); 1566 } 1567 /* get the correct sctp_nets */ 1568 if (netp) 1569 *netp = sctp_findnet(stcb, init_src); 1570 1571 asoc = &stcb->asoc; 1572 /* save the table id (vrf_id is done in aloc_assoc) */ 1573 asoc->table_id = table_id; 1574 /* get scope variables out of cookie */ 1575 asoc->ipv4_local_scope = cookie->ipv4_scope; 1576 asoc->site_scope = cookie->site_scope; 1577 asoc->local_scope = cookie->local_scope; 1578 asoc->loopback_scope = cookie->loopback_scope; 1579 1580 if ((asoc->ipv4_addr_legal != cookie->ipv4_addr_legal) || 1581 (asoc->ipv6_addr_legal != cookie->ipv6_addr_legal)) { 1582 struct mbuf *op_err; 1583 1584 /* 1585 * Houston we have a problem. The EP changed while the 1586 * cookie was in flight. Only recourse is to abort the 1587 * association. 
1588 */ 1589 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 1590 sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen, 1591 sh, op_err, vrf_id, table_id); 1592 return (NULL); 1593 } 1594 /* process the INIT-ACK info (my info) */ 1595 old_tag = asoc->my_vtag; 1596 asoc->assoc_id = asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1597 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1598 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1599 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1600 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; 1601 asoc->last_cwr_tsn = asoc->init_seq_number - 1; 1602 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1603 asoc->str_reset_seq_in = asoc->init_seq_number; 1604 1605 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1606 1607 /* process the INIT info (peer's info) */ 1608 if (netp) 1609 retval = sctp_process_init(init_cp, stcb, *netp); 1610 else 1611 retval = 0; 1612 if (retval < 0) { 1613 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1614 return (NULL); 1615 } 1616 /* load all addresses */ 1617 if (sctp_load_addresses_from_init(stcb, m, iphlen, 1618 init_offset + sizeof(struct sctp_init_chunk), initack_offset, sh, 1619 init_src)) { 1620 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17); 1621 return (NULL); 1622 } 1623 /* 1624 * verify any preceding AUTH chunk that was skipped 1625 */ 1626 /* pull the local authentication parameters from the cookie/init-ack */ 1627 sctp_auth_get_cookie_params(stcb, m, 1628 initack_offset + sizeof(struct sctp_init_ack_chunk), 1629 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk))); 1630 if (auth_skipped) { 1631 struct sctp_auth_chunk *auth; 1632 1633 auth = (struct sctp_auth_chunk *) 1634 sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf); 1635 if ((auth == NULL) || 
sctp_handle_auth(stcb, auth, m, auth_offset)) { 1636 /* auth HMAC failed, dump the assoc and packet */ 1637 SCTPDBG(SCTP_DEBUG_AUTH1, 1638 "COOKIE-ECHO: AUTH failed\n"); 1639 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18); 1640 return (NULL); 1641 } else { 1642 /* remaining chunks checked... good to go */ 1643 stcb->asoc.authenticated = 1; 1644 } 1645 } 1646 /* update current state */ 1647 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 1648 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1649 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING; 1650 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1651 stcb->sctp_ep, stcb, asoc->primary_destination); 1652 } else { 1653 asoc->state = SCTP_STATE_OPEN; 1654 } 1655 sctp_stop_all_cookie_timers(stcb); 1656 SCTP_STAT_INCR_COUNTER32(sctps_passiveestab); 1657 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1658 1659 /* 1660 * if we're doing ASCONFs, check to see if we have any new local 1661 * addresses that need to get added to the peer (eg. addresses 1662 * changed while cookie echo in flight). This needs to be done 1663 * after we go to the OPEN state to do the correct asconf 1664 * processing. else, make sure we have the correct addresses in our 1665 * lists 1666 */ 1667 1668 /* warning, we re-use sin, sin6, sa_store here! 
*/ 1669 /* pull in local_address (our "from" address) */ 1670 if (cookie->laddr_type == SCTP_IPV4_ADDRESS) { 1671 /* source addr is IPv4 */ 1672 sin = (struct sockaddr_in *)initack_src; 1673 memset(sin, 0, sizeof(*sin)); 1674 sin->sin_family = AF_INET; 1675 sin->sin_len = sizeof(struct sockaddr_in); 1676 sin->sin_addr.s_addr = cookie->laddress[0]; 1677 } else if (cookie->laddr_type == SCTP_IPV6_ADDRESS) { 1678 /* source addr is IPv6 */ 1679 sin6 = (struct sockaddr_in6 *)initack_src; 1680 memset(sin6, 0, sizeof(*sin6)); 1681 sin6->sin6_family = AF_INET6; 1682 sin6->sin6_len = sizeof(struct sockaddr_in6); 1683 sin6->sin6_scope_id = cookie->scope_id; 1684 memcpy(&sin6->sin6_addr, cookie->laddress, 1685 sizeof(sin6->sin6_addr)); 1686 } else { 1687 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_19); 1688 return (NULL); 1689 } 1690 1691 sctp_check_address_list(stcb, m, 1692 initack_offset + sizeof(struct sctp_init_ack_chunk), 1693 initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)), 1694 initack_src, cookie->local_scope, cookie->site_scope, 1695 cookie->ipv4_scope, cookie->loopback_scope); 1696 1697 1698 /* set up to notify upper layer */ 1699 *notification = SCTP_NOTIFY_ASSOC_UP; 1700 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1701 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1702 (inp->sctp_socket->so_qlimit == 0)) { 1703 /* 1704 * This is an endpoint that called connect() how it got a 1705 * cookie that is NEW is a bit of a mystery. It must be that 1706 * the INIT was sent, but before it got there.. a complete 1707 * INIT/INIT-ACK/COOKIE arrived. But of course then it 1708 * should have went to the other code.. not here.. oh well.. 1709 * a bit of protection is worth having.. 
1710 */ 1711 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 1712 soisconnected(stcb->sctp_ep->sctp_socket); 1713 } else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 1714 (inp->sctp_socket->so_qlimit)) { 1715 /* 1716 * We don't want to do anything with this one. Since it is 1717 * the listening guy. The timer will get started for 1718 * accepted connections in the caller. 1719 */ 1720 ; 1721 } 1722 /* since we did not send a HB make sure we don't double things */ 1723 if ((netp) && (*netp)) 1724 (*netp)->hb_responded = 1; 1725 1726 if (stcb->asoc.sctp_autoclose_ticks && 1727 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1728 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL); 1729 } 1730 /* respond with a COOKIE-ACK */ 1731 /* calculate the RTT */ 1732 if ((netp) && (*netp)) 1733 (*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp, 1734 &cookie->time_entered); 1735 sctp_send_cookie_ack(stcb); 1736 return (stcb); 1737} 1738 1739 1740/* 1741 * handles a COOKIE-ECHO message stcb: modified to either a new or left as 1742 * existing (non-NULL) TCB 1743 */ 1744static struct mbuf * 1745sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 1746 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 1747 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 1748 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 1749 struct sctp_tcb **locked_tcb, uint32_t vrf_id, uint32_t table_id) 1750{ 1751 struct sctp_state_cookie *cookie; 1752 struct sockaddr_in6 sin6; 1753 struct sockaddr_in sin; 1754 struct sctp_tcb *l_stcb = *stcb; 1755 struct sctp_inpcb *l_inp; 1756 struct sockaddr *to; 1757 sctp_assoc_t sac_restart_id; 1758 struct sctp_pcb *ep; 1759 struct mbuf *m_sig; 1760 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 1761 uint8_t *sig; 1762 uint8_t cookie_ok = 0; 1763 unsigned int size_of_pkt, sig_offset, cookie_offset; 1764 unsigned int cookie_len; 1765 struct timeval now; 1766 
struct timeval time_expires; 1767 struct sockaddr_storage dest_store; 1768 struct sockaddr *localep_sa = (struct sockaddr *)&dest_store; 1769 struct ip *iph; 1770 int notification = 0; 1771 struct sctp_nets *netl; 1772 int had_a_existing_tcb = 0; 1773 1774 SCTPDBG(SCTP_DEBUG_INPUT2, 1775 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 1776 1777 if (inp_p == NULL) { 1778 return (NULL); 1779 } 1780 /* First get the destination address setup too. */ 1781 iph = mtod(m, struct ip *); 1782 if (iph->ip_v == IPVERSION) { 1783 /* its IPv4 */ 1784 struct sockaddr_in *sin; 1785 1786 sin = (struct sockaddr_in *)(localep_sa); 1787 memset(sin, 0, sizeof(*sin)); 1788 sin->sin_family = AF_INET; 1789 sin->sin_len = sizeof(*sin); 1790 sin->sin_port = sh->dest_port; 1791 sin->sin_addr.s_addr = iph->ip_dst.s_addr; 1792 size_of_pkt = SCTP_GET_IPV4_LENGTH(iph); 1793 } else if (iph->ip_v == (IPV6_VERSION >> 4)) { 1794 /* its IPv6 */ 1795 struct ip6_hdr *ip6; 1796 struct sockaddr_in6 *sin6; 1797 1798 sin6 = (struct sockaddr_in6 *)(localep_sa); 1799 memset(sin6, 0, sizeof(*sin6)); 1800 sin6->sin6_family = AF_INET6; 1801 sin6->sin6_len = sizeof(struct sockaddr_in6); 1802 ip6 = mtod(m, struct ip6_hdr *); 1803 sin6->sin6_port = sh->dest_port; 1804 sin6->sin6_addr = ip6->ip6_dst; 1805 size_of_pkt = SCTP_GET_IPV6_LENGTH(ip6) + iphlen; 1806 } else { 1807 return (NULL); 1808 } 1809 1810 cookie = &cp->cookie; 1811 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 1812 cookie_len = ntohs(cp->ch.chunk_length); 1813 1814 if ((cookie->peerport != sh->src_port) && 1815 (cookie->myport != sh->dest_port) && 1816 (cookie->my_vtag != sh->v_tag)) { 1817 /* 1818 * invalid ports or bad tag. Note that we always leave the 1819 * v_tag in the header in network order and when we stored 1820 * it in the my_vtag slot we also left it in network order. 
1821 * This maintains the match even though it may be in the 1822 * opposite byte order of the machine :-> 1823 */ 1824 return (NULL); 1825 } 1826 if (cookie_len > size_of_pkt || 1827 cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 1828 sizeof(struct sctp_init_chunk) + 1829 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 1830 /* cookie too long! or too small */ 1831 return (NULL); 1832 } 1833 /* 1834 * split off the signature into its own mbuf (since it should not be 1835 * calculated in the sctp_hmac_m() call). 1836 */ 1837 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 1838 if (sig_offset > size_of_pkt) { 1839 /* packet not correct size! */ 1840 /* XXX this may already be accounted for earlier... */ 1841 return (NULL); 1842 } 1843 m_sig = m_split(m, sig_offset, M_DONTWAIT); 1844 if (m_sig == NULL) { 1845 /* out of memory or ?? */ 1846 return (NULL); 1847 } 1848 /* 1849 * compute the signature/digest for the cookie 1850 */ 1851 ep = &(*inp_p)->sctp_ep; 1852 l_inp = *inp_p; 1853 if (l_stcb) { 1854 SCTP_TCB_UNLOCK(l_stcb); 1855 } 1856 SCTP_INP_RLOCK(l_inp); 1857 if (l_stcb) { 1858 SCTP_TCB_LOCK(l_stcb); 1859 } 1860 /* which cookie is it? 
*/ 1861 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 1862 (ep->current_secret_number != ep->last_secret_number)) { 1863 /* it's the old cookie */ 1864 (void)sctp_hmac_m(SCTP_HMAC, 1865 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 1866 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig); 1867 } else { 1868 /* it's the current cookie */ 1869 (void)sctp_hmac_m(SCTP_HMAC, 1870 (uint8_t *) ep->secret_key[(int)ep->current_secret_number], 1871 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig); 1872 } 1873 /* get the signature */ 1874 SCTP_INP_RUNLOCK(l_inp); 1875 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 1876 if (sig == NULL) { 1877 /* couldn't find signature */ 1878 sctp_m_freem(m_sig); 1879 return (NULL); 1880 } 1881 /* compare the received digest with the computed digest */ 1882 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 1883 /* try the old cookie? */ 1884 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 1885 (ep->current_secret_number != ep->last_secret_number)) { 1886 /* compute digest with old */ 1887 (void)sctp_hmac_m(SCTP_HMAC, 1888 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 1889 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig); 1890 /* compare */ 1891 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 1892 cookie_ok = 1; 1893 } 1894 } else { 1895 cookie_ok = 1; 1896 } 1897 1898 /* 1899 * Now before we continue we must reconstruct our mbuf so that 1900 * normal processing of any other chunks will work. 
1901 */ 1902 { 1903 struct mbuf *m_at; 1904 1905 m_at = m; 1906 while (SCTP_BUF_NEXT(m_at) != NULL) { 1907 m_at = SCTP_BUF_NEXT(m_at); 1908 } 1909 SCTP_BUF_NEXT(m_at) = m_sig; 1910 } 1911 1912 if (cookie_ok == 0) { 1913 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 1914 SCTPDBG(SCTP_DEBUG_INPUT2, 1915 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 1916 (uint32_t) offset, cookie_offset, sig_offset); 1917 return (NULL); 1918 } 1919 /* 1920 * check the cookie timestamps to be sure it's not stale 1921 */ 1922 (void)SCTP_GETTIME_TIMEVAL(&now); 1923 /* Expire time is in Ticks, so we convert to seconds */ 1924 time_expires.tv_sec = cookie->time_entered.tv_sec + cookie->cookie_life; 1925 time_expires.tv_usec = cookie->time_entered.tv_usec; 1926 if (timevalcmp(&now, &time_expires, >)) { 1927 /* cookie is stale! */ 1928 struct mbuf *op_err; 1929 struct sctp_stale_cookie_msg *scm; 1930 uint32_t tim; 1931 1932 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg), 1933 0, M_DONTWAIT, 1, MT_DATA); 1934 if (op_err == NULL) { 1935 /* FOOBAR */ 1936 return (NULL); 1937 } 1938 /* pre-reserve some space */ 1939 SCTP_BUF_RESV_UF(op_err, sizeof(struct ip6_hdr)); 1940 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctphdr)); 1941 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1942 1943 /* Set the len */ 1944 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg); 1945 scm = mtod(op_err, struct sctp_stale_cookie_msg *); 1946 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE); 1947 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) + 1948 (sizeof(uint32_t)))); 1949 /* seconds to usec */ 1950 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 1951 /* add in usec */ 1952 if (tim == 0) 1953 tim = now.tv_usec - cookie->time_entered.tv_usec; 1954 scm->time_usec = htonl(tim); 1955 sctp_send_operr_to(m, iphlen, op_err, cookie->peers_vtag, 1956 vrf_id, table_id); 1957 return (NULL); 1958 } 1959 /* 1960 * Now we must 
see with the lookup address if we have an existing 1961 * asoc. This will only happen if we were in the COOKIE-WAIT state 1962 * and a INIT collided with us and somewhere the peer sent the 1963 * cookie on another address besides the single address our assoc 1964 * had for him. In this case we will have one of the tie-tags set at 1965 * least AND the address field in the cookie can be used to look it 1966 * up. 1967 */ 1968 to = NULL; 1969 if (cookie->addr_type == SCTP_IPV6_ADDRESS) { 1970 memset(&sin6, 0, sizeof(sin6)); 1971 sin6.sin6_family = AF_INET6; 1972 sin6.sin6_len = sizeof(sin6); 1973 sin6.sin6_port = sh->src_port; 1974 sin6.sin6_scope_id = cookie->scope_id; 1975 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 1976 sizeof(sin6.sin6_addr.s6_addr)); 1977 to = (struct sockaddr *)&sin6; 1978 } else if (cookie->addr_type == SCTP_IPV4_ADDRESS) { 1979 memset(&sin, 0, sizeof(sin)); 1980 sin.sin_family = AF_INET; 1981 sin.sin_len = sizeof(sin); 1982 sin.sin_port = sh->src_port; 1983 sin.sin_addr.s_addr = cookie->address[0]; 1984 to = (struct sockaddr *)&sin; 1985 } else { 1986 /* This should not happen */ 1987 return (NULL); 1988 } 1989 if ((*stcb == NULL) && to) { 1990 /* Yep, lets check */ 1991 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, localep_sa, NULL); 1992 if (*stcb == NULL) { 1993 /* 1994 * We should have only got back the same inp. If we 1995 * got back a different ep we have a problem. The 1996 * original findep got back l_inp and now 1997 */ 1998 if (l_inp != *inp_p) { 1999 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2000 } 2001 } else { 2002 if (*locked_tcb == NULL) { 2003 /* 2004 * In this case we found the assoc only 2005 * after we locked the create lock. This 2006 * means we are in a colliding case and we 2007 * must make sure that we unlock the tcb if 2008 * its one of the cases where we throw away 2009 * the incoming packets. 
2010 */ 2011 *locked_tcb = *stcb; 2012 2013 /* 2014 * We must also increment the inp ref count 2015 * since the ref_count flags was set when we 2016 * did not find the TCB, now we found it 2017 * which reduces the refcount.. we must 2018 * raise it back out to balance it all :-) 2019 */ 2020 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2021 if ((*stcb)->sctp_ep != l_inp) { 2022 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n", 2023 (*stcb)->sctp_ep, l_inp); 2024 } 2025 } 2026 } 2027 } 2028 if (to == NULL) 2029 return (NULL); 2030 2031 cookie_len -= SCTP_SIGNATURE_SIZE; 2032 if (*stcb == NULL) { 2033 /* this is the "normal" case... get a new TCB */ 2034 *stcb = sctp_process_cookie_new(m, iphlen, offset, sh, cookie, 2035 cookie_len, *inp_p, netp, to, ¬ification, 2036 auth_skipped, auth_offset, auth_len, vrf_id, table_id); 2037 } else { 2038 /* this is abnormal... cookie-echo on existing TCB */ 2039 had_a_existing_tcb = 1; 2040 *stcb = sctp_process_cookie_existing(m, iphlen, offset, sh, 2041 cookie, cookie_len, *inp_p, *stcb, *netp, to, ¬ification, 2042 &sac_restart_id, vrf_id, table_id); 2043 } 2044 2045 if (*stcb == NULL) { 2046 /* still no TCB... must be bad cookie-echo */ 2047 return (NULL); 2048 } 2049 /* 2050 * Ok, we built an association so confirm the address we sent the 2051 * INIT-ACK to. 2052 */ 2053 netl = sctp_findnet(*stcb, to); 2054 /* 2055 * This code should in theory NOT run but 2056 */ 2057 if (netl == NULL) { 2058 /* TSNH! Huh, why do I need to add this address here? 
*/ 2059 int ret; 2060 2061 ret = sctp_add_remote_addr(*stcb, to, SCTP_DONOT_SETSCOPE, 2062 SCTP_IN_COOKIE_PROC); 2063 netl = sctp_findnet(*stcb, to); 2064 } 2065 if (netl) { 2066 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2067 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2068 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2069 netl); 2070 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2071 (*stcb), 0, (void *)netl); 2072 } 2073 } 2074 if (*stcb) { 2075 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, *inp_p, 2076 *stcb, NULL); 2077 } 2078 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2079 if (!had_a_existing_tcb || 2080 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2081 /* 2082 * If we have a NEW cookie or the connect never 2083 * reached the connected state during collision we 2084 * must do the TCP accept thing. 2085 */ 2086 struct socket *so, *oso; 2087 struct sctp_inpcb *inp; 2088 2089 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2090 /* 2091 * For a restart we will keep the same 2092 * socket, no need to do anything. I THINK!! 2093 */ 2094 sctp_ulp_notify(notification, *stcb, 0, (void *)&sac_restart_id); 2095 return (m); 2096 } 2097 oso = (*inp_p)->sctp_socket; 2098 /* 2099 * We do this to keep the sockets side happy durin 2100 * the sonewcon ONLY. 
2101 */ 2102 NET_LOCK_GIANT(); 2103 SCTP_TCB_UNLOCK((*stcb)); 2104 so = sonewconn(oso, 0 2105 ); 2106 NET_UNLOCK_GIANT(); 2107 SCTP_INP_WLOCK((*stcb)->sctp_ep); 2108 SCTP_TCB_LOCK((*stcb)); 2109 SCTP_INP_WUNLOCK((*stcb)->sctp_ep); 2110 if (so == NULL) { 2111 struct mbuf *op_err; 2112 2113 /* Too many sockets */ 2114 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2115 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC); 2116 sctp_abort_association(*inp_p, NULL, m, iphlen, 2117 sh, op_err, vrf_id, 2118 table_id); 2119 sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); 2120 return (NULL); 2121 } 2122 inp = (struct sctp_inpcb *)so->so_pcb; 2123 SCTP_INP_INCR_REF(inp); 2124 /* 2125 * We add the unbound flag here so that if we get an 2126 * soabort() before we get the move_pcb done, we 2127 * will properly cleanup. 2128 */ 2129 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2130 SCTP_PCB_FLAGS_CONNECTED | 2131 SCTP_PCB_FLAGS_IN_TCPPOOL | 2132 SCTP_PCB_FLAGS_UNBOUND | 2133 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 2134 SCTP_PCB_FLAGS_DONT_WAKE); 2135 inp->sctp_features = (*inp_p)->sctp_features; 2136 inp->sctp_socket = so; 2137 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 2138 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 2139 inp->sctp_context = (*inp_p)->sctp_context; 2140 inp->inp_starting_point_for_iterator = NULL; 2141 /* 2142 * copy in the authentication parameters from the 2143 * original endpoint 2144 */ 2145 if (inp->sctp_ep.local_hmacs) 2146 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2147 inp->sctp_ep.local_hmacs = 2148 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 2149 if (inp->sctp_ep.local_auth_chunks) 2150 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2151 inp->sctp_ep.local_auth_chunks = 2152 sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 2153 (void)sctp_copy_skeylist(&(*inp_p)->sctp_ep.shared_keys, 2154 &inp->sctp_ep.shared_keys); 
2155 2156 /* 2157 * Now we must move it from one hash table to 2158 * another and get the tcb in the right place. 2159 */ 2160 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 2161 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, M_NOWAIT); 2162 2163 /* 2164 * now we must check to see if we were aborted while 2165 * the move was going on and the lock/unlock 2166 * happened. 2167 */ 2168 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 2169 /* 2170 * yep it was, we leave the assoc attached 2171 * to the socket since the sctp_inpcb_free() 2172 * call will send an abort for us. 2173 */ 2174 SCTP_INP_DECR_REF(inp); 2175 return (NULL); 2176 } 2177 SCTP_INP_DECR_REF(inp); 2178 /* Switch over to the new guy */ 2179 *inp_p = inp; 2180 sctp_ulp_notify(notification, *stcb, 0, NULL); 2181 2182 /* 2183 * Pull it from the incomplete queue and wake the 2184 * guy 2185 */ 2186 soisconnected(so); 2187 return (m); 2188 } 2189 } 2190 if ((notification) && ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_UDPTYPE)) { 2191 sctp_ulp_notify(notification, *stcb, 0, NULL); 2192 } 2193 return (m); 2194} 2195 2196static void 2197sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp, 2198 struct sctp_tcb *stcb, struct sctp_nets *net) 2199{ 2200 /* cp must not be used, others call this without a c-ack :-) */ 2201 struct sctp_association *asoc; 2202 2203 SCTPDBG(SCTP_DEBUG_INPUT2, 2204 "sctp_handle_cookie_ack: handling COOKIE-ACK\n"); 2205 if (stcb == NULL) 2206 return; 2207 2208 asoc = &stcb->asoc; 2209 2210 sctp_stop_all_cookie_timers(stcb); 2211 /* process according to association state */ 2212 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 2213 /* state change only needed when I am in right state */ 2214 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 2215 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 2216 asoc->state = SCTP_STATE_OPEN | SCTP_STATE_SHUTDOWN_PENDING; 2217 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 2218 stcb->sctp_ep, stcb, asoc->primary_destination); 2219 
2220 } else { 2221 asoc->state = SCTP_STATE_OPEN; 2222 } 2223 /* update RTO */ 2224 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 2225 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 2226 if (asoc->overall_error_count == 0) { 2227 net->RTO = sctp_calculate_rto(stcb, asoc, net, 2228 &asoc->time_entered); 2229 } 2230 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 2231 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL); 2232 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2233 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2234 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 2235 soisconnected(stcb->sctp_ep->sctp_socket); 2236 } 2237 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, 2238 stcb, net); 2239 /* 2240 * since we did not send a HB make sure we don't double 2241 * things 2242 */ 2243 net->hb_responded = 1; 2244 2245 if (stcb->asoc.sctp_autoclose_ticks && 2246 sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) { 2247 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 2248 stcb->sctp_ep, stcb, NULL); 2249 } 2250 /* 2251 * set ASCONF timer if ASCONFs are pending and allowed (eg. 
2252 * addresses changed when init/cookie echo in flight) 2253 */ 2254 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) && 2255 (stcb->asoc.peer_supports_asconf) && 2256 (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) { 2257 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, 2258 stcb->sctp_ep, stcb, 2259 stcb->asoc.primary_destination); 2260 } 2261 } 2262 /* Toss the cookie if I can */ 2263 sctp_toss_old_cookies(stcb, asoc); 2264 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 2265 /* Restart the timer if we have pending data */ 2266 struct sctp_tmit_chunk *chk; 2267 2268 chk = TAILQ_FIRST(&asoc->sent_queue); 2269 if (chk) { 2270 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2271 stcb, chk->whoTo); 2272 } 2273 } 2274} 2275 2276static void 2277sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp, 2278 struct sctp_tcb *stcb) 2279{ 2280 struct sctp_nets *net; 2281 struct sctp_tmit_chunk *lchk; 2282 uint32_t tsn; 2283 2284 if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_ecne_chunk)) { 2285 return; 2286 } 2287 SCTP_STAT_INCR(sctps_recvecne); 2288 tsn = ntohl(cp->tsn); 2289 /* ECN Nonce stuff: need a resync and disable the nonce sum check */ 2290 /* Also we make sure we disable the nonce_wait */ 2291 lchk = TAILQ_FIRST(&stcb->asoc.send_queue); 2292 if (lchk == NULL) { 2293 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq; 2294 } else { 2295 stcb->asoc.nonce_resync_tsn = lchk->rec.data.TSN_seq; 2296 } 2297 stcb->asoc.nonce_wait_for_ecne = 0; 2298 stcb->asoc.nonce_sum_check = 0; 2299 2300 /* Find where it was sent, if possible */ 2301 net = NULL; 2302 lchk = TAILQ_FIRST(&stcb->asoc.sent_queue); 2303 while (lchk) { 2304 if (lchk->rec.data.TSN_seq == tsn) { 2305 net = lchk->whoTo; 2306 break; 2307 } 2308 if (compare_with_wrap(lchk->rec.data.TSN_seq, tsn, MAX_SEQ)) 2309 break; 2310 lchk = TAILQ_NEXT(lchk, sctp_next); 2311 } 2312 if (net == NULL) 2313 /* default is we use the primary */ 2314 net = stcb->asoc.primary_destination; 2315 2316 if (compare_with_wrap(tsn, 
stcb->asoc.last_cwr_tsn, MAX_TSN)) { 2317#ifdef SCTP_CWND_MONITOR 2318 int old_cwnd; 2319 2320 old_cwnd = net->cwnd; 2321#endif 2322 SCTP_STAT_INCR(sctps_ecnereducedcwnd); 2323 net->ssthresh = net->cwnd / 2; 2324 if (net->ssthresh < net->mtu) { 2325 net->ssthresh = net->mtu; 2326 /* here back off the timer as well, to slow us down */ 2327 net->RTO <<= 2; 2328 } 2329 net->cwnd = net->ssthresh; 2330#ifdef SCTP_CWND_MONITOR 2331 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT); 2332#endif 2333 /* 2334 * we reduce once every RTT. So we will only lower cwnd at 2335 * the next sending seq i.e. the resync_tsn. 2336 */ 2337 stcb->asoc.last_cwr_tsn = stcb->asoc.nonce_resync_tsn; 2338 } 2339 /* 2340 * We always send a CWR this way if our previous one was lost our 2341 * peer will get an update, or if it is not time again to reduce we 2342 * still get the cwr to the peer. 2343 */ 2344 sctp_send_cwr(stcb, net, tsn); 2345} 2346 2347static void 2348sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb) 2349{ 2350 /* 2351 * Here we get a CWR from the peer. We must look in the outqueue and 2352 * make sure that we have a covered ECNE in teh control chunk part. 2353 * If so remove it. 2354 */ 2355 struct sctp_tmit_chunk *chk; 2356 struct sctp_ecne_chunk *ecne; 2357 2358 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 2359 if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) { 2360 continue; 2361 } 2362 /* 2363 * Look for and remove if it is the right TSN. Since there 2364 * is only ONE ECNE on the control queue at any one time we 2365 * don't need to worry about more than one! 
2366 */ 2367 ecne = mtod(chk->data, struct sctp_ecne_chunk *); 2368 if (compare_with_wrap(ntohl(cp->tsn), ntohl(ecne->tsn), 2369 MAX_TSN) || (cp->tsn == ecne->tsn)) { 2370 /* this covers this ECNE, we can remove it */ 2371 stcb->asoc.ecn_echo_cnt_onq--; 2372 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, 2373 sctp_next); 2374 if (chk->data) { 2375 sctp_m_freem(chk->data); 2376 chk->data = NULL; 2377 } 2378 stcb->asoc.ctrl_queue_cnt--; 2379 sctp_free_remote_addr(chk->whoTo); 2380 sctp_free_a_chunk(stcb, chk); 2381 break; 2382 } 2383 } 2384} 2385 2386static void 2387sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp, 2388 struct sctp_tcb *stcb, struct sctp_nets *net) 2389{ 2390 struct sctp_association *asoc; 2391 2392 SCTPDBG(SCTP_DEBUG_INPUT2, 2393 "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n"); 2394 if (stcb == NULL) 2395 return; 2396 2397 asoc = &stcb->asoc; 2398 /* process according to association state */ 2399 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) { 2400 /* unexpected SHUTDOWN-COMPLETE... so ignore... */ 2401 SCTP_TCB_UNLOCK(stcb); 2402 return; 2403 } 2404 /* notify upper layer protocol */ 2405 if (stcb->sctp_socket) { 2406 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL); 2407 /* are the queues empty? 
they should be */ 2408 if (!TAILQ_EMPTY(&asoc->send_queue) || 2409 !TAILQ_EMPTY(&asoc->sent_queue) || 2410 !TAILQ_EMPTY(&asoc->out_wheel)) { 2411 sctp_report_all_outbound(stcb, 0); 2412 } 2413 } 2414 /* stop the timer */ 2415 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_21); 2416 SCTP_STAT_INCR_COUNTER32(sctps_shutdown); 2417 /* free the TCB */ 2418 sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22); 2419 return; 2420} 2421 2422static int 2423process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc, 2424 struct sctp_nets *net, uint8_t flg) 2425{ 2426 switch (desc->chunk_type) { 2427 case SCTP_DATA: 2428 /* find the tsn to resend (possibly */ 2429 { 2430 uint32_t tsn; 2431 struct sctp_tmit_chunk *tp1; 2432 2433 tsn = ntohl(desc->tsn_ifany); 2434 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2435 while (tp1) { 2436 if (tp1->rec.data.TSN_seq == tsn) { 2437 /* found it */ 2438 break; 2439 } 2440 if (compare_with_wrap(tp1->rec.data.TSN_seq, tsn, 2441 MAX_TSN)) { 2442 /* not found */ 2443 tp1 = NULL; 2444 break; 2445 } 2446 tp1 = TAILQ_NEXT(tp1, sctp_next); 2447 } 2448 if (tp1 == NULL) { 2449 /* 2450 * Do it the other way , aka without paying 2451 * attention to queue seq order. 
2452 */ 2453 SCTP_STAT_INCR(sctps_pdrpdnfnd); 2454 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2455 while (tp1) { 2456 if (tp1->rec.data.TSN_seq == tsn) { 2457 /* found it */ 2458 break; 2459 } 2460 tp1 = TAILQ_NEXT(tp1, sctp_next); 2461 } 2462 } 2463 if (tp1 == NULL) { 2464 SCTP_STAT_INCR(sctps_pdrptsnnf); 2465 } 2466 if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) { 2467 uint8_t *ddp; 2468 2469 if ((stcb->asoc.peers_rwnd == 0) && 2470 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) { 2471 SCTP_STAT_INCR(sctps_pdrpdiwnp); 2472 return (0); 2473 } 2474 if (stcb->asoc.peers_rwnd == 0 && 2475 (flg & SCTP_FROM_MIDDLE_BOX)) { 2476 SCTP_STAT_INCR(sctps_pdrpdizrw); 2477 return (0); 2478 } 2479 ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+ 2480 sizeof(struct sctp_data_chunk)); 2481 { 2482 unsigned int iii; 2483 2484 for (iii = 0; iii < sizeof(desc->data_bytes); 2485 iii++) { 2486 if (ddp[iii] != desc->data_bytes[iii]) { 2487 SCTP_STAT_INCR(sctps_pdrpbadd); 2488 return (-1); 2489 } 2490 } 2491 } 2492 /* 2493 * We zero out the nonce so resync not 2494 * needed 2495 */ 2496 tp1->rec.data.ect_nonce = 0; 2497 2498 if (tp1->do_rtt) { 2499 /* 2500 * this guy had a RTO calculation 2501 * pending on it, cancel it 2502 */ 2503 tp1->do_rtt = 0; 2504 } 2505 SCTP_STAT_INCR(sctps_pdrpmark); 2506 if (tp1->sent != SCTP_DATAGRAM_RESEND) 2507 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2508 tp1->sent = SCTP_DATAGRAM_RESEND; 2509 /* 2510 * mark it as if we were doing a FR, since 2511 * we will be getting gap ack reports behind 2512 * the info from the router. 2513 */ 2514 tp1->rec.data.doing_fast_retransmit = 1; 2515 /* 2516 * mark the tsn with what sequences can 2517 * cause a new FR. 
2518 */ 2519 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 2520 tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 2521 } else { 2522 tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 2523 } 2524 2525 /* restart the timer */ 2526 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2527 stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23); 2528 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 2529 stcb, tp1->whoTo); 2530 2531 /* fix counts and things */ 2532#ifdef SCTP_FLIGHT_LOGGING 2533 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP, 2534 tp1->whoTo->flight_size, 2535 tp1->book_size, 2536 (uintptr_t) stcb, 2537 tp1->rec.data.TSN_seq); 2538#endif 2539 sctp_flight_size_decrease(tp1); 2540 sctp_total_flight_decrease(stcb, tp1); 2541 } { 2542 /* audit code */ 2543 unsigned int audit; 2544 2545 audit = 0; 2546 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 2547 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2548 audit++; 2549 } 2550 TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue, 2551 sctp_next) { 2552 if (tp1->sent == SCTP_DATAGRAM_RESEND) 2553 audit++; 2554 } 2555 if (audit != stcb->asoc.sent_queue_retran_cnt) { 2556 SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n", 2557 audit, stcb->asoc.sent_queue_retran_cnt); 2558#ifndef SCTP_AUDITING_ENABLED 2559 stcb->asoc.sent_queue_retran_cnt = audit; 2560#endif 2561 } 2562 } 2563 } 2564 break; 2565 case SCTP_ASCONF: 2566 { 2567 struct sctp_tmit_chunk *asconf; 2568 2569 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue, 2570 sctp_next) { 2571 if (asconf->rec.chunk_id.id == SCTP_ASCONF) { 2572 break; 2573 } 2574 } 2575 if (asconf) { 2576 if (asconf->sent != SCTP_DATAGRAM_RESEND) 2577 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2578 asconf->sent = SCTP_DATAGRAM_RESEND; 2579 asconf->snd_count--; 2580 } 2581 } 2582 break; 2583 case SCTP_INITIATION: 2584 /* resend the INIT */ 2585 stcb->asoc.dropped_special_cnt++; 2586 if (stcb->asoc.dropped_special_cnt < 
SCTP_RETRY_DROPPED_THRESH) { 2587 /* 2588 * If we can get it in, in a few attempts we do 2589 * this, otherwise we let the timer fire. 2590 */ 2591 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, 2592 stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24); 2593 sctp_send_initiate(stcb->sctp_ep, stcb); 2594 } 2595 break; 2596 case SCTP_SELECTIVE_ACK: 2597 /* resend the sack */ 2598 sctp_send_sack(stcb); 2599 break; 2600 case SCTP_HEARTBEAT_REQUEST: 2601 /* resend a demand HB */ 2602 (void)sctp_send_hb(stcb, 1, net); 2603 break; 2604 case SCTP_SHUTDOWN: 2605 sctp_send_shutdown(stcb, net); 2606 break; 2607 case SCTP_SHUTDOWN_ACK: 2608 sctp_send_shutdown_ack(stcb, net); 2609 break; 2610 case SCTP_COOKIE_ECHO: 2611 { 2612 struct sctp_tmit_chunk *cookie; 2613 2614 cookie = NULL; 2615 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, 2616 sctp_next) { 2617 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 2618 break; 2619 } 2620 } 2621 if (cookie) { 2622 if (cookie->sent != SCTP_DATAGRAM_RESEND) 2623 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 2624 cookie->sent = SCTP_DATAGRAM_RESEND; 2625 sctp_stop_all_cookie_timers(stcb); 2626 } 2627 } 2628 break; 2629 case SCTP_COOKIE_ACK: 2630 sctp_send_cookie_ack(stcb); 2631 break; 2632 case SCTP_ASCONF_ACK: 2633 /* resend last asconf ack */ 2634 sctp_send_asconf_ack(stcb, 1); 2635 break; 2636 case SCTP_FORWARD_CUM_TSN: 2637 send_forward_tsn(stcb, &stcb->asoc); 2638 break; 2639 /* can't do anything with these */ 2640 case SCTP_PACKET_DROPPED: 2641 case SCTP_INITIATION_ACK: /* this should not happen */ 2642 case SCTP_HEARTBEAT_ACK: 2643 case SCTP_ABORT_ASSOCIATION: 2644 case SCTP_OPERATION_ERROR: 2645 case SCTP_SHUTDOWN_COMPLETE: 2646 case SCTP_ECN_ECHO: 2647 case SCTP_ECN_CWR: 2648 default: 2649 break; 2650 } 2651 return (0); 2652} 2653 2654void 2655sctp_reset_in_stream(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 2656{ 2657 int i; 2658 uint16_t temp; 2659 2660 /* 2661 * We set things to 0xffff since this is 
the last delivered sequence 2662 * and we will be sending in 0 after the reset. 2663 */ 2664 2665 if (number_entries) { 2666 for (i = 0; i < number_entries; i++) { 2667 temp = ntohs(list[i]); 2668 if (temp >= stcb->asoc.streamincnt) { 2669 continue; 2670 } 2671 stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff; 2672 } 2673 } else { 2674 list = NULL; 2675 for (i = 0; i < stcb->asoc.streamincnt; i++) { 2676 stcb->asoc.strmin[i].last_sequence_delivered = 0xffff; 2677 } 2678 } 2679 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list); 2680} 2681 2682static void 2683sctp_reset_out_streams(struct sctp_tcb *stcb, int number_entries, uint16_t * list) 2684{ 2685 int i; 2686 2687 if (number_entries == 0) { 2688 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 2689 stcb->asoc.strmout[i].next_sequence_sent = 0; 2690 } 2691 } else if (number_entries) { 2692 for (i = 0; i < number_entries; i++) { 2693 uint16_t temp; 2694 2695 temp = ntohs(list[i]); 2696 if (temp >= stcb->asoc.streamoutcnt) { 2697 /* no such stream */ 2698 continue; 2699 } 2700 stcb->asoc.strmout[temp].next_sequence_sent = 0; 2701 } 2702 } 2703 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list); 2704} 2705 2706 2707struct sctp_stream_reset_out_request * 2708sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk) 2709{ 2710 struct sctp_association *asoc; 2711 struct sctp_stream_reset_out_req *req; 2712 struct sctp_stream_reset_out_request *r; 2713 struct sctp_tmit_chunk *chk; 2714 int len, clen; 2715 2716 asoc = &stcb->asoc; 2717 if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 2718 asoc->stream_reset_outstanding = 0; 2719 return (NULL); 2720 } 2721 if (stcb->asoc.str_reset == NULL) { 2722 asoc->stream_reset_outstanding = 0; 2723 return (NULL); 2724 } 2725 chk = stcb->asoc.str_reset; 2726 if (chk->data == NULL) { 2727 return (NULL); 2728 } 2729 if (bchk) { 2730 /* he wants a copy of the chk pointer */ 2731 *bchk = 
chk; 2732 } 2733 clen = chk->send_size; 2734 req = mtod(chk->data, struct sctp_stream_reset_out_req *); 2735 r = &req->sr_req; 2736 if (ntohl(r->request_seq) == seq) { 2737 /* found it */ 2738 return (r); 2739 } 2740 len = SCTP_SIZE32(ntohs(r->ph.param_length)); 2741 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) { 2742 /* move to the next one, there can only be a max of two */ 2743 r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len); 2744 if (ntohl(r->request_seq) == seq) { 2745 return (r); 2746 } 2747 } 2748 /* that seq is not here */ 2749 return (NULL); 2750} 2751 2752static void 2753sctp_clean_up_stream_reset(struct sctp_tcb *stcb) 2754{ 2755 struct sctp_association *asoc; 2756 struct sctp_tmit_chunk *chk = stcb->asoc.str_reset; 2757 2758 if (stcb->asoc.str_reset == NULL) { 2759 return; 2760 } 2761 asoc = &stcb->asoc; 2762 2763 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25); 2764 TAILQ_REMOVE(&asoc->control_send_queue, 2765 chk, 2766 sctp_next); 2767 if (chk->data) { 2768 sctp_m_freem(chk->data); 2769 chk->data = NULL; 2770 } 2771 asoc->ctrl_queue_cnt--; 2772 sctp_free_remote_addr(chk->whoTo); 2773 2774 sctp_free_a_chunk(stcb, chk); 2775 stcb->asoc.str_reset = NULL; 2776} 2777 2778 2779static int 2780sctp_handle_stream_reset_response(struct sctp_tcb *stcb, 2781 uint32_t seq, uint32_t action, 2782 struct sctp_stream_reset_response *respin) 2783{ 2784 uint16_t type; 2785 int lparm_len; 2786 struct sctp_association *asoc = &stcb->asoc; 2787 struct sctp_tmit_chunk *chk; 2788 struct sctp_stream_reset_out_request *srparam; 2789 int number_entries; 2790 2791 if (asoc->stream_reset_outstanding == 0) { 2792 /* duplicate */ 2793 return (0); 2794 } 2795 if (seq == stcb->asoc.str_reset_seq_out) { 2796 srparam = sctp_find_stream_reset(stcb, seq, &chk); 2797 if (srparam) { 2798 stcb->asoc.str_reset_seq_out++; 2799 type = ntohs(srparam->ph.param_type); 2800 lparm_len = 
ntohs(srparam->ph.param_length); 2801 if (type == SCTP_STR_RESET_OUT_REQUEST) { 2802 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t); 2803 asoc->stream_reset_out_is_outstanding = 0; 2804 if (asoc->stream_reset_outstanding) 2805 asoc->stream_reset_outstanding--; 2806 if (action == SCTP_STREAM_RESET_PERFORMED) { 2807 /* do it */ 2808 sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams); 2809 } else { 2810 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams); 2811 } 2812 } else if (type == SCTP_STR_RESET_IN_REQUEST) { 2813 /* Answered my request */ 2814 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t); 2815 if (asoc->stream_reset_outstanding) 2816 asoc->stream_reset_outstanding--; 2817 if (action != SCTP_STREAM_RESET_PERFORMED) { 2818 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, number_entries, srparam->list_of_streams); 2819 } 2820 } else if (type == SCTP_STR_RESET_TSN_REQUEST) { 2821 /** 2822 * a) Adopt the new in tsn. 2823 * b) reset the map 2824 * c) Adopt the new out-tsn 2825 */ 2826 struct sctp_stream_reset_response_tsn *resp; 2827 struct sctp_forward_tsn_chunk fwdtsn; 2828 int abort_flag = 0; 2829 2830 if (respin == NULL) { 2831 /* huh ? 
*/ 2832 return (0); 2833 } 2834 if (action == SCTP_STREAM_RESET_PERFORMED) { 2835 resp = (struct sctp_stream_reset_response_tsn *)respin; 2836 asoc->stream_reset_outstanding--; 2837 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 2838 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 2839 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1); 2840 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag); 2841 if (abort_flag) { 2842 return (1); 2843 } 2844 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1); 2845 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 2846 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn); 2847 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 2848 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn); 2849 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn; 2850 2851 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 2852 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 2853 2854 } 2855 } 2856 /* get rid of the request and get the request flags */ 2857 if (asoc->stream_reset_outstanding == 0) { 2858 sctp_clean_up_stream_reset(stcb); 2859 } 2860 } 2861 } 2862 return (0); 2863} 2864 2865static void 2866sctp_handle_str_reset_request_in(struct sctp_tcb *stcb, 2867 struct sctp_tmit_chunk *chk, 2868 struct sctp_stream_reset_in_request *req) 2869{ 2870 uint32_t seq; 2871 int len, i; 2872 int number_entries; 2873 uint16_t temp; 2874 2875 /* 2876 * peer wants me to send a str-reset to him for my outgoing seq's if 2877 * seq_in is right. 
2878 */ 2879 struct sctp_association *asoc = &stcb->asoc; 2880 2881 seq = ntohl(req->request_seq); 2882 if (asoc->str_reset_seq_in == seq) { 2883 if (stcb->asoc.stream_reset_out_is_outstanding == 0) { 2884 len = ntohs(req->ph.param_length); 2885 number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t)); 2886 for (i = 0; i < number_entries; i++) { 2887 temp = ntohs(req->list_of_streams[i]); 2888 req->list_of_streams[i] = temp; 2889 } 2890 /* move the reset action back one */ 2891 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 2892 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 2893 sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams, 2894 asoc->str_reset_seq_out, 2895 seq, (asoc->sending_seq - 1)); 2896 asoc->stream_reset_out_is_outstanding = 1; 2897 asoc->str_reset = chk; 2898 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); 2899 stcb->asoc.stream_reset_outstanding++; 2900 } else { 2901 /* Can't do it, since we have sent one out */ 2902 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 2903 asoc->last_reset_action[0] = SCTP_STREAM_RESET_TRY_LATER; 2904 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 2905 } 2906 asoc->str_reset_seq_in++; 2907 } else if (asoc->str_reset_seq_in - 1 == seq) { 2908 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 2909 } else if (asoc->str_reset_seq_in - 2 == seq) { 2910 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 2911 } else { 2912 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 2913 } 2914} 2915 2916static int 2917sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb, 2918 struct sctp_tmit_chunk *chk, 2919 struct sctp_stream_reset_tsn_request *req) 2920{ 2921 /* reset all in and out and update the tsn */ 2922 /* 2923 * A) reset my str-seq's on in and out. B) Select a receive next, 2924 * and set cum-ack to it. 
Also process this selected number as a 2925 * fwd-tsn as well. C) set in the response my next sending seq. 2926 */ 2927 struct sctp_forward_tsn_chunk fwdtsn; 2928 struct sctp_association *asoc = &stcb->asoc; 2929 int abort_flag = 0; 2930 uint32_t seq; 2931 2932 seq = ntohl(req->request_seq); 2933 if (asoc->str_reset_seq_in == seq) { 2934 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 2935 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 2936 fwdtsn.ch.chunk_flags = 0; 2937 fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1); 2938 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag); 2939 if (abort_flag) { 2940 return (1); 2941 } 2942 stcb->asoc.highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA; 2943 stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 2944 stcb->asoc.mapping_array_base_tsn = stcb->asoc.highest_tsn_inside_map + 1; 2945 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 2946 atomic_add_int(&stcb->asoc.sending_seq, 1); 2947 /* save off historical data for retrans */ 2948 stcb->asoc.last_sending_seq[1] = stcb->asoc.last_sending_seq[0]; 2949 stcb->asoc.last_sending_seq[0] = stcb->asoc.sending_seq; 2950 stcb->asoc.last_base_tsnsent[1] = stcb->asoc.last_base_tsnsent[0]; 2951 stcb->asoc.last_base_tsnsent[0] = stcb->asoc.mapping_array_base_tsn; 2952 2953 sctp_add_stream_reset_result_tsn(chk, 2954 ntohl(req->request_seq), 2955 SCTP_STREAM_RESET_PERFORMED, 2956 stcb->asoc.sending_seq, 2957 stcb->asoc.mapping_array_base_tsn); 2958 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 2959 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 2960 stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0]; 2961 stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 2962 2963 asoc->str_reset_seq_in++; 2964 } else if (asoc->str_reset_seq_in - 1 == seq) { 2965 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0], 2966 stcb->asoc.last_sending_seq[0], 2967 
stcb->asoc.last_base_tsnsent[0] 2968 ); 2969 } else if (asoc->str_reset_seq_in - 2 == seq) { 2970 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1], 2971 stcb->asoc.last_sending_seq[1], 2972 stcb->asoc.last_base_tsnsent[1] 2973 ); 2974 } else { 2975 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 2976 } 2977 return (0); 2978} 2979 2980static void 2981sctp_handle_str_reset_request_out(struct sctp_tcb *stcb, 2982 struct sctp_tmit_chunk *chk, 2983 struct sctp_stream_reset_out_request *req) 2984{ 2985 uint32_t seq, tsn; 2986 int number_entries, len; 2987 struct sctp_association *asoc = &stcb->asoc; 2988 2989 seq = ntohl(req->request_seq); 2990 2991 /* now if its not a duplicate we process it */ 2992 if (asoc->str_reset_seq_in == seq) { 2993 len = ntohs(req->ph.param_length); 2994 number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t)); 2995 /* 2996 * the sender is resetting, handle the list issue.. we must 2997 * a) verify if we can do the reset, if so no problem b) If 2998 * we can't do the reset we must copy the request. c) queue 2999 * it, and setup the data in processor to trigger it off 3000 * when needed and dequeue all the queued data. 
3001 */ 3002 tsn = ntohl(req->send_reset_at_tsn); 3003 3004 /* move the reset action back one */ 3005 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3006 if ((tsn == asoc->cumulative_tsn) || 3007 (compare_with_wrap(asoc->cumulative_tsn, tsn, MAX_TSN))) { 3008 /* we can do it now */ 3009 sctp_reset_in_stream(stcb, number_entries, req->list_of_streams); 3010 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3011 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3012 } else { 3013 /* 3014 * we must queue it up and thus wait for the TSN's 3015 * to arrive that are at or before tsn 3016 */ 3017 struct sctp_stream_reset_list *liste; 3018 int siz; 3019 3020 siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t)); 3021 SCTP_MALLOC(liste, struct sctp_stream_reset_list *, 3022 siz, "StrRstList"); 3023 if (liste == NULL) { 3024 /* gak out of memory */ 3025 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_DENIED); 3026 asoc->last_reset_action[0] = SCTP_STREAM_RESET_DENIED; 3027 return; 3028 } 3029 liste->tsn = tsn; 3030 liste->number_entries = number_entries; 3031 memcpy(&liste->req, req, 3032 (sizeof(struct sctp_stream_reset_out_request) + (number_entries * sizeof(uint16_t)))); 3033 TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp); 3034 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_PERFORMED); 3035 asoc->last_reset_action[0] = SCTP_STREAM_RESET_PERFORMED; 3036 } 3037 asoc->str_reset_seq_in++; 3038 } else if ((asoc->str_reset_seq_in - 1) == seq) { 3039 /* 3040 * one seq back, just echo back last action since my 3041 * response was lost. 3042 */ 3043 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3044 } else if ((asoc->str_reset_seq_in - 2) == seq) { 3045 /* 3046 * two seq back, just echo back last action since my 3047 * response was lost. 
3048 */ 3049 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3050 } else { 3051 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_BAD_SEQNO); 3052 } 3053} 3054 3055static int 3056sctp_handle_stream_reset(struct sctp_tcb *stcb, struct sctp_stream_reset_out_req *sr_req) 3057{ 3058 int chk_length, param_len, ptype; 3059 uint32_t seq; 3060 int num_req = 0; 3061 struct sctp_tmit_chunk *chk; 3062 struct sctp_chunkhdr *ch; 3063 struct sctp_paramhdr *ph; 3064 int ret_code = 0; 3065 int num_param = 0; 3066 3067 /* now it may be a reset or a reset-response */ 3068 chk_length = ntohs(sr_req->ch.chunk_length); 3069 3070 /* setup for adding the response */ 3071 sctp_alloc_a_chunk(stcb, chk); 3072 if (chk == NULL) { 3073 return (ret_code); 3074 } 3075 chk->rec.chunk_id.id = SCTP_STREAM_RESET; 3076 chk->rec.chunk_id.can_take_data = 0; 3077 chk->asoc = &stcb->asoc; 3078 chk->no_fr_allowed = 0; 3079 chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr); 3080 chk->book_size_scale = 0; 3081 chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_DONTWAIT, 1, MT_DATA); 3082 if (chk->data == NULL) { 3083strres_nochunk: 3084 if (chk->data) { 3085 sctp_m_freem(chk->data); 3086 chk->data = NULL; 3087 } 3088 sctp_free_a_chunk(stcb, chk); 3089 return (ret_code); 3090 } 3091 SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD); 3092 3093 /* setup chunk parameters */ 3094 chk->sent = SCTP_DATAGRAM_UNSENT; 3095 chk->snd_count = 0; 3096 chk->whoTo = stcb->asoc.primary_destination; 3097 atomic_add_int(&chk->whoTo->ref_count, 1); 3098 3099 ch = mtod(chk->data, struct sctp_chunkhdr *); 3100 ch->chunk_type = SCTP_STREAM_RESET; 3101 ch->chunk_flags = 0; 3102 ch->chunk_length = htons(chk->send_size); 3103 SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size); 3104 ph = (struct sctp_paramhdr *)&sr_req->sr_req; 3105 while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) { 3106 param_len = ntohs(ph->param_length); 3107 if (param_len < (int)sizeof(struct 
sctp_stream_reset_tsn_request)) { 3108 /* bad param */ 3109 break; 3110 } 3111 ptype = ntohs(ph->param_type); 3112 num_param++; 3113 if (num_param > SCTP_MAX_RESET_PARAMS) { 3114 /* hit the max of parameters already sorry.. */ 3115 break; 3116 } 3117 if (ptype == SCTP_STR_RESET_OUT_REQUEST) { 3118 struct sctp_stream_reset_out_request *req_out; 3119 3120 req_out = (struct sctp_stream_reset_out_request *)ph; 3121 num_req++; 3122 if (stcb->asoc.stream_reset_outstanding) { 3123 seq = ntohl(req_out->response_seq); 3124 if (seq == stcb->asoc.str_reset_seq_out) { 3125 /* implicit ack */ 3126 (void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_PERFORMED, NULL); 3127 } 3128 } 3129 sctp_handle_str_reset_request_out(stcb, chk, req_out); 3130 } else if (ptype == SCTP_STR_RESET_IN_REQUEST) { 3131 struct sctp_stream_reset_in_request *req_in; 3132 3133 num_req++; 3134 req_in = (struct sctp_stream_reset_in_request *)ph; 3135 sctp_handle_str_reset_request_in(stcb, chk, req_in); 3136 } else if (ptype == SCTP_STR_RESET_TSN_REQUEST) { 3137 struct sctp_stream_reset_tsn_request *req_tsn; 3138 3139 num_req++; 3140 req_tsn = (struct sctp_stream_reset_tsn_request *)ph; 3141 if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) { 3142 ret_code = 1; 3143 goto strres_nochunk; 3144 } 3145 /* no more */ 3146 break; 3147 } else if (ptype == SCTP_STR_RESET_RESPONSE) { 3148 struct sctp_stream_reset_response *resp; 3149 uint32_t result; 3150 3151 resp = (struct sctp_stream_reset_response *)ph; 3152 seq = ntohl(resp->response_seq); 3153 result = ntohl(resp->result); 3154 if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) { 3155 ret_code = 1; 3156 goto strres_nochunk; 3157 } 3158 } else { 3159 break; 3160 } 3161 3162 ph = (struct sctp_paramhdr *)((caddr_t)ph + SCTP_SIZE32(param_len)); 3163 chk_length -= SCTP_SIZE32(param_len); 3164 } 3165 if (num_req == 0) { 3166 /* we have no response free the stuff */ 3167 goto strres_nochunk; 3168 } 3169 /* ok we have a chunk to 
link in */ 3170 TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue, 3171 chk, 3172 sctp_next); 3173 stcb->asoc.ctrl_queue_cnt++; 3174 return (ret_code); 3175} 3176 3177/* 3178 * Handle a router or endpoints report of a packet loss, there are two ways 3179 * to handle this, either we get the whole packet and must disect it 3180 * ourselves (possibly with truncation and or corruption) or it is a summary 3181 * from a middle box that did the disectting for us. 3182 */ 3183static void 3184sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp, 3185 struct sctp_tcb *stcb, struct sctp_nets *net) 3186{ 3187 uint32_t bottle_bw, on_queue; 3188 uint16_t trunc_len; 3189 unsigned int chlen; 3190 unsigned int at; 3191 struct sctp_chunk_desc desc; 3192 struct sctp_chunkhdr *ch; 3193 3194 chlen = ntohs(cp->ch.chunk_length); 3195 chlen -= sizeof(struct sctp_pktdrop_chunk); 3196 /* XXX possible chlen underflow */ 3197 if (chlen == 0) { 3198 ch = NULL; 3199 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) 3200 SCTP_STAT_INCR(sctps_pdrpbwrpt); 3201 } else { 3202 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr)); 3203 chlen -= sizeof(struct sctphdr); 3204 /* XXX possible chlen underflow */ 3205 memset(&desc, 0, sizeof(desc)); 3206 } 3207 trunc_len = (uint16_t) ntohs(cp->trunc_len); 3208 /* now the chunks themselves */ 3209 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) { 3210 desc.chunk_type = ch->chunk_type; 3211 /* get amount we need to move */ 3212 at = ntohs(ch->chunk_length); 3213 if (at < sizeof(struct sctp_chunkhdr)) { 3214 /* corrupt chunk, maybe at the end? */ 3215 SCTP_STAT_INCR(sctps_pdrpcrupt); 3216 break; 3217 } 3218 if (trunc_len == 0) { 3219 /* we are supposed to have all of it */ 3220 if (at > chlen) { 3221 /* corrupt skip it */ 3222 SCTP_STAT_INCR(sctps_pdrpcrupt); 3223 break; 3224 } 3225 } else { 3226 /* is there enough of it left ? 
*/ 3227 if (desc.chunk_type == SCTP_DATA) { 3228 if (chlen < (sizeof(struct sctp_data_chunk) + 3229 sizeof(desc.data_bytes))) { 3230 break; 3231 } 3232 } else { 3233 if (chlen < sizeof(struct sctp_chunkhdr)) { 3234 break; 3235 } 3236 } 3237 } 3238 if (desc.chunk_type == SCTP_DATA) { 3239 /* can we get out the tsn? */ 3240 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 3241 SCTP_STAT_INCR(sctps_pdrpmbda); 3242 3243 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) { 3244 /* yep */ 3245 struct sctp_data_chunk *dcp; 3246 uint8_t *ddp; 3247 unsigned int iii; 3248 3249 dcp = (struct sctp_data_chunk *)ch; 3250 ddp = (uint8_t *) (dcp + 1); 3251 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) { 3252 desc.data_bytes[iii] = ddp[iii]; 3253 } 3254 desc.tsn_ifany = dcp->dp.tsn; 3255 } else { 3256 /* nope we are done. */ 3257 SCTP_STAT_INCR(sctps_pdrpnedat); 3258 break; 3259 } 3260 } else { 3261 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 3262 SCTP_STAT_INCR(sctps_pdrpmbct); 3263 } 3264 3265 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) { 3266 SCTP_STAT_INCR(sctps_pdrppdbrk); 3267 break; 3268 } 3269 if (SCTP_SIZE32(at) > chlen) { 3270 break; 3271 } 3272 chlen -= SCTP_SIZE32(at); 3273 if (chlen < sizeof(struct sctp_chunkhdr)) { 3274 /* done, none left */ 3275 break; 3276 } 3277 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at)); 3278 } 3279 /* Now update any rwnd --- possibly */ 3280 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) { 3281 /* From a peer, we get a rwnd report */ 3282 uint32_t a_rwnd; 3283 3284 SCTP_STAT_INCR(sctps_pdrpfehos); 3285 3286 bottle_bw = ntohl(cp->bottle_bw); 3287 on_queue = ntohl(cp->current_onq); 3288 if (bottle_bw && on_queue) { 3289 /* a rwnd report is in here */ 3290 if (bottle_bw > on_queue) 3291 a_rwnd = bottle_bw - on_queue; 3292 else 3293 a_rwnd = 0; 3294 3295 if (a_rwnd == 0) 3296 stcb->asoc.peers_rwnd = 0; 3297 else { 3298 if (a_rwnd > stcb->asoc.total_flight) { 3299 
stcb->asoc.peers_rwnd = 3300 a_rwnd - stcb->asoc.total_flight; 3301 } else { 3302 stcb->asoc.peers_rwnd = 0; 3303 } 3304 if (stcb->asoc.peers_rwnd < 3305 stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3306 /* SWS sender side engages */ 3307 stcb->asoc.peers_rwnd = 0; 3308 } 3309 } 3310 } 3311 } else { 3312 SCTP_STAT_INCR(sctps_pdrpfmbox); 3313 } 3314 3315 /* now middle boxes in sat networks get a cwnd bump */ 3316 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) && 3317 (stcb->asoc.sat_t3_loss_recovery == 0) && 3318 (stcb->asoc.sat_network)) { 3319 /* 3320 * This is debateable but for sat networks it makes sense 3321 * Note if a T3 timer has went off, we will prohibit any 3322 * changes to cwnd until we exit the t3 loss recovery. 3323 */ 3324 uint32_t bw_avail; 3325 int rtt, incr; 3326 3327#ifdef SCTP_CWND_MONITOR 3328 int old_cwnd = net->cwnd; 3329 3330#endif 3331 /* need real RTT for this calc */ 3332 rtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 3333 /* get bottle neck bw */ 3334 bottle_bw = ntohl(cp->bottle_bw); 3335 /* and whats on queue */ 3336 on_queue = ntohl(cp->current_onq); 3337 /* 3338 * adjust the on-queue if our flight is more it could be 3339 * that the router has not yet gotten data "in-flight" to it 3340 */ 3341 if (on_queue < net->flight_size) 3342 on_queue = net->flight_size; 3343 3344 /* calculate the available space */ 3345 bw_avail = (bottle_bw * rtt) / 1000; 3346 if (bw_avail > bottle_bw) { 3347 /* 3348 * Cap the growth to no more than the bottle neck. 3349 * This can happen as RTT slides up due to queues. 3350 * It also means if you have more than a 1 second 3351 * RTT with a empty queue you will be limited to the 3352 * bottle_bw per second no matter if other points 3353 * have 1/2 the RTT and you could get more out... 3354 */ 3355 bw_avail = bottle_bw; 3356 } 3357 if (on_queue > bw_avail) { 3358 /* 3359 * No room for anything else don't allow anything 3360 * else to be "added to the fire". 
3361 */ 3362 int seg_inflight, seg_onqueue, my_portion; 3363 3364 net->partial_bytes_acked = 0; 3365 3366 /* how much are we over queue size? */ 3367 incr = on_queue - bw_avail; 3368 if (stcb->asoc.seen_a_sack_this_pkt) { 3369 /* 3370 * undo any cwnd adjustment that the sack 3371 * might have made 3372 */ 3373 net->cwnd = net->prev_cwnd; 3374 } 3375 /* Now how much of that is mine? */ 3376 seg_inflight = net->flight_size / net->mtu; 3377 seg_onqueue = on_queue / net->mtu; 3378 my_portion = (incr * seg_inflight) / seg_onqueue; 3379 3380 /* Have I made an adjustment already */ 3381 if (net->cwnd > net->flight_size) { 3382 /* 3383 * for this flight I made an adjustment we 3384 * need to decrease the portion by a share 3385 * our previous adjustment. 3386 */ 3387 int diff_adj; 3388 3389 diff_adj = net->cwnd - net->flight_size; 3390 if (diff_adj > my_portion) 3391 my_portion = 0; 3392 else 3393 my_portion -= diff_adj; 3394 } 3395 /* 3396 * back down to the previous cwnd (assume we have 3397 * had a sack before this packet). minus what ever 3398 * portion of the overage is my fault. 3399 */ 3400 net->cwnd -= my_portion; 3401 3402 /* we will NOT back down more than 1 MTU */ 3403 if (net->cwnd <= net->mtu) { 3404 net->cwnd = net->mtu; 3405 } 3406 /* force into CA */ 3407 net->ssthresh = net->cwnd - 1; 3408 } else { 3409 /* 3410 * Take 1/4 of the space left or max burst up .. 3411 * whichever is less. 
3412 */ 3413 incr = min((bw_avail - on_queue) >> 2, 3414 (int)stcb->asoc.max_burst * (int)net->mtu); 3415 net->cwnd += incr; 3416 } 3417 if (net->cwnd > bw_avail) { 3418 /* We can't exceed the pipe size */ 3419 net->cwnd = bw_avail; 3420 } 3421 if (net->cwnd < net->mtu) { 3422 /* We always have 1 MTU */ 3423 net->cwnd = net->mtu; 3424 } 3425#ifdef SCTP_CWND_MONITOR 3426 if (net->cwnd - old_cwnd != 0) { 3427 /* log only changes */ 3428 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), 3429 SCTP_CWND_LOG_FROM_SAT); 3430 } 3431#endif 3432 } 3433} 3434 3435/* 3436 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to 3437 * still contain IP/SCTP header - stcb: is the tcb found for this packet - 3438 * offset: offset into the mbuf chain to first chunkhdr - length: is the 3439 * length of the complete packet outputs: - length: modified to remaining 3440 * length after control processing - netp: modified to new sctp_nets after 3441 * cookie-echo processing - return NULL to discard the packet (ie. no asoc, 3442 * bad packet,...) otherwise return the tcb for this packet 3443 */ 3444static struct sctp_tcb * 3445sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, 3446 struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp, 3447 struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen, 3448 uint32_t vrf_id, uint32_t table_id) 3449{ 3450 struct sctp_association *asoc; 3451 uint32_t vtag_in; 3452 int num_chunks = 0; /* number of control chunks processed */ 3453 int chk_length; 3454 int ret; 3455 int abort_no_unlock = 0; 3456 3457 /* 3458 * How big should this be, and should it be alloc'd? Lets try the 3459 * d-mtu-ceiling for now (2k) and that should hopefully work ... 3460 * until we get into jumbo grams and such.. 
3461 */ 3462 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE]; 3463 struct sctp_tcb *locked_tcb = stcb; 3464 int got_auth = 0; 3465 uint32_t auth_offset = 0, auth_len = 0; 3466 int auth_skipped = 0; 3467 3468 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n", 3469 iphlen, *offset, length, stcb); 3470 3471 /* validate chunk header length... */ 3472 if (ntohs(ch->chunk_length) < sizeof(*ch)) { 3473 return (NULL); 3474 } 3475 /* 3476 * validate the verification tag 3477 */ 3478 vtag_in = ntohl(sh->v_tag); 3479 3480 if (locked_tcb) { 3481 SCTP_TCB_LOCK_ASSERT(locked_tcb); 3482 } 3483 if (ch->chunk_type == SCTP_INITIATION) { 3484 if (vtag_in != 0) { 3485 /* protocol error- silently discard... */ 3486 SCTP_STAT_INCR(sctps_badvtag); 3487 if (locked_tcb) { 3488 SCTP_TCB_UNLOCK(locked_tcb); 3489 } 3490 return (NULL); 3491 } 3492 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) { 3493 /* 3494 * If there is no stcb, skip the AUTH chunk and process 3495 * later after a stcb is found (to validate the lookup was 3496 * valid. 
3497 */ 3498 if ((ch->chunk_type == SCTP_AUTHENTICATION) && 3499 (stcb == NULL) && !sctp_auth_disable) { 3500 /* save this chunk for later processing */ 3501 auth_skipped = 1; 3502 auth_offset = *offset; 3503 auth_len = ntohs(ch->chunk_length); 3504 3505 /* (temporarily) move past this chunk */ 3506 *offset += SCTP_SIZE32(auth_len); 3507 if (*offset >= length) { 3508 /* no more data left in the mbuf chain */ 3509 *offset = length; 3510 return (NULL); 3511 } 3512 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 3513 sizeof(struct sctp_chunkhdr), chunk_buf); 3514 } 3515 if (ch == NULL) { 3516 /* Help */ 3517 *offset = length; 3518 return (NULL); 3519 } 3520 if (ch->chunk_type == SCTP_COOKIE_ECHO) { 3521 goto process_control_chunks; 3522 } 3523 /* 3524 * first check if it's an ASCONF with an unknown src addr we 3525 * need to look inside to find the association 3526 */ 3527 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) { 3528 /* inp's refcount may be reduced */ 3529 SCTP_INP_INCR_REF(inp); 3530 3531 stcb = sctp_findassociation_ep_asconf(m, iphlen, 3532 *offset, sh, &inp, netp); 3533 if (stcb == NULL) { 3534 /* 3535 * reduce inp's refcount if not reduced in 3536 * sctp_findassociation_ep_asconf(). 3537 */ 3538 SCTP_INP_DECR_REF(inp); 3539 } 3540 /* now go back and verify any auth chunk to be sure */ 3541 if (auth_skipped && (stcb != NULL)) { 3542 struct sctp_auth_chunk *auth; 3543 3544 auth = (struct sctp_auth_chunk *) 3545 sctp_m_getptr(m, auth_offset, 3546 auth_len, chunk_buf); 3547 got_auth = 1; 3548 auth_skipped = 0; 3549 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, 3550 auth_offset)) { 3551 /* auth HMAC failed so dump it */ 3552 *offset = length; 3553 return (NULL); 3554 } else { 3555 /* remaining chunks are HMAC checked */ 3556 stcb->asoc.authenticated = 1; 3557 } 3558 } 3559 } 3560 if (stcb == NULL) { 3561 /* no association, so it's out of the blue... 
*/ 3562 sctp_handle_ootb(m, iphlen, *offset, sh, inp, NULL, 3563 vrf_id, table_id); 3564 *offset = length; 3565 if (locked_tcb) { 3566 SCTP_TCB_UNLOCK(locked_tcb); 3567 } 3568 return (NULL); 3569 } 3570 asoc = &stcb->asoc; 3571 /* ABORT and SHUTDOWN can use either v_tag... */ 3572 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) || 3573 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) || 3574 (ch->chunk_type == SCTP_PACKET_DROPPED)) { 3575 if ((vtag_in == asoc->my_vtag) || 3576 ((ch->chunk_flags & SCTP_HAD_NO_TCB) && 3577 (vtag_in == asoc->peer_vtag))) { 3578 /* this is valid */ 3579 } else { 3580 /* drop this packet... */ 3581 SCTP_STAT_INCR(sctps_badvtag); 3582 if (locked_tcb) { 3583 SCTP_TCB_UNLOCK(locked_tcb); 3584 } 3585 return (NULL); 3586 } 3587 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 3588 if (vtag_in != asoc->my_vtag) { 3589 /* 3590 * this could be a stale SHUTDOWN-ACK or the 3591 * peer never got the SHUTDOWN-COMPLETE and 3592 * is still hung; we have started a new asoc 3593 * but it won't complete until the shutdown 3594 * is completed 3595 */ 3596 if (locked_tcb) { 3597 SCTP_TCB_UNLOCK(locked_tcb); 3598 } 3599 sctp_handle_ootb(m, iphlen, *offset, sh, inp, 3600 NULL, vrf_id, table_id); 3601 return (NULL); 3602 } 3603 } else { 3604 /* for all other chunks, vtag must match */ 3605 if (vtag_in != asoc->my_vtag) { 3606 /* invalid vtag... */ 3607 SCTPDBG(SCTP_DEBUG_INPUT3, 3608 "invalid vtag: %xh, expect %xh\n", 3609 vtag_in, asoc->my_vtag); 3610 SCTP_STAT_INCR(sctps_badvtag); 3611 if (locked_tcb) { 3612 SCTP_TCB_UNLOCK(locked_tcb); 3613 } 3614 *offset = length; 3615 return (NULL); 3616 } 3617 } 3618 } /* end if !SCTP_COOKIE_ECHO */ 3619 /* 3620 * process all control chunks... 3621 */ 3622 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) || 3623 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) && 3624 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) { 3625 /* implied cookie-ack.. 
we must have lost the ack */ 3626 stcb->asoc.overall_error_count = 0; 3627 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, 3628 *netp); 3629 } 3630process_control_chunks: 3631 while (IS_SCTP_CONTROL(ch)) { 3632 /* validate chunk length */ 3633 chk_length = ntohs(ch->chunk_length); 3634 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n", 3635 ch->chunk_type, chk_length); 3636 if ((size_t)chk_length < sizeof(*ch) || 3637 (*offset + chk_length) > length) { 3638 *offset = length; 3639 if (locked_tcb) { 3640 SCTP_TCB_UNLOCK(locked_tcb); 3641 } 3642 return (NULL); 3643 } 3644 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks); 3645 /* 3646 * INIT-ACK only gets the init ack "header" portion only 3647 * because we don't have to process the peer's COOKIE. All 3648 * others get a complete chunk. 3649 */ 3650 if ((ch->chunk_type == SCTP_INITIATION_ACK) || 3651 (ch->chunk_type == SCTP_INITIATION)) { 3652 /* get an init-ack chunk */ 3653 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 3654 sizeof(struct sctp_init_ack_chunk), chunk_buf); 3655 if (ch == NULL) { 3656 *offset = length; 3657 if (locked_tcb) { 3658 SCTP_TCB_UNLOCK(locked_tcb); 3659 } 3660 return (NULL); 3661 } 3662 } else if (ch->chunk_type == SCTP_COOKIE_ECHO) { 3663 if (chk_length > sizeof(chunk_buf)) { 3664 /* 3665 * use just the size of the chunk buffer so 3666 * the front part of our cookie is intact. 3667 * The rest of cookie processing should use 3668 * the sctp_m_getptr() function to access 3669 * the other parts. 3670 */ 3671 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 3672 (sizeof(chunk_buf) - 4), 3673 chunk_buf); 3674 if (ch == NULL) { 3675 *offset = length; 3676 if (locked_tcb) { 3677 SCTP_TCB_UNLOCK(locked_tcb); 3678 } 3679 return (NULL); 3680 } 3681 } else { 3682 /* We can fit it all */ 3683 goto all_fits; 3684 } 3685 } else { 3686 /* get a complete chunk... 
*/ 3687 if ((size_t)chk_length > sizeof(chunk_buf)) { 3688 struct mbuf *oper; 3689 struct sctp_paramhdr *phdr; 3690 3691 oper = NULL; 3692 if (stcb) { 3693 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 3694 0, M_DONTWAIT, 1, MT_DATA); 3695 3696 if (oper) { 3697 /* pre-reserve some space */ 3698 SCTP_BUF_RESV_UF(oper, sizeof(struct sctp_chunkhdr)); 3699 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr); 3700 phdr = mtod(oper, struct sctp_paramhdr *); 3701 phdr->param_type = htons(SCTP_CAUSE_OUT_OF_RESC); 3702 phdr->param_length = htons(sizeof(struct sctp_paramhdr)); 3703 sctp_queue_op_err(stcb, oper); 3704 } 3705 } 3706 if (locked_tcb) { 3707 SCTP_TCB_UNLOCK(locked_tcb); 3708 } 3709 return (NULL); 3710 } 3711 all_fits: 3712 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 3713 chk_length, chunk_buf); 3714 if (ch == NULL) { 3715 SCTP_PRINTF("sctp_process_control: Can't get the all data....\n"); 3716 *offset = length; 3717 if (locked_tcb) { 3718 SCTP_TCB_UNLOCK(locked_tcb); 3719 } 3720 return (NULL); 3721 } 3722 } 3723 num_chunks++; 3724 /* Save off the last place we got a control from */ 3725 if (stcb != NULL) { 3726 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) { 3727 /* 3728 * allow last_control to be NULL if 3729 * ASCONF... 
ASCONF processing will find the 3730 * right net later 3731 */ 3732 if ((netp != NULL) && (*netp != NULL)) 3733 stcb->asoc.last_control_chunk_from = *netp; 3734 } 3735 } 3736#ifdef SCTP_AUDITING_ENABLED 3737 sctp_audit_log(0xB0, ch->chunk_type); 3738#endif 3739 3740 /* check to see if this chunk required auth, but isn't */ 3741 if ((stcb != NULL) && !sctp_auth_disable && 3742 sctp_auth_is_required_chunk(ch->chunk_type, 3743 stcb->asoc.local_auth_chunks) && 3744 !stcb->asoc.authenticated) { 3745 /* "silently" ignore */ 3746 SCTP_STAT_INCR(sctps_recvauthmissing); 3747 goto next_chunk; 3748 } 3749 switch (ch->chunk_type) { 3750 case SCTP_INITIATION: 3751 /* must be first and only chunk */ 3752 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n"); 3753 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3754 /* We are not interested anymore? */ 3755 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 3756 /* 3757 * collision case where we are 3758 * sending to them too 3759 */ 3760 ; 3761 } else { 3762 if (locked_tcb) { 3763 SCTP_TCB_UNLOCK(locked_tcb); 3764 } 3765 *offset = length; 3766 return (NULL); 3767 } 3768 } 3769 if ((num_chunks > 1) || 3770 (sctp_strict_init && (length - *offset > SCTP_SIZE32(chk_length)))) { 3771 *offset = length; 3772 if (locked_tcb) { 3773 SCTP_TCB_UNLOCK(locked_tcb); 3774 } 3775 return (NULL); 3776 } 3777 if ((stcb != NULL) && 3778 (SCTP_GET_STATE(&stcb->asoc) == 3779 SCTP_STATE_SHUTDOWN_ACK_SENT)) { 3780 sctp_send_shutdown_ack(stcb, 3781 stcb->asoc.primary_destination); 3782 *offset = length; 3783 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC); 3784 if (locked_tcb) { 3785 SCTP_TCB_UNLOCK(locked_tcb); 3786 } 3787 return (NULL); 3788 } 3789 if (netp) { 3790 sctp_handle_init(m, iphlen, *offset, sh, 3791 (struct sctp_init_chunk *)ch, inp, 3792 stcb, *netp, &abort_no_unlock, vrf_id, table_id); 3793 } 3794 if (abort_no_unlock) 3795 return (NULL); 3796 3797 *offset = length; 3798 if (locked_tcb) { 3799 SCTP_TCB_UNLOCK(locked_tcb); 3800 } 
3801 return (NULL); 3802 break; 3803 case SCTP_INITIATION_ACK: 3804 /* must be first and only chunk */ 3805 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n"); 3806 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3807 /* We are not interested anymore */ 3808 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 3809 ; 3810 } else { 3811 if (locked_tcb) { 3812 SCTP_TCB_UNLOCK(locked_tcb); 3813 } 3814 *offset = length; 3815 if (stcb) { 3816 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26); 3817 } 3818 return (NULL); 3819 } 3820 } 3821 if ((num_chunks > 1) || 3822 (sctp_strict_init && (length - *offset > SCTP_SIZE32(chk_length)))) { 3823 *offset = length; 3824 if (locked_tcb) { 3825 SCTP_TCB_UNLOCK(locked_tcb); 3826 } 3827 return (NULL); 3828 } 3829 if ((netp) && (*netp)) { 3830 ret = sctp_handle_init_ack(m, iphlen, *offset, sh, 3831 (struct sctp_init_ack_chunk *)ch, stcb, *netp, &abort_no_unlock, vrf_id, table_id); 3832 } else { 3833 ret = -1; 3834 } 3835 /* 3836 * Special case, I must call the output routine to 3837 * get the cookie echoed 3838 */ 3839 if (abort_no_unlock) 3840 return (NULL); 3841 3842 if ((stcb) && ret == 0) 3843 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC); 3844 *offset = length; 3845 if (locked_tcb) { 3846 SCTP_TCB_UNLOCK(locked_tcb); 3847 } 3848 return (NULL); 3849 break; 3850 case SCTP_SELECTIVE_ACK: 3851 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n"); 3852 SCTP_STAT_INCR(sctps_recvsacks); 3853 { 3854 struct sctp_sack_chunk *sack; 3855 int abort_now = 0; 3856 uint32_t a_rwnd, cum_ack; 3857 uint16_t num_seg; 3858 int nonce_sum_flag; 3859 3860 if ((stcb == NULL) || (chk_length < sizeof(struct sctp_sack_chunk))) { 3861 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on sack chunk, too small\n"); 3862 *offset = length; 3863 if (locked_tcb) { 3864 SCTP_TCB_UNLOCK(locked_tcb); 3865 } 3866 return (NULL); 3867 } 3868 sack = (struct sctp_sack_chunk *)ch; 3869 nonce_sum_flag = ch->chunk_flags & 
SCTP_SACK_NONCE_SUM; 3870 cum_ack = ntohl(sack->sack.cum_tsn_ack); 3871 num_seg = ntohs(sack->sack.num_gap_ack_blks); 3872 a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd); 3873 stcb->asoc.seen_a_sack_this_pkt = 1; 3874 if ((stcb->asoc.pr_sctp_cnt == 0) && 3875 (num_seg == 0) && 3876 ((compare_with_wrap(cum_ack, stcb->asoc.last_acked_seq, MAX_TSN)) || 3877 (cum_ack == stcb->asoc.last_acked_seq)) && 3878 (stcb->asoc.saw_sack_with_frags == 0) && 3879 (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) 3880 ) { 3881 /* 3882 * We have a SIMPLE sack having no 3883 * prior segments and data on sent 3884 * queue to be acked.. Use the 3885 * faster path sack processing. We 3886 * also allow window update sacks 3887 * with no missing segments to go 3888 * this way too. 3889 */ 3890 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, nonce_sum_flag, 3891 &abort_now); 3892 } else { 3893 if (netp && *netp) 3894 sctp_handle_sack(sack, stcb, *netp, &abort_now, chk_length, a_rwnd); 3895 } 3896 if (abort_now) { 3897 /* ABORT signal from sack processing */ 3898 *offset = length; 3899 return (NULL); 3900 } 3901 } 3902 break; 3903 case SCTP_HEARTBEAT_REQUEST: 3904 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n"); 3905 if ((stcb) && netp && *netp) { 3906 SCTP_STAT_INCR(sctps_recvheartbeat); 3907 sctp_send_heartbeat_ack(stcb, m, *offset, 3908 chk_length, *netp); 3909 3910 /* He's alive so give him credit */ 3911 stcb->asoc.overall_error_count = 0; 3912 } 3913 break; 3914 case SCTP_HEARTBEAT_ACK: 3915 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n"); 3916 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) { 3917 /* Its not ours */ 3918 *offset = length; 3919 if (locked_tcb) { 3920 SCTP_TCB_UNLOCK(locked_tcb); 3921 } 3922 return (NULL); 3923 } 3924 /* He's alive so give him credit */ 3925 stcb->asoc.overall_error_count = 0; 3926 SCTP_STAT_INCR(sctps_recvheartbeatack); 3927 if (netp && *netp) 3928 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch, 3929 stcb, *netp); 3930 
break; 3931 case SCTP_ABORT_ASSOCIATION: 3932 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT\n"); 3933 if ((stcb) && netp && *netp) 3934 sctp_handle_abort((struct sctp_abort_chunk *)ch, 3935 stcb, *netp); 3936 *offset = length; 3937 return (NULL); 3938 break; 3939 case SCTP_SHUTDOWN: 3940 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN\n"); 3941 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) { 3942 *offset = length; 3943 if (locked_tcb) { 3944 SCTP_TCB_UNLOCK(locked_tcb); 3945 } 3946 return (NULL); 3947 3948 } 3949 if (netp && *netp) { 3950 int abort_flag = 0; 3951 3952 sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch, 3953 stcb, *netp, &abort_flag); 3954 if (abort_flag) { 3955 *offset = length; 3956 return (NULL); 3957 } 3958 } 3959 break; 3960 case SCTP_SHUTDOWN_ACK: 3961 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK\n"); 3962 if ((stcb) && (netp) && (*netp)) 3963 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp); 3964 *offset = length; 3965 return (NULL); 3966 break; 3967 3968 case SCTP_OPERATION_ERROR: 3969 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n"); 3970 if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) { 3971 3972 *offset = length; 3973 return (NULL); 3974 } 3975 break; 3976 case SCTP_COOKIE_ECHO: 3977 SCTPDBG(SCTP_DEBUG_INPUT3, 3978 "SCTP_COOKIE-ECHO stcb is %p\n", stcb); 3979 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 3980 ; 3981 } else { 3982 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 3983 /* We are not interested anymore */ 3984 *offset = length; 3985 return (NULL); 3986 } 3987 } 3988 /* 3989 * First are we accepting? We do this again here 3990 * sincen it is possible that a previous endpoint 3991 * WAS listening responded to a INIT-ACK and then 3992 * closed. We opened and bound.. and are now no 3993 * longer listening. 
3994 */ 3995 if (inp->sctp_socket->so_qlimit == 0) { 3996 if ((stcb) && (inp->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 3997 /* 3998 * special case, is this a retran'd 3999 * COOKIE-ECHO or a restarting assoc 4000 * that is a peeled off or 4001 * one-to-one style socket. 4002 */ 4003 goto process_cookie_anyway; 4004 } 4005 sctp_abort_association(inp, stcb, m, iphlen, 4006 sh, NULL, vrf_id, 4007 table_id); 4008 *offset = length; 4009 return (NULL); 4010 } else if (inp->sctp_socket->so_qlimit) { 4011 /* we are accepting so check limits like TCP */ 4012 if (inp->sctp_socket->so_qlen > 4013 inp->sctp_socket->so_qlimit) { 4014 /* no space */ 4015 struct mbuf *oper; 4016 struct sctp_paramhdr *phdr; 4017 4018 if (sctp_abort_if_one_2_one_hits_limit) { 4019 oper = NULL; 4020 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 4021 0, M_DONTWAIT, 1, MT_DATA); 4022 if (oper) { 4023 SCTP_BUF_LEN(oper) = 4024 sizeof(struct sctp_paramhdr); 4025 phdr = mtod(oper, 4026 struct sctp_paramhdr *); 4027 phdr->param_type = 4028 htons(SCTP_CAUSE_OUT_OF_RESC); 4029 phdr->param_length = 4030 htons(sizeof(struct sctp_paramhdr)); 4031 } 4032 sctp_abort_association(inp, stcb, m, 4033 iphlen, sh, oper, vrf_id, table_id); 4034 } 4035 *offset = length; 4036 return (NULL); 4037 } 4038 } 4039 process_cookie_anyway: 4040 { 4041 struct mbuf *ret_buf; 4042 struct sctp_inpcb *linp; 4043 4044 if (stcb) { 4045 linp = NULL; 4046 } else { 4047 linp = inp; 4048 } 4049 4050 if (linp) { 4051 SCTP_ASOC_CREATE_LOCK(linp); 4052 } 4053 if (netp) { 4054 ret_buf = 4055 sctp_handle_cookie_echo(m, iphlen, 4056 *offset, sh, 4057 (struct sctp_cookie_echo_chunk *)ch, 4058 &inp, &stcb, netp, 4059 auth_skipped, 4060 auth_offset, 4061 auth_len, 4062 &locked_tcb, 4063 vrf_id, 4064 table_id); 4065 } else { 4066 ret_buf = NULL; 4067 } 4068 if (linp) { 4069 SCTP_ASOC_CREATE_UNLOCK(linp); 4070 } 4071 if (ret_buf == NULL) { 4072 if (locked_tcb) { 4073 SCTP_TCB_UNLOCK(locked_tcb); 4074 } 4075 SCTPDBG(SCTP_DEBUG_INPUT3, 
4076 "GAK, null buffer\n"); 4077 auth_skipped = 0; 4078 *offset = length; 4079 return (NULL); 4080 } 4081 /* if AUTH skipped, see if it verified... */ 4082 if (auth_skipped) { 4083 got_auth = 1; 4084 auth_skipped = 0; 4085 } 4086 if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) { 4087 /* 4088 * Restart the timer if we have 4089 * pending data 4090 */ 4091 struct sctp_tmit_chunk *chk; 4092 4093 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 4094 if (chk) { 4095 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4096 stcb->sctp_ep, stcb, 4097 chk->whoTo); 4098 } 4099 } 4100 } 4101 break; 4102 case SCTP_COOKIE_ACK: 4103 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK\n"); 4104 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) { 4105 if (locked_tcb) { 4106 SCTP_TCB_UNLOCK(locked_tcb); 4107 } 4108 return (NULL); 4109 } 4110 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4111 /* We are not interested anymore */ 4112 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4113 ; 4114 } else if (stcb) { 4115 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27); 4116 *offset = length; 4117 return (NULL); 4118 } 4119 } 4120 /* He's alive so give him credit */ 4121 if ((stcb) && netp && *netp) { 4122 stcb->asoc.overall_error_count = 0; 4123 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp); 4124 } 4125 break; 4126 case SCTP_ECN_ECHO: 4127 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n"); 4128 /* He's alive so give him credit */ 4129 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_ecne_chunk))) { 4130 /* Its not ours */ 4131 if (locked_tcb) { 4132 SCTP_TCB_UNLOCK(locked_tcb); 4133 } 4134 *offset = length; 4135 return (NULL); 4136 } 4137 if (stcb) { 4138 stcb->asoc.overall_error_count = 0; 4139 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, 4140 stcb); 4141 } 4142 break; 4143 case SCTP_ECN_CWR: 4144 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n"); 4145 /* He's alive so give him credit */ 4146 if ((stcb == NULL) || 
(chk_length != sizeof(struct sctp_cwr_chunk))) { 4147 /* Its not ours */ 4148 if (locked_tcb) { 4149 SCTP_TCB_UNLOCK(locked_tcb); 4150 } 4151 *offset = length; 4152 return (NULL); 4153 } 4154 if (stcb) { 4155 stcb->asoc.overall_error_count = 0; 4156 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb); 4157 } 4158 break; 4159 case SCTP_SHUTDOWN_COMPLETE: 4160 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE\n"); 4161 /* must be first and only chunk */ 4162 if ((num_chunks > 1) || 4163 (length - *offset > SCTP_SIZE32(chk_length))) { 4164 *offset = length; 4165 if (locked_tcb) { 4166 SCTP_TCB_UNLOCK(locked_tcb); 4167 } 4168 return (NULL); 4169 } 4170 if ((stcb) && netp && *netp) { 4171 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch, 4172 stcb, *netp); 4173 } 4174 *offset = length; 4175 return (NULL); 4176 break; 4177 case SCTP_ASCONF: 4178 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n"); 4179 /* He's alive so give him credit */ 4180 if (stcb) { 4181 stcb->asoc.overall_error_count = 0; 4182 sctp_handle_asconf(m, *offset, 4183 (struct sctp_asconf_chunk *)ch, stcb); 4184 } 4185 break; 4186 case SCTP_ASCONF_ACK: 4187 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n"); 4188 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) { 4189 /* Its not ours */ 4190 if (locked_tcb) { 4191 SCTP_TCB_UNLOCK(locked_tcb); 4192 } 4193 *offset = length; 4194 return (NULL); 4195 } 4196 if ((stcb) && netp && *netp) { 4197 /* He's alive so give him credit */ 4198 stcb->asoc.overall_error_count = 0; 4199 sctp_handle_asconf_ack(m, *offset, 4200 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp); 4201 } 4202 break; 4203 case SCTP_FORWARD_CUM_TSN: 4204 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n"); 4205 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) { 4206 /* Its not ours */ 4207 if (locked_tcb) { 4208 SCTP_TCB_UNLOCK(locked_tcb); 4209 } 4210 *offset = length; 4211 return (NULL); 4212 } 4213 /* He's alive so give him credit */ 4214 if (stcb) { 4215 int abort_flag = 
0; 4216 4217 stcb->asoc.overall_error_count = 0; 4218 *fwd_tsn_seen = 1; 4219 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4220 /* We are not interested anymore */ 4221 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_28); 4222 *offset = length; 4223 return (NULL); 4224 } 4225 sctp_handle_forward_tsn(stcb, 4226 (struct sctp_forward_tsn_chunk *)ch, &abort_flag); 4227 if (abort_flag) { 4228 *offset = length; 4229 return (NULL); 4230 } else { 4231 stcb->asoc.overall_error_count = 0; 4232 } 4233 4234 } 4235 break; 4236 case SCTP_STREAM_RESET: 4237 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n"); 4238 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4239 chk_length, chunk_buf); 4240 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) { 4241 /* Its not ours */ 4242 if (locked_tcb) { 4243 SCTP_TCB_UNLOCK(locked_tcb); 4244 } 4245 *offset = length; 4246 return (NULL); 4247 } 4248 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4249 /* We are not interested anymore */ 4250 sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29); 4251 *offset = length; 4252 return (NULL); 4253 } 4254 if (stcb->asoc.peer_supports_strreset == 0) { 4255 /* 4256 * hmm, peer should have announced this, but 4257 * we will turn it on since he is sending us 4258 * a stream reset. 
4259 */ 4260 stcb->asoc.peer_supports_strreset = 1; 4261 } 4262 if (sctp_handle_stream_reset(stcb, (struct sctp_stream_reset_out_req *)ch)) { 4263 /* stop processing */ 4264 *offset = length; 4265 return (NULL); 4266 } 4267 break; 4268 case SCTP_PACKET_DROPPED: 4269 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n"); 4270 /* re-get it all please */ 4271 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) { 4272 /* Its not ours */ 4273 if (locked_tcb) { 4274 SCTP_TCB_UNLOCK(locked_tcb); 4275 } 4276 *offset = length; 4277 return (NULL); 4278 } 4279 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4280 chk_length, chunk_buf); 4281 4282 if (ch && (stcb) && netp && (*netp)) { 4283 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch, 4284 stcb, *netp); 4285 } 4286 break; 4287 4288 case SCTP_AUTHENTICATION: 4289 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n"); 4290 if (sctp_auth_disable) 4291 goto unknown_chunk; 4292 4293 if (stcb == NULL) { 4294 /* save the first AUTH for later processing */ 4295 if (auth_skipped == 0) { 4296 auth_offset = *offset; 4297 auth_len = chk_length; 4298 auth_skipped = 1; 4299 } 4300 /* skip this chunk (temporarily) */ 4301 goto next_chunk; 4302 } 4303 if ((chk_length < (sizeof(struct sctp_auth_chunk))) || 4304 (chk_length > (sizeof(struct sctp_auth_chunk) + 4305 SCTP_AUTH_DIGEST_LEN_MAX))) { 4306 /* Its not ours */ 4307 if (locked_tcb) { 4308 SCTP_TCB_UNLOCK(locked_tcb); 4309 } 4310 *offset = length; 4311 return (NULL); 4312 } 4313 if (got_auth == 1) { 4314 /* skip this chunk... 
it's already auth'd */ 4315 goto next_chunk; 4316 } 4317 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4318 chk_length, chunk_buf); 4319 got_auth = 1; 4320 if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, 4321 m, *offset)) { 4322 /* auth HMAC failed so dump the packet */ 4323 *offset = length; 4324 return (stcb); 4325 } else { 4326 /* remaining chunks are HMAC checked */ 4327 stcb->asoc.authenticated = 1; 4328 } 4329 break; 4330 4331 default: 4332 unknown_chunk: 4333 /* it's an unknown chunk! */ 4334 if ((ch->chunk_type & 0x40) && (stcb != NULL)) { 4335 struct mbuf *mm; 4336 struct sctp_paramhdr *phd; 4337 4338 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 4339 0, M_DONTWAIT, 1, MT_DATA); 4340 if (mm) { 4341 phd = mtod(mm, struct sctp_paramhdr *); 4342 /* 4343 * We cheat and use param type since 4344 * we did not bother to define a 4345 * error cause struct. They are the 4346 * same basic format with different 4347 * names. 4348 */ 4349 phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK); 4350 phd->param_length = htons(chk_length + sizeof(*phd)); 4351 SCTP_BUF_LEN(mm) = sizeof(*phd); 4352 SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, SCTP_SIZE32(chk_length), 4353 M_DONTWAIT); 4354 if (SCTP_BUF_NEXT(mm)) { 4355 sctp_queue_op_err(stcb, mm); 4356 } else { 4357 sctp_m_freem(mm); 4358 } 4359 } 4360 } 4361 if ((ch->chunk_type & 0x80) == 0) { 4362 /* discard this packet */ 4363 *offset = length; 4364 return (stcb); 4365 } /* else skip this bad chunk and continue... 
*/
			break;
		}		/* switch (ch->chunk_type) */


next_chunk:
		/* get the next chunk */
		*offset += SCTP_SIZE32(chk_length);
		if (*offset >= length) {
			/* no more data left in the mbuf chain */
			break;
		}
		/*
		 * Pull the next chunk header into chunk_buf; if the mbuf
		 * chain does not actually contain it, drop the rest of the
		 * packet and release any association lock we still hold.
		 */
		ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_chunkhdr), chunk_buf);
		if (ch == NULL) {
			if (locked_tcb) {
				SCTP_TCB_UNLOCK(locked_tcb);
			}
			*offset = length;
			return (NULL);
		}
	}			/* while */
	return (stcb);
}


/*
 * Process the ECN bits we have something set so we must look to see if it is
 * ECN(0) or ECN(1) or CE.
 *
 * Called (from sctp_common_input_processing) only when ecn_allowed is set
 * and at least one ECT bit is present in the IP TOS byte.  CE is tested
 * first because SCTP_CE_BITS covers both ECT bits, so the ECT1/ECT0 arms
 * would otherwise also match a CE marking.  For ECT1 the receiver nonce sum
 * is advanced (masked to SCTP_SACK_NONCE_SUM, i.e. kept to its bit width);
 * for both ECT1 and ECT0 last_echo_tsn is dragged up to the cumulative TSN
 * so it cannot fall more than 2^31 behind and wrap incorrectly.
 * 'net' is currently unused here.
 */
static __inline void
sctp_process_ecn_marked_a(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits)
{
	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
		;
	} else if ((ecn_bits & SCTP_ECT1_BIT) == SCTP_ECT1_BIT) {
		/*
		 * we only add to the nonce sum for ECT1, ECT0 does not
		 * change the NS bit (that we have yet to find a way to send
		 * it yet).
		 */

		/* ECN Nonce stuff */
		stcb->asoc.receiver_nonce_sum++;
		stcb->asoc.receiver_nonce_sum &= SCTP_SACK_NONCE_SUM;

		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	} else if ((ecn_bits & SCTP_ECT0_BIT) == SCTP_ECT0_BIT) {
		/*
		 * Drag up the last_echo point if cumack is larger since we
		 * don't want the point falling way behind by more than
		 * 2^^31 and then having it be incorrect.
		 */
		if (compare_with_wrap(stcb->asoc.cumulative_tsn,
		    stcb->asoc.last_echo_tsn, MAX_TSN)) {
			stcb->asoc.last_echo_tsn = stcb->asoc.cumulative_tsn;
		}
	}
}

/*
 * ECN part 2: react to a Congestion Experienced (CE) marking after the DATA
 * chunks of the packet have been processed.  If the highest TSN seen in
 * this packet advances past last_echo_tsn, queue an ECN-Echo chunk toward
 * the peer (the incoming CWR will later remove it) and remember the TSN so
 * we do not echo for the same data twice.
 */
static __inline void
sctp_process_ecn_marked_b(struct sctp_tcb *stcb, struct sctp_nets *net,
    uint32_t high_tsn, uint8_t ecn_bits)
{
	if ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS) {
		/*
		 * we possibly must notify the sender that a congestion
		 * window reduction is in order. We do this by adding a ECNE
		 * chunk to the output chunk queue. The incoming CWR will
		 * remove this chunk.
		 */
		if (compare_with_wrap(high_tsn, stcb->asoc.last_echo_tsn,
		    MAX_TSN)) {
			/* Yep, we need to add a ECNE */
			sctp_send_ecn_echo(stcb, net, high_tsn);
			stcb->asoc.last_echo_tsn = high_tsn;
		}
	}
}

/*
 * common input chunk processing (v4 and v6)
 *
 * Dispatches a received SCTP packet: control chunks first (via
 * sctp_process_control), then DATA chunks (via sctp_process_data), ECN
 * handling, a possible SACK check when only a FWD-TSN was seen, and finally
 * a transmission trigger if anything is queued.
 *
 * Locking contract (as visible from this file): when 'stcb' is non-NULL it
 * arrives locked, and every path out of this function either unlocks it
 * explicitly (SCTP_TCB_UNLOCK) or returns after the association has been
 * aborted/destroyed, in which case the comments below note that no unlock
 * is needed.  NOTE(review): sctp_process_control may replace both stcb and
 * inp (COOKIE-ECHO can change our INP) — hence the reload of 'inp' below.
 */
void
sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset,
    int length, struct sctphdr *sh, struct sctp_chunkhdr *ch,
    struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets *net,
    uint8_t ecn_bits, uint32_t vrf_id, uint32_t table_id)
{
	/*
	 * Control chunk processing
	 */
	uint32_t high_tsn;
	int fwd_tsn_seen = 0, data_processed = 0;
	struct mbuf *m = *mm;
	int abort_flag = 0;
	int un_sent;

	SCTP_STAT_INCR(sctps_recvdatagrams);
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 1);
	sctp_auditing(0, inp, stcb, net);
#endif

	SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d\n",
	    m, iphlen, offset);

	if (stcb) {
		/* always clear this before beginning a packet */
		stcb->asoc.authenticated = 0;
		stcb->asoc.seen_a_sack_this_pkt = 0;
	}
	if (IS_SCTP_CONTROL(ch)) {
		/* process the control portion of the SCTP packet */
		stcb = sctp_process_control(m, iphlen, &offset, length, sh, ch,
		    inp, stcb, &net, &fwd_tsn_seen, vrf_id, table_id);
		if (stcb) {
			/*
			 * This covers us if the cookie-echo was there and
			 * it changes our INP.
			 */
			inp = stcb->sctp_ep;
		}
	} else {
		/*
		 * no control chunks, so pre-process DATA chunks (these
		 * checks are taken care of by control processing)
		 */

		/*
		 * if DATA only packet, and auth is required, then punt...
		 * can't have authenticated without any AUTH (control)
		 * chunks
		 */
		if ((stcb != NULL) && !sctp_auth_disable &&
		    sctp_auth_is_required_chunk(SCTP_DATA,
		    stcb->asoc.local_auth_chunks)) {
			/* "silently" ignore */
			SCTP_STAT_INCR(sctps_recvauthmissing);
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
		if (stcb == NULL) {
			/* out of the blue DATA chunk */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id, table_id);
			return;
		}
		if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) {
			/* v_tag mismatch! */
			SCTP_STAT_INCR(sctps_badvtag);
			SCTP_TCB_UNLOCK(stcb);
			return;
		}
	}

	if (stcb == NULL) {
		/*
		 * no valid TCB for this packet, or we found it's a bad
		 * packet while processing control, or we're done with this
		 * packet (done or skip rest of data), so we drop it...
		 */
		return;
	}
	/*
	 * DATA chunk processing
	 */
	/* plow through the data chunks while length > offset */

	/*
	 * Rest should be DATA only.  Check authentication state if AUTH for
	 * DATA is required.
	 */
	if ((length > offset) && (stcb != NULL) && !sctp_auth_disable &&
	    sctp_auth_is_required_chunk(SCTP_DATA,
	    stcb->asoc.local_auth_chunks) &&
	    !stcb->asoc.authenticated) {
		/* "silently" ignore: still try to send queued chunks */
		SCTP_STAT_INCR(sctps_recvauthmissing);
		SCTPDBG(SCTP_DEBUG_AUTH1,
		    "Data chunk requires AUTH, skipped\n");
		goto trigger_send;
	}
	if (length > offset) {
		int retval;

		/*
		 * First check to make sure our state is correct. We would
		 * not get here unless we really did have a tag, so we don't
		 * abort if this happens, just dump the chunk silently.
		 */
		switch (SCTP_GET_STATE(&stcb->asoc)) {
		case SCTP_STATE_COOKIE_ECHOED:
			/*
			 * we consider data with valid tags in this state
			 * shows us the cookie-ack was lost. Imply it was
			 * there.
			 */
			stcb->asoc.overall_error_count = 0;
			sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net);
			break;
		case SCTP_STATE_COOKIE_WAIT:
			/*
			 * We consider OOTB any data sent during asoc setup.
			 */
			sctp_handle_ootb(m, iphlen, offset, sh, inp, NULL,
			    vrf_id, table_id);
			SCTP_TCB_UNLOCK(stcb);
			return;
			break;
		case SCTP_STATE_EMPTY:	/* should not happen */
		case SCTP_STATE_INUSE:	/* should not happen */
		case SCTP_STATE_SHUTDOWN_RECEIVED:	/* This is a peer error */
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
		default:
			SCTP_TCB_UNLOCK(stcb);
			return;
			break;
		case SCTP_STATE_OPEN:
		case SCTP_STATE_SHUTDOWN_SENT:
			/* the only states in which DATA is acceptable */
			break;
		}
		/* take care of ECN, part 1. */
		if (stcb->asoc.ecn_allowed &&
		    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
			sctp_process_ecn_marked_a(stcb, net, ecn_bits);
		}
		/* plow through the data chunks while length > offset */
		retval = sctp_process_data(mm, iphlen, &offset, length, sh,
		    inp, stcb, net, &high_tsn);
		if (retval == 2) {
			/*
			 * The association aborted, NO UNLOCK needed since
			 * the association is destroyed.
			 */
			return;
		}
		data_processed = 1;
		if (retval == 0) {
			/* take care of ecn part 2. */
			if (stcb->asoc.ecn_allowed &&
			    (ecn_bits & (SCTP_ECT0_BIT | SCTP_ECT1_BIT))) {
				sctp_process_ecn_marked_b(stcb, net, high_tsn,
				    ecn_bits);
			}
		}
		/*
		 * Anything important needs to have been m_copy'ed in
		 * process_data
		 */
	}
	if ((data_processed == 0) && (fwd_tsn_seen)) {
		/*
		 * Only a FORWARD-TSN moved our cumulative point; run the
		 * SACK machinery ourselves since no DATA chunk will.
		 */
		int was_a_gap = 0;

		if (compare_with_wrap(stcb->asoc.highest_tsn_inside_map,
		    stcb->asoc.cumulative_tsn, MAX_TSN)) {
			/* there was a gap before this data was processed */
			was_a_gap = 1;
		}
		sctp_sack_check(stcb, 1, was_a_gap, &abort_flag);
		if (abort_flag) {
			/* Again, we aborted so NO UNLOCK needed */
			return;
		}
	}
	/* trigger send of any chunks in queue... */
trigger_send:
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 2);
	sctp_auditing(1, inp, stcb, net);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT1,
	    "Check for chunk output prw:%d tqe:%d tf=%d\n",
	    stcb->asoc.peers_rwnd,
	    TAILQ_EMPTY(&stcb->asoc.control_send_queue),
	    stcb->asoc.total_flight);
	un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight);

	/*
	 * Send if we have control chunks queued, or unsent data and either
	 * an open peer window or a closed window with nothing in flight
	 * (the zero-window-probe case).
	 */
	if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue) ||
	    ((un_sent) &&
	    (stcb->asoc.peers_rwnd > 0 ||
	    (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) {
		SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n");
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC);
		SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n");
	}
#ifdef SCTP_AUDITING_ENABLED
	sctp_audit_log(0xE0, 3);
	sctp_auditing(2, inp, stcb, net);
#endif
	SCTP_TCB_UNLOCK(stcb);
	return;
}


/*
 * sctp_input: IPv4 entry point for received SCTP packets.
 *
 * 'i_pak' is the received packet, 'off' the IP header length.  The function
 * pulls up IP + SCTP common header + first chunk header, rejects
 * broadcast/multicast destinations, verifies the SCTP checksum (unless the
 * sctp_no_csum_on_loopback sysctl allows skipping it for loopback traffic),
 * locates the endpoint/association, and hands the packet to
 * sctp_common_input_processing.
 *
 * Reference counting: sctp_findassociation_addr returns with inp's
 * ref-count raised and stcb locked; 'refcount_up' records the case where we
 * got an inp but no stcb, so the count is dropped again on both the normal
 * and the 'bad' exit paths.  The mbuf chain is always freed here.
 * NOTE(review): old-style (K&R) parameter declaration kept as in the
 * original source.
 */
void
sctp_input(i_pak, off)
	struct mbuf *i_pak;
	int off;

{
#ifdef SCTP_MBUF_LOGGING
	struct mbuf *mat;

#endif
	struct mbuf *m;
	int iphlen;
	uint32_t vrf_id = 0, table_id = 0;
	uint8_t ecn_bits;
	struct ip *ip;
	struct sctphdr *sh;
	struct sctp_inpcb *inp = NULL;

	uint32_t check, calc_check;
	struct sctp_nets *net;
	struct sctp_tcb *stcb = NULL;
	struct sctp_chunkhdr *ch;
	int refcount_up = 0;
	int length, mlen, offset;


	if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	if (SCTP_GET_PKT_TABLEID(i_pak, table_id)) {
		SCTP_RELEASE_PKT(i_pak);
		return;
	}
	mlen = SCTP_HEADER_LEN(i_pak);
	iphlen = off;
	m = SCTP_HEADER_TO_CHAIN(i_pak);
	net = NULL;
	SCTP_STAT_INCR(sctps_recvpackets);
	SCTP_STAT_INCR_COUNTER64(sctps_inpackets);

#ifdef SCTP_MBUF_LOGGING
	/* Log in any input mbufs */
	mat = m;
	while (mat) {
		if (SCTP_BUF_IS_EXTENDED(mat)) {
			sctp_log_mb(mat, SCTP_MBUF_INPUT);
		}
		mat = SCTP_BUF_NEXT(mat);
	}
#endif

	/*
	 * Get IP, SCTP, and first chunk header together in first mbuf.
	 */
	ip = mtod(m, struct ip *);
	offset = iphlen + sizeof(*sh) + sizeof(*ch);
	if (SCTP_BUF_LEN(m) < offset) {
		/* m_pullup may relocate the data; re-derive ip afterwards */
		if ((m = m_pullup(m, offset)) == 0) {
			SCTP_STAT_INCR(sctps_hdrops);
			return;
		}
		ip = mtod(m, struct ip *);
	}
	sh = (struct sctphdr *)((caddr_t)ip + iphlen);
	ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(*sh));

	/* SCTP does not allow broadcasts or multicasts */
	if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
		goto bad;
	}
	if (SCTP_IS_IT_BROADCAST(ip->ip_dst, m)) {
		/*
		 * We only look at broadcast if its a front state, All
		 * others we will not have a tcb for anyway.
		 */
		goto bad;
	}
	/* validate SCTP checksum */
	if ((sctp_no_csum_on_loopback == 0) || !SCTP_IS_IT_LOOPBACK(m)) {
		/*
		 * we do NOT validate things from the loopback if the sysctl
		 * is set to 1.
		 */
		check = sh->checksum;	/* save incoming checksum */
		if ((check == 0) && (sctp_no_csum_on_loopback)) {
			/*
			 * special hook for where we got a local address
			 * somehow routed across a non IFT_LOOP type
			 * interface
			 */
			if (ip->ip_src.s_addr == ip->ip_dst.s_addr)
				goto sctp_skip_csum_4;
		}
		sh->checksum = 0;	/* prepare for calc */
		calc_check = sctp_calculate_sum(m, &mlen, iphlen);
		if (calc_check != check) {
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n",
			    calc_check, check, m, mlen, iphlen);

			/*
			 * Even on a bad checksum we look up the assoc so a
			 * PACKET-DROPPED report can be sent to the peer.
			 */
			stcb = sctp_findassociation_addr(m, iphlen,
			    offset - sizeof(*ch),
			    sh, ch, &inp, &net,
			    vrf_id);
			if ((inp) && (stcb)) {
				sctp_send_packet_dropped(stcb, net, m, iphlen, 1);
				sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR);
			} else if ((inp != NULL) && (stcb == NULL)) {
				refcount_up = 1;
			}
			SCTP_STAT_INCR(sctps_badsum);
			SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors);
			goto bad;
		}
		sh->checksum = calc_check;
	}
sctp_skip_csum_4:
	/* destination port of 0 is illegal, based on RFC2960. */
	if (sh->dest_port == 0) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	/* validate mbuf chain length with IP payload length */
	if (mlen < (ip->ip_len - iphlen)) {
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
	/*
	 * Locate pcb and tcb for datagram sctp_findassociation_addr() wants
	 * IP/SCTP/first chunk header...
	 */
	stcb = sctp_findassociation_addr(m, iphlen, offset - sizeof(*ch),
	    sh, ch, &inp, &net, vrf_id);
	/* inp's ref-count increased && stcb locked */
	if (inp == NULL) {
		struct sctp_init_chunk *init_chk, chunk_buf;

		SCTP_STAT_INCR(sctps_noport);
#ifdef ICMP_BANDLIM
		/*
		 * we use the bandwidth limiting to protect against sending
		 * too many ABORTS all at once. In this case these count the
		 * same as an ICMP message.
		 */
		if (badport_bandlim(0) < 0)
			goto bad;
#endif				/* ICMP_BANDLIM */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Sending a ABORT from packet entry!\n");
		if (ch->chunk_type == SCTP_INITIATION) {
			/*
			 * we do a trick here to get the INIT tag, dig in
			 * and get the tag from the INIT and put it in the
			 * common header.
			 */
			init_chk = (struct sctp_init_chunk *)sctp_m_getptr(m,
			    iphlen + sizeof(*sh), sizeof(*init_chk),
			    (uint8_t *) & chunk_buf);
			if (init_chk != NULL)
				sh->v_tag = init_chk->init.initiate_tag;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_ACK) {
			sctp_send_shutdown_complete2(m, iphlen, sh, vrf_id,
			    table_id);
			goto bad;
		}
		if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) {
			goto bad;
		}
		if (ch->chunk_type != SCTP_ABORT_ASSOCIATION)
			sctp_send_abort(m, iphlen, sh, 0, NULL, vrf_id,
			    table_id);
		goto bad;
	} else if (stcb == NULL) {
		/* got an endpoint but no association: remember to drop ref */
		refcount_up = 1;
	}
#ifdef IPSEC
	/*
	 * I very much doubt any of the IPSEC stuff will work but I have no
	 * idea, so I will leave it in place.
	 */

	if (inp && ipsec4_in_reject(m, &inp->ip_inp.inp)) {
		ipsecstat.in_polvio++;
		SCTP_STAT_INCR(sctps_hdrops);
		goto bad;
	}
#endif				/* IPSEC */

	/*
	 * common chunk processing
	 */
	length = ip->ip_len + iphlen;
	offset -= sizeof(struct sctp_chunkhdr);

	ecn_bits = ip->ip_tos;

	sctp_common_input_processing(&m, iphlen, offset, length, sh, ch,
	    inp, stcb, net, ecn_bits, vrf_id,
	    table_id);
	/* inp's ref-count reduced && stcb unlocked */
	if (m) {
		sctp_m_freem(m);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_WLOCK(inp);
		SCTP_INP_DECR_REF(inp);
		SCTP_INP_WUNLOCK(inp);
	}
	return;
bad:
	/* error exit: unlock tcb, drop any extra inp reference, free mbufs */
	if (stcb) {
		SCTP_TCB_UNLOCK(stcb);
	}
	if ((inp) && (refcount_up)) {
		/* reduce ref-count */
		SCTP_INP_WLOCK(inp);
		SCTP_INP_DECR_REF(inp);
		SCTP_INP_WUNLOCK(inp);
	}
	if (m) {
		sctp_m_freem(m);
	}
	return;
}