sctp_input.c revision 269448
14887Schin/*- 24887Schin * Copyright (c) 2001-2008, by Cisco Systems, Inc. All rights reserved. 34887Schin * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 44887Schin * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 54887Schin * 64887Schin * Redistribution and use in source and binary forms, with or without 74887Schin * modification, are permitted provided that the following conditions are met: 84887Schin * 94887Schin * a) Redistributions of source code must retain the above copyright notice, 104887Schin * this list of conditions and the following disclaimer. 114887Schin * 124887Schin * b) Redistributions in binary form must reproduce the above copyright 134887Schin * notice, this list of conditions and the following disclaimer in 144887Schin * the documentation and/or other materials provided with the distribution. 154887Schin * 164887Schin * c) Neither the name of Cisco Systems, Inc. nor the names of its 174887Schin * contributors may be used to endorse or promote products derived 184887Schin * from this software without specific prior written permission. 194887Schin * 204887Schin * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 214887Schin * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 224887Schin * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 234887Schin * ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_input.c 269448 2014-08-02 21:36:40Z tuexen $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_bsd_addr.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_crc32.h>
#if defined(INET) || defined(INET6)
#include <netinet/udp.h>
#endif
#include <sys/smp.h>



/*
 * Stop the COOKIE and INIT retransmission timers on every destination
 * address (net) of the association.  Used while resolving INIT/COOKIE
 * collision cases.  The caller must hold the TCB lock (asserted below).
 */
static void
sctp_stop_all_cookie_timers(struct sctp_tcb *stcb)
{
	struct sctp_nets *net;

	/*
	 * This now not only stops all cookie timers it also stops any INIT
	 * timers as well. This will make sure that the timers are stopped
	 * in all collision cases.
	 */
	SCTP_TCB_LOCK_ASSERT(stcb);
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		if (net->rxt_timer.type == SCTP_TIMER_TYPE_COOKIE) {
			sctp_timer_stop(SCTP_TIMER_TYPE_COOKIE,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_1);
		} else if (net->rxt_timer.type == SCTP_TIMER_TYPE_INIT) {
			sctp_timer_stop(SCTP_TIMER_TYPE_INIT,
			    stcb->sctp_ep,
			    stcb,
			    net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_2);
		}
	}
}
/* INIT handler */
/*
 * Validate and respond to an incoming INIT chunk.
 *
 * m/iphlen/offset: the received packet and the chunk's position in it.
 * src/dst/sh: addresses and common header of the packet, used when an
 *   ABORT or INIT-ACK must be sent back.
 * cp: the INIT chunk; inp: the endpoint it was demultiplexed to;
 * stcb: existing association if the lookup found one, else NULL.
 * *abort_no_unlock is set to 1 when an ABORT was issued for an existing
 *   stcb, telling the caller not to unlock it (it has been freed).
 *
 * On any malformed/illegal INIT an ABORT (cause: invalid parameter or a
 * diagnostic cause) is sent.  Otherwise either a SHUTDOWN-ACK (collision
 * while in SHUTDOWN-ACK-SENT) or an INIT-ACK is generated.
 */
static void
sctp_handle_init(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_chunk *cp, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, int *abort_no_unlock,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_init *init;
	struct mbuf *op_err;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_init: handling INIT tcb:%p\n",
	    (void *)stcb);
	/* With no association yet, hold the INP read lock across processing. */
	if (stcb == NULL) {
		SCTP_INP_RLOCK(inp);
	}
	/* validate length */
	if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_chunk)) {
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/* validate parameters */
	init = &cp->init;
	/* RFC 4960: an initiate tag of zero is a protocol error. */
	if (init->initiate_tag == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (ntohl(init->a_rwnd) < SCTP_MIN_RWND) {
		/* invalid parameter... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_inbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (init->num_outbound_streams == 0) {
		/* protocol error... send abort */
		op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, "");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	if (sctp_validate_init_auth_params(m, offset + sizeof(*cp),
	    offset + ntohs(cp->ch.chunk_length))) {
		/* auth parameter(s) error... send abort */
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Problem with AUTH parameters");
		sctp_abort_association(inp, stcb, m, iphlen, src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		if (stcb)
			*abort_no_unlock = 1;
		goto outnow;
	}
	/*
	 * We are only accepting if we have a socket with positive
	 * so_qlimit.
	 */
	if ((stcb == NULL) &&
	    ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
	    (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
	    (inp->sctp_socket == NULL) ||
	    (inp->sctp_socket->so_qlimit == 0))) {
		/*
		 * FIX ME ?? What about TCP model and we have a
		 * match/restart case? Actually no fix is needed. the lookup
		 * will always find the existing assoc so stcb would not be
		 * NULL. It may be questionable to do this since we COULD
		 * just send back the INIT-ACK and hope that the app did
		 * accept()'s by the time the COOKIE was sent. But there is
		 * a price to pay for COOKIE generation and I don't want to
		 * pay it on the chance that the app will actually do some
		 * accepts(). The App just looses and should NOT be in this
		 * state :-)
		 */
		if (SCTP_BASE_SYSCTL(sctp_blackhole) == 0) {
			op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
			    "No listener");
			sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err,
			    use_mflowid, mflowid,
			    vrf_id, port);
		}
		goto outnow;
	}
	if ((stcb != NULL) &&
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT)) {
		/* Collision while shutting down: re-send SHUTDOWN-ACK. */
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending SHUTDOWN-ACK\n");
		sctp_send_shutdown_ack(stcb, NULL);
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED);
	} else {
		SCTPDBG(SCTP_DEBUG_INPUT3, "sctp_handle_init: sending INIT-ACK\n");
		sctp_send_initiate_ack(inp, stcb, m, iphlen, offset, src, dst,
		    sh, cp,
		    use_mflowid, mflowid,
		    vrf_id, port,
		    ((stcb == NULL) ? SCTP_HOLDS_LOCK : SCTP_NOT_LOCKED));
	}
outnow:
	if (stcb == NULL) {
		SCTP_INP_RUNLOCK(inp);
	}
}

/*
 * process peer "INIT/INIT-ACK" chunk returns value < 0 on error
 */
/*
 * Return the number of outgoing streams that still hold truly unsent data.
 *
 * so_locked: socket-lock disposition forwarded to sctp_free_a_strmoq();
 *   unused unless compiled for Apple/lock-testing.
 *
 * Side effect: performs deferred cleanup — fully-sent, completed messages
 * left at the head of a stream queue are removed and freed here.  Takes
 * and releases the TCB send lock.
 */
int
sctp_is_there_unsent_data(struct sctp_tcb *stcb, int so_locked
#if !defined(__APPLE__) && !defined(SCTP_SO_LOCK_TESTING)
    SCTP_UNUSED
#endif
)
{
	int unsent_data = 0;
	unsigned int i;
	struct sctp_stream_queue_pending *sp;
	struct sctp_association *asoc;

	/*
	 * This function returns the number of streams that have true unsent
	 * data on them. Note that as it looks through it will clean up any
	 * places that have old data that has been sent but left at top of
	 * stream queue.
	 */
	asoc = &stcb->asoc;
	SCTP_TCB_SEND_LOCK(stcb);
	if (!stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		/* Check to see if some data queued */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			/* sa_ignore FREED_MEMORY */
			sp = TAILQ_FIRST(&stcb->asoc.strmout[i].outqueue);
			if (sp == NULL) {
				continue;
			}
			if ((sp->msg_is_complete) &&
			    (sp->length == 0) &&
			    (sp->sender_all_done)) {
				/*
				 * We are doing differed cleanup. Last time
				 * through when we took all the data the
				 * sender_all_done was not set.
				 */
				if (sp->put_last_out == 0) {
					SCTP_PRINTF("Gak, put out entire msg with NO end!-1\n");
					SCTP_PRINTF("sender_done:%d len:%d msg_comp:%d put_last_out:%d\n",
					    sp->sender_all_done,
					    sp->length,
					    sp->msg_is_complete,
					    sp->put_last_out);
				}
				atomic_subtract_int(&stcb->asoc.stream_queue_cnt, 1);
				TAILQ_REMOVE(&stcb->asoc.strmout[i].outqueue, sp, next);
				if (sp->net) {
					sctp_free_remote_addr(sp->net);
					sp->net = NULL;
				}
				if (sp->data) {
					sctp_m_freem(sp->data);
					sp->data = NULL;
				}
				sctp_free_a_strmoq(stcb, sp, so_locked);
			} else {
				/* One stream with real data is enough. */
				unsent_data++;
				break;
			}
		}
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	return (unsent_data);
}
/*
 * Absorb the peer's parameters from an INIT (or the INIT echoed inside an
 * INIT-ACK/COOKIE): peer vtag, rwnd, initial TSN bookkeeping, and stream
 * counts.  If the peer offers fewer inbound streams than we pre-opened,
 * the excess outgoing streams (and any data queued on them) are abandoned.
 * Inbound stream structures are (re)allocated to match the peer's
 * outbound count, capped at our max_inbound_streams.
 *
 * Returns 0 on success, -1 if the inbound stream array cannot be
 * allocated.  Takes and releases the TCB send lock around the
 * stream-abandonment phase.
 */
static int
sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb)
{
	struct sctp_init *init;
	struct sctp_association *asoc;
	struct sctp_nets *lnet;
	unsigned int i;

	init = &cp->init;
	asoc = &stcb->asoc;
	/* save off parameters */
	asoc->peer_vtag = ntohl(init->initiate_tag);
	asoc->peers_rwnd = ntohl(init->a_rwnd);
	/* init tsn's */
	asoc->highest_tsn_inside_map = asoc->asconf_seq_in = ntohl(init->initial_tsn) - 1;

	if (!TAILQ_EMPTY(&asoc->nets)) {
		/* update any ssthresh's that may have a default */
		TAILQ_FOREACH(lnet, &asoc->nets, sctp_next) {
			lnet->ssthresh = asoc->peers_rwnd;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_INITIALIZATION);
			}
		}
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (asoc->pre_open_streams > ntohs(init->num_inbound_streams)) {
		unsigned int newcnt;
		struct sctp_stream_out *outs;
		struct sctp_stream_queue_pending *sp, *nsp;
		struct sctp_tmit_chunk *chk, *nchk;

		/* abandon the upper streams */
		newcnt = ntohs(init->num_inbound_streams);
		/* First purge already-chunked data bound for dropped sids. */
		TAILQ_FOREACH_SAFE(chk, &asoc->send_queue, sctp_next, nchk) {
			if (chk->rec.data.stream_number >= newcnt) {
				TAILQ_REMOVE(&asoc->send_queue, chk, sctp_next);
				asoc->send_queue_cnt--;
				if (asoc->strmout[chk->rec.data.stream_number].chunks_on_queues > 0) {
					asoc->strmout[chk->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
				} else {
					panic("No chunks on the queues for sid %u.", chk->rec.data.stream_number);
#endif
				}
				if (chk->data != NULL) {
					sctp_free_bufspace(stcb, asoc, chk, 1);
					/* Tell the ULP its datagram will never be sent. */
					sctp_ulp_notify(SCTP_NOTIFY_UNSENT_DG_FAIL, stcb,
					    0, chk, SCTP_SO_NOT_LOCKED);
					if (chk->data) {
						sctp_m_freem(chk->data);
						chk->data = NULL;
					}
				}
				sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
				/* sa_ignore FREED_MEMORY */
			}
		}
		/* Then drain the per-stream pending queues of dropped sids. */
		if (asoc->strmout) {
			for (i = newcnt; i < asoc->pre_open_streams; i++) {
				outs = &asoc->strmout[i];
				TAILQ_FOREACH_SAFE(sp, &outs->outqueue, next, nsp) {
					TAILQ_REMOVE(&outs->outqueue, sp, next);
					asoc->stream_queue_cnt--;
					sctp_ulp_notify(SCTP_NOTIFY_SPECIAL_SP_FAIL,
					    stcb, 0, sp, SCTP_SO_NOT_LOCKED);
					if (sp->data) {
						sctp_m_freem(sp->data);
						sp->data = NULL;
					}
					if (sp->net) {
						sctp_free_remote_addr(sp->net);
						sp->net = NULL;
					}
					/* Free the chunk */
					sctp_free_a_strmoq(stcb, sp, SCTP_SO_NOT_LOCKED);
					/* sa_ignore FREED_MEMORY */
				}
			}
		}
		/* cut back the count */
		asoc->pre_open_streams = newcnt;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	asoc->strm_realoutsize = asoc->streamoutcnt = asoc->pre_open_streams;

	/* EY - nr_sack: initialize highest tsn in nr_mapping_array */
	asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
		sctp_log_map(0, 5, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
	}
	/* This is the next one we expect */
	asoc->str_reset_seq_in = asoc->asconf_seq_in + 1;

	asoc->mapping_array_base_tsn = ntohl(init->initial_tsn);
	asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->asconf_seq_in;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;
	/* open the requested streams */

	if (asoc->strmin != NULL) {
		/* Free the old ones */
		struct sctp_queued_to_read *ctl, *nctl;

		for (i = 0; i < asoc->streamincnt; i++) {
			TAILQ_FOREACH_SAFE(ctl, &asoc->strmin[i].inqueue, next, nctl) {
				TAILQ_REMOVE(&asoc->strmin[i].inqueue, ctl, next);
				sctp_free_remote_addr(ctl->whoFrom);
				ctl->whoFrom = NULL;
				sctp_m_freem(ctl->data);
				ctl->data = NULL;
				sctp_free_a_readq(stcb, ctl);
			}
		}
		SCTP_FREE(asoc->strmin, SCTP_M_STRMI);
	}
	/* Inbound streams = min(peer's outbound, our configured max). */
	if (asoc->max_inbound_streams > ntohs(init->num_outbound_streams)) {
		asoc->streamincnt = ntohs(init->num_outbound_streams);
	} else {
		asoc->streamincnt = asoc->max_inbound_streams;
	}
	SCTP_MALLOC(asoc->strmin, struct sctp_stream_in *, asoc->streamincnt *
	    sizeof(struct sctp_stream_in), SCTP_M_STRMI);
	if (asoc->strmin == NULL) {
		/* we didn't get memory for the streams! */
		SCTPDBG(SCTP_DEBUG_INPUT2, "process_init: couldn't get memory for the streams!\n");
		return (-1);
	}
	for (i = 0; i < asoc->streamincnt; i++) {
		asoc->strmin[i].stream_no = i;
		/* 0xffff: "nothing delivered yet" sentinel for 16-bit SSNs. */
		asoc->strmin[i].last_sequence_delivered = 0xffff;
		TAILQ_INIT(&asoc->strmin[i].inqueue);
		asoc->strmin[i].delivery_started = 0;
	}
	/*
	 * load_address_from_init will put the addresses into the
	 * association when the COOKIE is processed or the INIT-ACK is
	 * processed. Both types of COOKIE's existing and new call this
	 * routine. It will remove addresses that are no longer in the
	 * association (for the restarting case where addresses are
	 * removed). Up front when the INIT arrives we will discard it if it
	 * is a restart and new addresses have been added.
	 */
	/* sa_ignore MEMLEAK */
	return (0);
}
/*
 * INIT-ACK message processing/consumption returns value < 0 on error
 */
/*
 * Consume an INIT-ACK: reject unrecognized must-abort parameters, absorb
 * the peer's INIT parameters (sctp_process_init), load the peer's
 * addresses, negotiate the HMAC, cancel the INIT timer, take an RTT
 * sample, and queue the COOKIE-ECHO.  Sets *abort_no_unlock when the
 * association was aborted (and thus freed) on the error paths.
 */
static int
sctp_process_init_ack(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh,
    struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int *abort_no_unlock,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id)
{
	struct sctp_association *asoc;
	struct mbuf *op_err;
	int retval, abort_flag;
	uint32_t initack_limit;
	int nat_friendly = 0;

	/* First verify that we have no illegal param's */
	abort_flag = 0;

	op_err = sctp_arethere_unrecognized_parameters(m,
	    (offset + sizeof(struct sctp_init_chunk)),
	    &abort_flag, (struct sctp_chunkhdr *)cp, &nat_friendly);
	if (abort_flag) {
		/* Send an abort and notify peer */
		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
		*abort_no_unlock = 1;
		return (-1);
	}
	asoc = &stcb->asoc;
	asoc->peer_supports_nat = (uint8_t) nat_friendly;
	/* process the peer's parameters in the INIT-ACK */
	retval = sctp_process_init((struct sctp_init_chunk *)cp, stcb);
	if (retval < 0) {
		return (retval);
	}
	initack_limit = offset + ntohs(cp->ch.chunk_length);
	/* load all addresses */
	if ((retval = sctp_load_addresses_from_init(stcb, m,
	    (offset + sizeof(struct sctp_init_chunk)), initack_limit,
	    src, dst, NULL))) {
		op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code),
		    "Problem with address parameters");
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "Load addresses from INIT causes an abort %d\n",
		    retval);
		sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
		    src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, net->port);
		*abort_no_unlock = 1;
		return (-1);
	}
	/* if the peer doesn't support asconf, flush the asconf queue */
	if (asoc->peer_supports_asconf == 0) {
		struct sctp_asconf_addr *param, *nparam;

		TAILQ_FOREACH_SAFE(param, &asoc->asconf_queue, next, nparam) {
			TAILQ_REMOVE(&asoc->asconf_queue, param, next);
			SCTP_FREE(param, SCTP_M_ASC_ADDR);
		}
	}
	stcb->asoc.peer_hmac_id = sctp_negotiate_hmacid(stcb->asoc.peer_hmacs,
	    stcb->asoc.local_hmacs);
	if (op_err) {
		/* Unrecognized-but-reportable params: echo them back. */
		sctp_queue_op_err(stcb, op_err);
		/* queuing will steal away the mbuf chain to the out queue */
		op_err = NULL;
	}
	/* extract the cookie and queue it to "echo" it back... */
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    0,
		    SCTP_FROM_SCTP_INPUT,
		    __LINE__);
	}
	stcb->asoc.overall_error_count = 0;
	net->error_count = 0;

	/*
	 * Cancel the INIT timer, We do this first before queueing the
	 * cookie. We always cancel at the primary to assue that we are
	 * canceling the timer started by the INIT which always goes to the
	 * primary.
	 */
	sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, stcb,
	    asoc->primary_destination, SCTP_FROM_SCTP_INPUT + SCTP_LOC_4);

	/* calculate the RTO */
	net->RTO = sctp_calculate_rto(stcb, asoc, net, &asoc->time_entered, sctp_align_safe_nocopy,
	    SCTP_RTT_FROM_NON_DATA);

	retval = sctp_send_cookie_echo(m, offset, stcb, net);
	if (retval < 0) {
		/*
		 * No cookie, we probably should send a op error. But in any
		 * case if there is no cookie in the INIT-ACK, we can
		 * abandon the peer, its broke.
		 */
		if (retval == -3) {
			/* We abort with an error of missing mandatory param */
			op_err = sctp_generate_cause(SCTP_CAUSE_MISSING_PARAM, "");
			if (op_err) {
				/*
				 * Expand beyond to include the mandatory
				 * param cookie
				 */
				struct sctp_inv_mandatory_param *mp;

				SCTP_BUF_LEN(op_err) =
				    sizeof(struct sctp_inv_mandatory_param);
				mp = mtod(op_err,
				    struct sctp_inv_mandatory_param *);
				/* Subtract the reserved param */
				mp->length =
				    htons(sizeof(struct sctp_inv_mandatory_param) - 2);
				mp->num_param = htonl(1);
				mp->param = htons(SCTP_STATE_COOKIE);
				mp->resv = 0;
			}
			sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen,
			    src, dst, sh, op_err,
			    use_mflowid, mflowid,
			    vrf_id, net->port);
			*abort_no_unlock = 1;
		}
		return (retval);
	}
	return (0);
}
/*
 * Process a HEARTBEAT-ACK: locate the destination the echoed HB info
 * names, confirm it (if the random values match), honor a pending
 * set-primary request, take an RTT sample, clear error counters, and
 * drive PF/mobility state transitions.
 *
 * cp:  the HEARTBEAT-ACK chunk (must be exactly one heartbeat long).
 * net: the destination the packet arrived from (NOT necessarily the
 *      address the original HB was sent to — that one is r_net below).
 */
static void
sctp_handle_heartbeat_ack(struct sctp_heartbeat_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sockaddr_storage store;
	struct sctp_nets *r_net, *f_net;
	struct timeval tv;
	int req_prim = 0;
	uint16_t old_error_counter;

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif

	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_heartbeat_chunk)) {
		/* Invalid length */
		return;
	}
	/* Rebuild the sockaddr the original heartbeat was sent to. */
	memset(&store, 0, sizeof(store));
	switch (cp->heartbeat.hb_info.addr_family) {
#ifdef INET
	case AF_INET:
		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in)) {
			sin = (struct sockaddr_in *)&store;
			sin->sin_family = cp->heartbeat.hb_info.addr_family;
			sin->sin_len = cp->heartbeat.hb_info.addr_len;
			sin->sin_port = stcb->rport;
			memcpy(&sin->sin_addr, cp->heartbeat.hb_info.address,
			    sizeof(sin->sin_addr));
		} else {
			return;
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if (cp->heartbeat.hb_info.addr_len == sizeof(struct sockaddr_in6)) {
			sin6 = (struct sockaddr_in6 *)&store;
			sin6->sin6_family = cp->heartbeat.hb_info.addr_family;
			sin6->sin6_len = cp->heartbeat.hb_info.addr_len;
			sin6->sin6_port = stcb->rport;
			memcpy(&sin6->sin6_addr, cp->heartbeat.hb_info.address,
			    sizeof(sin6->sin6_addr));
		} else {
			return;
		}
		break;
#endif
	default:
		return;
	}
	r_net = sctp_findnet(stcb, (struct sockaddr *)&store);
	if (r_net == NULL) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "Huh? I can't find the address I sent it to, discard\n");
		return;
	}
	/* NOTE(review): r_net is already known non-NULL here, the extra
	 * "r_net &&" below is redundant (harmless). */
	if ((r_net && (r_net->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
	    (r_net->heartbeat_random1 == cp->heartbeat.hb_info.random_value1) &&
	    (r_net->heartbeat_random2 == cp->heartbeat.hb_info.random_value2)) {
		/*
		 * If the its a HB and it's random value is correct when can
		 * confirm the destination.
		 */
		r_net->dest_state &= ~SCTP_ADDR_UNCONFIRMED;
		if (r_net->dest_state & SCTP_ADDR_REQ_PRIMARY) {
			/* A set-primary was deferred until confirmation. */
			stcb->asoc.primary_destination = r_net;
			r_net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
			f_net = TAILQ_FIRST(&stcb->asoc.nets);
			if (f_net != r_net) {
				/*
				 * first one on the list is NOT the primary
				 * sctp_cmpaddr() is much more efficent if
				 * the primary is the first on the list,
				 * make it so.
				 */
				TAILQ_REMOVE(&stcb->asoc.nets, r_net, sctp_next);
				TAILQ_INSERT_HEAD(&stcb->asoc.nets, r_net, sctp_next);
			}
			req_prim = 1;
		}
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED,
		    stcb, 0, (void *)r_net, SCTP_SO_NOT_LOCKED);
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
	}
	old_error_counter = r_net->error_count;
	r_net->error_count = 0;
	r_net->hb_responded = 1;
	tv.tv_sec = cp->heartbeat.hb_info.time_value_1;
	tv.tv_usec = cp->heartbeat.hb_info.time_value_2;
	/* Now lets do a RTO with this */
	r_net->RTO = sctp_calculate_rto(stcb, &stcb->asoc, r_net, &tv, sctp_align_safe_nocopy,
	    SCTP_RTT_FROM_NON_DATA);
	if (!(r_net->dest_state & SCTP_ADDR_REACHABLE)) {
		r_net->dest_state |= SCTP_ADDR_REACHABLE;
		sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
		    0, (void *)r_net, SCTP_SO_NOT_LOCKED);
	}
	if (r_net->dest_state & SCTP_ADDR_PF) {
		r_net->dest_state &= ~SCTP_ADDR_PF;
		/*
		 * NOTE(review): PF state was cleared on r_net but exit_pf is
		 * invoked with the arrival net, not r_net — looks like it
		 * should be r_net; confirm against later upstream revisions.
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
	}
	if (old_error_counter > 0) {
		/* Address had errors: restart its heartbeat cycle. */
		/* NOTE(review): reuses SCTP_LOC_3 (same as the stop above) —
		 * debug-location collision only, no functional impact. */
		sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, r_net);
	}
	if (r_net == stcb->asoc.primary_destination) {
		if (stcb->asoc.alternate) {
			/* release the alternate, primary is good */
			sctp_free_remote_addr(stcb->asoc.alternate);
			stcb->asoc.alternate = NULL;
		}
	}
	/* Mobility adaptation */
	if (req_prim) {
		if ((sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_BASE) ||
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_FASTHANDOFF)) &&
		    sctp_is_mobility_feature_on(stcb->sctp_ep,
		    SCTP_MOBILITY_PRIM_DELETED)) {

			/* NOTE(review): uses SCTP_FROM_SCTP_TIMER in
			 * sctp_input.c — likely meant SCTP_FROM_SCTP_INPUT;
			 * debug tag only. */
			sctp_timer_stop(SCTP_TIMER_TYPE_PRIM_DELETED, stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_TIMER + SCTP_LOC_7);
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_FASTHANDOFF)) {
				sctp_assoc_immediate_retrans(stcb,
				    stcb->asoc.primary_destination);
			}
			if (sctp_is_mobility_feature_on(stcb->sctp_ep,
			    SCTP_MOBILITY_BASE)) {
				sctp_move_chunks_from_net(stcb,
				    stcb->asoc.deleted_primary);
			}
			sctp_delete_prim_timer(stcb->sctp_ep, stcb,
			    stcb->asoc.deleted_primary);
		}
	}
}
/*
 * NAT colliding-state handling: the peer reported our vtag collided
 * behind a NAT.  If we are still in COOKIE-WAIT or COOKIE-ECHOED, pick a
 * fresh vtag, rehash the TCB under it, and retransmit the INIT.
 *
 * Returns 1 ("do not abort") when a new INIT was sent, 0 ("proceed with
 * the abort") in any other state.
 */
static int
sctp_handle_nat_colliding_state(struct sctp_tcb *stcb)
{
	/*
	 * return 0 means we want you to proceed with the abort non-zero
	 * means no abort processing
	 */
	struct sctpasochead *head;

	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_WAIT) {
		/* generate a new vtag and send init */
		/* Must leave the hash before the vtag (the hash key) changes. */
		LIST_REMOVE(stcb, sctp_asocs);
		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * treat like a case where the cookie expired i.e.: - dump
		 * current cookie. - generate a new vtag. - resend init.
		 */
		/* generate a new vtag and send init */
		LIST_REMOVE(stcb, sctp_asocs);
		/* Fall back from COOKIE-ECHOED to COOKIE-WAIT. */
		stcb->asoc.state &= ~SCTP_STATE_COOKIE_ECHOED;
		stcb->asoc.state |= SCTP_STATE_COOKIE_WAIT;
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, &stcb->asoc);
		stcb->asoc.my_vtag = sctp_select_a_tag(stcb->sctp_ep, stcb->sctp_ep->sctp_lport, stcb->rport, 1);
		head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, SCTP_BASE_INFO(hashasocmark))];
		/*
		 * put it in the bucket in the vtag hash of assoc's for the
		 * system
		 */
		LIST_INSERT_HEAD(head, stcb, sctp_asocs);
		sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	return (0);
}
/*
 * NAT missing-state handling: the peer lost its NAT state for us.  If
 * the peer supports AUTH we can repair via an authenticated ASCONF
 * state update; otherwise we must let the abort proceed.
 *
 * Returns 1 ("do not abort") when the update was sent, 0 otherwise.
 */
static int
sctp_handle_nat_missing_state(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*
	 * return 0 means we want you to proceed with the abort non-zero
	 * means no abort processing
	 */
	if (stcb->asoc.peer_supports_auth == 0) {
		SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_nat_missing_state: Peer does not support AUTH, cannot send an asconf\n");
		return (0);
	}
	sctp_asconf_send_nat_state_update(stcb, net);
	return (1);
}


/*
 * Process an incoming ABORT chunk.  The two special NAT error causes may
 * rescue the association instead of killing it; otherwise the user is
 * notified and the TCB is freed.  On return the stcb, if it existed, has
 * been freed (sctp_free_assoc) — callers must not touch it afterwards.
 */
static void
sctp_handle_abort(struct sctp_abort_chunk *abort,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif
	uint16_t len;
	uint16_t error;

	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: handling ABORT\n");
	if (stcb == NULL)
		return;

	len = ntohs(abort->ch.chunk_length);
	if (len > sizeof(struct sctp_chunkhdr)) {
		/*
		 * Need to check the cause codes for our two magic nat
		 * aborts which don't kill the assoc necessarily.
		 */
		/*
		 * NOTE(review): this only guarantees len > chunkhdr, yet a
		 * full sctp_missing_nat_state is read below — presumably the
		 * chunk was length-validated upstream; confirm.
		 */
		struct sctp_missing_nat_state *natc;

		natc = (struct sctp_missing_nat_state *)(abort + 1);
		error = ntohs(natc->cause);
		if (error == SCTP_CAUSE_NAT_COLLIDING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    abort->ch.chunk_flags);
			if (sctp_handle_nat_colliding_state(stcb)) {
				/* Association rescued: suppress the abort. */
				return;
			}
		} else if (error == SCTP_CAUSE_NAT_MISSING_STATE) {
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    abort->ch.chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return;
			}
		}
	} else {
		error = 0;
	}
	/* stop any receive timers */
	sctp_timer_stop(SCTP_TIMER_TYPE_RECV, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
	/* notify user of the abort and clean up... */
	sctp_abort_notification(stcb, 1, error, abort, SCTP_SO_NOT_LOCKED);
	/* free the tcb */
	SCTP_STAT_INCR_COUNTER32(sctps_aborted);
	if ((SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_OPEN) ||
	    (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
		SCTP_STAT_DECR_GAUGE32(sctps_currestab);
	}
#ifdef SCTP_ASOCLOG_OF_TSNS
	sctp_print_out_track_log(stcb);
#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	/*
	 * Socket lock must be taken before the TCB lock; hold a refcount
	 * so the assoc cannot vanish while the TCB lock is dropped.
	 */
	so = SCTP_INP_SO(stcb->sctp_ep);
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	stcb->asoc.state |= SCTP_STATE_WAS_ABORTED;
	/* NOTE(review): SCTP_LOC_6 is reused from the timer stop above —
	 * debug-location collision only. */
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_6);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
	SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_handle_abort: finished\n");
}
/*
 * Arm the per-destination PMTU and heartbeat timers on every net of a
 * newly established association, sending an immediate heartbeat to up to
 * sctp_hb_maxburst unconfirmed destinations.
 */
static void
sctp_start_net_timers(struct sctp_tcb *stcb)
{
	uint32_t cnt_hb_sent;
	struct sctp_nets *net;

	cnt_hb_sent = 0;
	TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
		/*
		 * For each network start: 1) A pmtu timer. 2) A HB timer 3)
		 * If the dest in unconfirmed send a hb as well if under
		 * max_hb_burst have been sent.
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, stcb->sctp_ep, stcb, net);
		sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
		    (cnt_hb_sent < SCTP_BASE_SYSCTL(sctp_hb_maxburst))) {
			sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED);
			cnt_hb_sent++;
		}
	}
	if (cnt_hb_sent) {
		/* Push the queued heartbeats onto the wire. */
		sctp_chunk_output(stcb->sctp_ep, stcb,
		    SCTP_OUTPUT_FROM_COOKIE_ACK,
		    SCTP_SO_NOT_LOCKED);
	}
}


/*
 * Process an incoming SHUTDOWN chunk: credit the piggybacked cumulative
 * ack, terminate any partial-delivery in progress, move to
 * SHUTDOWN-RECEIVED, and — once no unsent/unacked data remains — answer
 * with a SHUTDOWN-ACK and enter SHUTDOWN-ACK-SENT.  *abort_flag is set
 * by sctp_update_acked() if the cum-ack is bogus.
 */
static void
sctp_handle_shutdown(struct sctp_shutdown_chunk *cp,
    struct sctp_tcb *stcb, struct sctp_nets *net, int *abort_flag)
{
	struct sctp_association *asoc;
	int some_on_streamwheel;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	SCTPDBG(SCTP_DEBUG_INPUT2,
	    "sctp_handle_shutdown: handling SHUTDOWN\n");
	if (stcb == NULL)
		return;
	asoc = &stcb->asoc;
	if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) ||
	    (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) {
		/* SHUTDOWN before the association is up: ignore. */
		return;
	}
	if (ntohs(cp->ch.chunk_length) != sizeof(struct sctp_shutdown_chunk)) {
		/* Shutdown NOT the expected size */
		return;
	} else {
		/* Credit the cumulative TSN ack carried in the SHUTDOWN. */
		sctp_update_acked(stcb, cp, abort_flag);
		if (*abort_flag) {
			return;
		}
	}
	if (asoc->control_pdapi) {
		/*
		 * With a normal shutdown we assume the end of last record.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		so = SCTP_INP_SO(stcb->sctp_ep);
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
	/* goto SHUTDOWN_RECEIVED state to block new requests */
	if (stcb->sctp_socket) {
		if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) &&
		    (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) {
			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_RECEIVED);
			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
			/*
			 * notify upper layer that peer has initiated a
			 * shutdown
			 */
			sctp_ulp_notify(SCTP_NOTIFY_PEER_SHUTDOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);

			/* reset time */
			(void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered);
		}
	}
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * stop the shutdown timer, since we WILL move to
		 * SHUTDOWN-ACK-SENT.
		 */
		sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_8);
	}
	/* Now is there unsent data on a stream somewhere? */
	some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED);

	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    some_on_streamwheel) {
		/* By returning we will push more data out */
		return;
	} else {
		/* no outstanding data to send, so move on... */
		/* send SHUTDOWN-ACK */
		/* move to SHUTDOWN-ACK-SENT state */
		if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
		    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
		}
		SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
		SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
		sctp_stop_timers_for_shutdown(stcb);
		sctp_send_shutdown_ack(stcb, net);
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep,
		    stcb, net);
	}
}
*/ 950 some_on_streamwheel = sctp_is_there_unsent_data(stcb, SCTP_SO_NOT_LOCKED); 951 952 if (!TAILQ_EMPTY(&asoc->send_queue) || 953 !TAILQ_EMPTY(&asoc->sent_queue) || 954 some_on_streamwheel) { 955 /* By returning we will push more data out */ 956 return; 957 } else { 958 /* no outstanding data to send, so move on... */ 959 /* send SHUTDOWN-ACK */ 960 /* move to SHUTDOWN-ACK-SENT state */ 961 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 962 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 963 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 964 } 965 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 966 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 967 sctp_stop_timers_for_shutdown(stcb); 968 sctp_send_shutdown_ack(stcb, net); 969 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, 970 stcb, net); 971 } 972} 973 974static void 975sctp_handle_shutdown_ack(struct sctp_shutdown_ack_chunk *cp SCTP_UNUSED, 976 struct sctp_tcb *stcb, 977 struct sctp_nets *net) 978{ 979 struct sctp_association *asoc; 980 981#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 982 struct socket *so; 983 984 so = SCTP_INP_SO(stcb->sctp_ep); 985#endif 986 SCTPDBG(SCTP_DEBUG_INPUT2, 987 "sctp_handle_shutdown_ack: handling SHUTDOWN ACK\n"); 988 if (stcb == NULL) 989 return; 990 991 asoc = &stcb->asoc; 992 /* process according to association state */ 993 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_WAIT) || 994 (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED)) { 995 /* unexpected SHUTDOWN-ACK... do OOTB handling... */ 996 sctp_send_shutdown_complete(stcb, net, 1); 997 SCTP_TCB_UNLOCK(stcb); 998 return; 999 } 1000 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) && 1001 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT)) { 1002 /* unexpected SHUTDOWN-ACK... so ignore... */ 1003 SCTP_TCB_UNLOCK(stcb); 1004 return; 1005 } 1006 if (asoc->control_pdapi) { 1007 /* 1008 * With a normal shutdown we assume the end of last record. 
		 * Force-complete any pending partial delivery so the
		 * reader is released before the assoc goes away.
		 */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		asoc->control_pdapi->end_added = 1;
		asoc->control_pdapi->pdapi_aborted = 1;
		asoc->control_pdapi = NULL;
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		/*
		 * Socket lock must be acquired before the TCB lock: hold a
		 * refcount, drop/re-take the TCB lock around taking the
		 * socket lock, then re-check the assoc is still alive.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			/* assoc was freed while we were unlocked */
			SCTP_SOCKET_UNLOCK(so, 1);
			return;
		}
#endif
		sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	}
#ifdef INVARIANTS
	/*
	 * A SHUTDOWN-ACK is only legal once every queue has drained; with
	 * INVARIANTS on, a non-empty queue here is a kernel bug.
	 */
	if (!TAILQ_EMPTY(&asoc->send_queue) ||
	    !TAILQ_EMPTY(&asoc->sent_queue) ||
	    !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) {
		panic("Queues are not empty when handling SHUTDOWN-ACK");
	}
#endif
	/* stop the timer */
	sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWN, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_9);
	/* send SHUTDOWN-COMPLETE */
	sctp_send_shutdown_complete(stcb, net, 0);
	/* notify upper layer protocol */
	if (stcb->sctp_socket) {
		if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
		    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) {
			/* 1-to-1 style socket: nothing left to send. */
			stcb->sctp_socket->so_snd.sb_cc = 0;
		}
		sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED);
	}
	SCTP_STAT_INCR_COUNTER32(sctps_shutdown);
	/* free the TCB but first save off the ep */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	atomic_add_int(&stcb->asoc.refcnt, 1);
	SCTP_TCB_UNLOCK(stcb);
	SCTP_SOCKET_LOCK(so, 1);
	SCTP_TCB_LOCK(stcb);
	atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
	(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
	    SCTP_FROM_SCTP_INPUT + SCTP_LOC_10);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	SCTP_SOCKET_UNLOCK(so, 1);
#endif
}

/*
 * Skip past the param header and then we will find the chunk that caused
 * the problem.  There are two possibilities, ASCONF or FWD-TSN; anything
 * else means our peer is broken.  Turns off the corresponding feature so
 * we stop sending chunks the peer cannot handle.
 */
static void
sctp_process_unrecog_chunk(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr,
    struct sctp_nets *net)
{
	struct sctp_chunkhdr *chk;

	/* The offending chunk header immediately follows the cause header. */
	chk = (struct sctp_chunkhdr *)((caddr_t)phdr + sizeof(*phdr));
	switch (chk->chunk_type) {
	case SCTP_ASCONF_ACK:
	case SCTP_ASCONF:
		sctp_asconf_cleanup(stcb, net);
		break;
	case SCTP_FORWARD_CUM_TSN:
		stcb->asoc.prsctp_supported = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support chunk type %d(%x)??\n",
		    chk->chunk_type, (uint32_t) chk->chunk_type);
		break;
	}
}

/*
 * Skip past the param header and then we will find the param that caused
 * the problem.  These are ASCONF-related params or the PR-SCTP param;
 * each one turns off the specific feature the peer rejected.
 */
static void
sctp_process_unrecog_param(struct sctp_tcb *stcb, struct sctp_paramhdr *phdr)
{
	struct sctp_paramhdr *pbad;

	/* The rejected parameter immediately follows the cause header. */
	pbad = phdr + 1;
	switch (ntohs(pbad->param_type)) {
		/* pr-sctp draft */
	case SCTP_PRSCTP_SUPPORTED:
		stcb->asoc.prsctp_supported = 0;
		break;
	case SCTP_SUPPORTED_CHUNK_EXT:
		break;
		/* draft-ietf-tsvwg-addip-sctp */
	case SCTP_HAS_NAT_SUPPORT:
		stcb->asoc.peer_supports_nat = 0;
		break;
	case SCTP_ADD_IP_ADDRESS:
	case SCTP_DEL_IP_ADDRESS:
	case SCTP_SET_PRIM_ADDR:
		stcb->asoc.peer_supports_asconf = 0;
		break;
	case SCTP_SUCCESS_REPORT:
	case SCTP_ERROR_CAUSE_IND:
		SCTPDBG(SCTP_DEBUG_INPUT2, "Huh, the peer does not support success?
or error cause?\n");
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Turning off ASCONF to this strange peer\n");
		stcb->asoc.peer_supports_asconf = 0;
		break;
	default:
		SCTPDBG(SCTP_DEBUG_INPUT2,
		    "Peer does not support param type %d(%x)??\n",
		    pbad->param_type, (uint32_t) pbad->param_type);
		break;
	}
}

/*
 * Process an incoming OPERATION-ERROR chunk: walk its list of error
 * causes, act on the ones we recognize, and finally notify the ULP with
 * the first cause seen.  Returns 0 normally, -1 when the stale-cookie
 * handling frees the association (caller must not touch stcb then).
 */
static int
sctp_handle_error(struct sctp_chunkhdr *ch,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int chklen;
	struct sctp_paramhdr *phdr;
	uint16_t error, error_type;
	uint16_t error_len;
	struct sctp_association *asoc;
	int adjust;

#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

#endif

	/* parse through all of the errors and process */
	asoc = &stcb->asoc;
	phdr = (struct sctp_paramhdr *)((caddr_t)ch +
	    sizeof(struct sctp_chunkhdr));
	/* Bytes of cause data remaining in the chunk. */
	chklen = ntohs(ch->chunk_length) - sizeof(struct sctp_chunkhdr);
	error = 0;
	while ((size_t)chklen >= sizeof(struct sctp_paramhdr)) {
		/* Process an Error Cause */
		error_type = ntohs(phdr->param_type);
		error_len = ntohs(phdr->param_length);
		if ((error_len > chklen) || (error_len == 0)) {
			/* invalid param length for this param */
			SCTPDBG(SCTP_DEBUG_INPUT1, "Bogus length in error param- chunk left:%d errorlen:%d\n",
			    chklen, error_len);
			return (0);
		}
		if (error == 0) {
			/* report the first error cause */
			error = error_type;
		}
		switch (error_type) {
		case SCTP_CAUSE_INVALID_STREAM:
		case SCTP_CAUSE_MISSING_PARAM:
		case SCTP_CAUSE_INVALID_PARAM:
		case SCTP_CAUSE_NO_USER_DATA:
			SCTPDBG(SCTP_DEBUG_INPUT1, "Software error we got a %d back?
We have a bug :/ (or do they?)\n",
			    error_type);
			break;
		case SCTP_CAUSE_NAT_COLLIDING_STATE:
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received Colliding state abort flags:%x\n",
			    ch->chunk_flags);
			/* Nonzero return means the assoc is gone - stop. */
			if (sctp_handle_nat_colliding_state(stcb)) {
				return (0);
			}
			break;
		case SCTP_CAUSE_NAT_MISSING_STATE:
			SCTPDBG(SCTP_DEBUG_INPUT2, "Received missing state abort flags:%x\n",
			    ch->chunk_flags);
			if (sctp_handle_nat_missing_state(stcb, net)) {
				return (0);
			}
			break;
		case SCTP_CAUSE_STALE_COOKIE:
			/*
			 * We only act if we have echoed a cookie and are
			 * waiting.
			 */
			if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) {
				int *p;

				/*
				 * NOTE(review): this reads a 32-bit value
				 * right after the cause header without
				 * checking error_len covers those 4 bytes,
				 * and the pointer may be unaligned -
				 * verify against the cause layout.
				 */
				p = (int *)((caddr_t)phdr + sizeof(*phdr));
				/* Save the time doubled */
				asoc->cookie_preserve_req = ntohl(*p) << 1;
				asoc->stale_cookie_count++;
				if (asoc->stale_cookie_count >
				    asoc->max_init_times) {
					sctp_abort_notification(stcb, 0, 0, NULL, SCTP_SO_NOT_LOCKED);
					/* now free the asoc */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					so = SCTP_INP_SO(stcb->sctp_ep);
					atomic_add_int(&stcb->asoc.refcnt, 1);
					SCTP_TCB_UNLOCK(stcb);
					SCTP_SOCKET_LOCK(so, 1);
					SCTP_TCB_LOCK(stcb);
					atomic_subtract_int(&stcb->asoc.refcnt, 1);
#endif
					(void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC,
					    SCTP_FROM_SCTP_INPUT + SCTP_LOC_11);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
					SCTP_SOCKET_UNLOCK(so, 1);
#endif
					return (-1);
				}
				/* blast back to INIT state */
				sctp_toss_old_cookies(stcb, &stcb->asoc);
				asoc->state &= ~SCTP_STATE_COOKIE_ECHOED;
				asoc->state |= SCTP_STATE_COOKIE_WAIT;
				sctp_stop_all_cookie_timers(stcb);
				sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED);
			}
			break;
		case SCTP_CAUSE_UNRESOLVABLE_ADDR:
			/*
			 * Nothing we can do here, we don't do hostname
			 * addresses so if the peer does not like my IPv6
			 * (or IPv4 for that matter) it does not matter. If
			 * they don't support that type of address, they can
			 * NOT possibly get that packet type... i.e. with no
			 * IPv6 you can't receive a IPv6 packet. so we can
			 * safely ignore this one. If we ever added support
			 * for HOSTNAME Addresses, then we would need to do
			 * something here.
			 */
			break;
		case SCTP_CAUSE_UNRECOG_CHUNK:
			sctp_process_unrecog_chunk(stcb, phdr, net);
			break;
		case SCTP_CAUSE_UNRECOG_PARAM:
			sctp_process_unrecog_param(stcb, phdr);
			break;
		case SCTP_CAUSE_COOKIE_IN_SHUTDOWN:
			/*
			 * We ignore this since the timer will drive out a
			 * new cookie anyway and there timer will drive us
			 * to send a SHUTDOWN_COMPLETE. We can't send one
			 * here since we don't have their tag.
			 */
			break;
		case SCTP_CAUSE_DELETING_LAST_ADDR:
		case SCTP_CAUSE_RESOURCE_SHORTAGE:
		case SCTP_CAUSE_DELETING_SRC_ADDR:
			/*
			 * We should NOT get these here, but in a
			 * ASCONF-ACK.
			 */
			SCTPDBG(SCTP_DEBUG_INPUT2, "Peer sends ASCONF errors in a Operational Error?<%d>?\n",
			    error_type);
			break;
		case SCTP_CAUSE_OUT_OF_RESC:
			/*
			 * And what, pray tell do we do with the fact that
			 * the peer is out of resources? Not really sure we
			 * could do anything but abort. I suspect this
			 * should have came WITH an abort instead of in a
			 * OP-ERROR.
1278 */ 1279 break; 1280 default: 1281 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_handle_error: unknown error type = 0x%xh\n", 1282 error_type); 1283 break; 1284 } 1285 adjust = SCTP_SIZE32(error_len); 1286 chklen -= adjust; 1287 phdr = (struct sctp_paramhdr *)((caddr_t)phdr + adjust); 1288 } 1289 sctp_ulp_notify(SCTP_NOTIFY_REMOTE_ERROR, stcb, error, ch, SCTP_SO_NOT_LOCKED); 1290 return (0); 1291} 1292 1293static int 1294sctp_handle_init_ack(struct mbuf *m, int iphlen, int offset, 1295 struct sockaddr *src, struct sockaddr *dst, struct sctphdr *sh, 1296 struct sctp_init_ack_chunk *cp, struct sctp_tcb *stcb, 1297 struct sctp_nets *net, int *abort_no_unlock, 1298 uint8_t use_mflowid, uint32_t mflowid, 1299 uint32_t vrf_id) 1300{ 1301 struct sctp_init_ack *init_ack; 1302 struct mbuf *op_err; 1303 1304 SCTPDBG(SCTP_DEBUG_INPUT2, 1305 "sctp_handle_init_ack: handling INIT-ACK\n"); 1306 1307 if (stcb == NULL) { 1308 SCTPDBG(SCTP_DEBUG_INPUT2, 1309 "sctp_handle_init_ack: TCB is null\n"); 1310 return (-1); 1311 } 1312 if (ntohs(cp->ch.chunk_length) < sizeof(struct sctp_init_ack_chunk)) { 1313 /* Invalid length */ 1314 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 1315 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 1316 src, dst, sh, op_err, 1317 use_mflowid, mflowid, 1318 vrf_id, net->port); 1319 *abort_no_unlock = 1; 1320 return (-1); 1321 } 1322 init_ack = &cp->init; 1323 /* validate parameters */ 1324 if (init_ack->initiate_tag == 0) { 1325 /* protocol error... send an abort */ 1326 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 1327 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 1328 src, dst, sh, op_err, 1329 use_mflowid, mflowid, 1330 vrf_id, net->port); 1331 *abort_no_unlock = 1; 1332 return (-1); 1333 } 1334 if (ntohl(init_ack->a_rwnd) < SCTP_MIN_RWND) { 1335 /* protocol error... 
send an abort */ 1336 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 1337 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 1338 src, dst, sh, op_err, 1339 use_mflowid, mflowid, 1340 vrf_id, net->port); 1341 *abort_no_unlock = 1; 1342 return (-1); 1343 } 1344 if (init_ack->num_inbound_streams == 0) { 1345 /* protocol error... send an abort */ 1346 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 1347 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 1348 src, dst, sh, op_err, 1349 use_mflowid, mflowid, 1350 vrf_id, net->port); 1351 *abort_no_unlock = 1; 1352 return (-1); 1353 } 1354 if (init_ack->num_outbound_streams == 0) { 1355 /* protocol error... send an abort */ 1356 op_err = sctp_generate_cause(SCTP_CAUSE_INVALID_PARAM, ""); 1357 sctp_abort_association(stcb->sctp_ep, stcb, m, iphlen, 1358 src, dst, sh, op_err, 1359 use_mflowid, mflowid, 1360 vrf_id, net->port); 1361 *abort_no_unlock = 1; 1362 return (-1); 1363 } 1364 /* process according to association state... */ 1365 switch (stcb->asoc.state & SCTP_STATE_MASK) { 1366 case SCTP_STATE_COOKIE_WAIT: 1367 /* this is the expected state for this chunk */ 1368 /* process the INIT-ACK parameters */ 1369 if (stcb->asoc.primary_destination->dest_state & 1370 SCTP_ADDR_UNCONFIRMED) { 1371 /* 1372 * The primary is where we sent the INIT, we can 1373 * always consider it confirmed when the INIT-ACK is 1374 * returned. Do this before we load addresses 1375 * though. 
1376 */ 1377 stcb->asoc.primary_destination->dest_state &= 1378 ~SCTP_ADDR_UNCONFIRMED; 1379 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 1380 stcb, 0, (void *)stcb->asoc.primary_destination, SCTP_SO_NOT_LOCKED); 1381 } 1382 if (sctp_process_init_ack(m, iphlen, offset, src, dst, sh, cp, stcb, 1383 net, abort_no_unlock, 1384 use_mflowid, mflowid, 1385 vrf_id) < 0) { 1386 /* error in parsing parameters */ 1387 return (-1); 1388 } 1389 /* update our state */ 1390 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to COOKIE-ECHOED state\n"); 1391 SCTP_SET_STATE(&stcb->asoc, SCTP_STATE_COOKIE_ECHOED); 1392 1393 /* reset the RTO calc */ 1394 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 1395 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 1396 stcb->asoc.overall_error_count, 1397 0, 1398 SCTP_FROM_SCTP_INPUT, 1399 __LINE__); 1400 } 1401 stcb->asoc.overall_error_count = 0; 1402 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered); 1403 /* 1404 * collapse the init timer back in case of a exponential 1405 * backoff 1406 */ 1407 sctp_timer_start(SCTP_TIMER_TYPE_COOKIE, stcb->sctp_ep, 1408 stcb, net); 1409 /* 1410 * the send at the end of the inbound data processing will 1411 * cause the cookie to be sent 1412 */ 1413 break; 1414 case SCTP_STATE_SHUTDOWN_SENT: 1415 /* incorrect state... discard */ 1416 break; 1417 case SCTP_STATE_COOKIE_ECHOED: 1418 /* incorrect state... discard */ 1419 break; 1420 case SCTP_STATE_OPEN: 1421 /* incorrect state... discard */ 1422 break; 1423 case SCTP_STATE_EMPTY: 1424 case SCTP_STATE_INUSE: 1425 default: 1426 /* incorrect state... 
discard */ 1427 return (-1); 1428 break; 1429 } 1430 SCTPDBG(SCTP_DEBUG_INPUT1, "Leaving handle-init-ack end\n"); 1431 return (0); 1432} 1433 1434static struct sctp_tcb * 1435sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset, 1436 struct sockaddr *src, struct sockaddr *dst, 1437 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1438 struct sctp_inpcb *inp, struct sctp_nets **netp, 1439 struct sockaddr *init_src, int *notification, 1440 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 1441 uint8_t use_mflowid, uint32_t mflowid, 1442 uint32_t vrf_id, uint16_t port); 1443 1444 1445/* 1446 * handle a state cookie for an existing association m: input packet mbuf 1447 * chain-- assumes a pullup on IP/SCTP/COOKIE-ECHO chunk note: this is a 1448 * "split" mbuf and the cookie signature does not exist offset: offset into 1449 * mbuf to the cookie-echo chunk 1450 */ 1451static struct sctp_tcb * 1452sctp_process_cookie_existing(struct mbuf *m, int iphlen, int offset, 1453 struct sockaddr *src, struct sockaddr *dst, 1454 struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len, 1455 struct sctp_inpcb *inp, struct sctp_tcb *stcb, struct sctp_nets **netp, 1456 struct sockaddr *init_src, int *notification, 1457 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 1458 uint8_t use_mflowid, uint32_t mflowid, 1459 uint32_t vrf_id, uint16_t port) 1460{ 1461 struct sctp_association *asoc; 1462 struct sctp_init_chunk *init_cp, init_buf; 1463 struct sctp_init_ack_chunk *initack_cp, initack_buf; 1464 struct sctp_nets *net; 1465 struct mbuf *op_err; 1466 int init_offset, initack_offset, i; 1467 int retval; 1468 int spec_flag = 0; 1469 uint32_t how_indx; 1470 1471 net = *netp; 1472 /* I know that the TCB is non-NULL from the caller */ 1473 asoc = &stcb->asoc; 1474 for (how_indx = 0; how_indx < sizeof(asoc->cookie_how); how_indx++) { 1475 if (asoc->cookie_how[how_indx] == 0) 1476 break; 1477 } 1478 if (how_indx < 
sizeof(asoc->cookie_how)) { 1479 asoc->cookie_how[how_indx] = 1; 1480 } 1481 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 1482 /* SHUTDOWN came in after sending INIT-ACK */ 1483 sctp_send_shutdown_ack(stcb, stcb->asoc.primary_destination); 1484 op_err = sctp_generate_cause(SCTP_CAUSE_COOKIE_IN_SHUTDOWN, ""); 1485 sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err, 1486 use_mflowid, mflowid, 1487 vrf_id, net->port); 1488 if (how_indx < sizeof(asoc->cookie_how)) 1489 asoc->cookie_how[how_indx] = 2; 1490 return (NULL); 1491 } 1492 /* 1493 * find and validate the INIT chunk in the cookie (peer's info) the 1494 * INIT should start after the cookie-echo header struct (chunk 1495 * header, state cookie header struct) 1496 */ 1497 init_offset = offset += sizeof(struct sctp_cookie_echo_chunk); 1498 1499 init_cp = (struct sctp_init_chunk *) 1500 sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk), 1501 (uint8_t *) & init_buf); 1502 if (init_cp == NULL) { 1503 /* could not pull a INIT chunk in cookie */ 1504 return (NULL); 1505 } 1506 if (init_cp->ch.chunk_type != SCTP_INITIATION) { 1507 return (NULL); 1508 } 1509 /* 1510 * find and validate the INIT-ACK chunk in the cookie (my info) the 1511 * INIT-ACK follows the INIT chunk 1512 */ 1513 initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length)); 1514 initack_cp = (struct sctp_init_ack_chunk *) 1515 sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk), 1516 (uint8_t *) & initack_buf); 1517 if (initack_cp == NULL) { 1518 /* could not pull INIT-ACK chunk in cookie */ 1519 return (NULL); 1520 } 1521 if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) { 1522 return (NULL); 1523 } 1524 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1525 (ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag)) { 1526 /* 1527 * case D in Section 5.2.4 Table 2: MMAA process accordingly 1528 * to get into the OPEN state 1529 */ 1530 if 
(ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1531 /*- 1532 * Opps, this means that we somehow generated two vtag's 1533 * the same. I.e. we did: 1534 * Us Peer 1535 * <---INIT(tag=a)------ 1536 * ----INIT-ACK(tag=t)--> 1537 * ----INIT(tag=t)------> *1 1538 * <---INIT-ACK(tag=a)--- 1539 * <----CE(tag=t)------------- *2 1540 * 1541 * At point *1 we should be generating a different 1542 * tag t'. Which means we would throw away the CE and send 1543 * ours instead. Basically this is case C (throw away side). 1544 */ 1545 if (how_indx < sizeof(asoc->cookie_how)) 1546 asoc->cookie_how[how_indx] = 17; 1547 return (NULL); 1548 1549 } 1550 switch (SCTP_GET_STATE(asoc)) { 1551 case SCTP_STATE_COOKIE_WAIT: 1552 case SCTP_STATE_COOKIE_ECHOED: 1553 /* 1554 * INIT was sent but got a COOKIE_ECHO with the 1555 * correct tags... just accept it...but we must 1556 * process the init so that we can make sure we have 1557 * the right seq no's. 1558 */ 1559 /* First we must process the INIT !! 
*/ 1560 retval = sctp_process_init(init_cp, stcb); 1561 if (retval < 0) { 1562 if (how_indx < sizeof(asoc->cookie_how)) 1563 asoc->cookie_how[how_indx] = 3; 1564 return (NULL); 1565 } 1566 /* we have already processed the INIT so no problem */ 1567 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, 1568 net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_12); 1569 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_13); 1570 /* update current state */ 1571 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1572 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 1573 else 1574 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1575 1576 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1577 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1578 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1579 stcb->sctp_ep, stcb, asoc->primary_destination); 1580 } 1581 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1582 sctp_stop_all_cookie_timers(stcb); 1583 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1584 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1585 (inp->sctp_socket->so_qlimit == 0) 1586 ) { 1587#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1588 struct socket *so; 1589 1590#endif 1591 /* 1592 * Here is where collision would go if we 1593 * did a connect() and instead got a 1594 * init/init-ack/cookie done before the 1595 * init-ack came back.. 
1596 */ 1597 stcb->sctp_ep->sctp_flags |= 1598 SCTP_PCB_FLAGS_CONNECTED; 1599#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1600 so = SCTP_INP_SO(stcb->sctp_ep); 1601 atomic_add_int(&stcb->asoc.refcnt, 1); 1602 SCTP_TCB_UNLOCK(stcb); 1603 SCTP_SOCKET_LOCK(so, 1); 1604 SCTP_TCB_LOCK(stcb); 1605 atomic_add_int(&stcb->asoc.refcnt, -1); 1606 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1607 SCTP_SOCKET_UNLOCK(so, 1); 1608 return (NULL); 1609 } 1610#endif 1611 soisconnected(stcb->sctp_socket); 1612#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1613 SCTP_SOCKET_UNLOCK(so, 1); 1614#endif 1615 } 1616 /* notify upper layer */ 1617 *notification = SCTP_NOTIFY_ASSOC_UP; 1618 /* 1619 * since we did not send a HB make sure we don't 1620 * double things 1621 */ 1622 net->hb_responded = 1; 1623 net->RTO = sctp_calculate_rto(stcb, asoc, net, 1624 &cookie->time_entered, 1625 sctp_align_unsafe_makecopy, 1626 SCTP_RTT_FROM_NON_DATA); 1627 1628 if (stcb->asoc.sctp_autoclose_ticks && 1629 (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE))) { 1630 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 1631 inp, stcb, NULL); 1632 } 1633 break; 1634 default: 1635 /* 1636 * we're in the OPEN state (or beyond), so peer must 1637 * have simply lost the COOKIE-ACK 1638 */ 1639 break; 1640 } /* end switch */ 1641 sctp_stop_all_cookie_timers(stcb); 1642 /* 1643 * We ignore the return code here.. not sure if we should 1644 * somehow abort.. but we do have an existing asoc. This 1645 * really should not fail. 
1646 */ 1647 if (sctp_load_addresses_from_init(stcb, m, 1648 init_offset + sizeof(struct sctp_init_chunk), 1649 initack_offset, src, dst, init_src)) { 1650 if (how_indx < sizeof(asoc->cookie_how)) 1651 asoc->cookie_how[how_indx] = 4; 1652 return (NULL); 1653 } 1654 /* respond with a COOKIE-ACK */ 1655 sctp_toss_old_cookies(stcb, asoc); 1656 sctp_send_cookie_ack(stcb); 1657 if (how_indx < sizeof(asoc->cookie_how)) 1658 asoc->cookie_how[how_indx] = 5; 1659 return (stcb); 1660 } 1661 if (ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1662 ntohl(init_cp->init.initiate_tag) == asoc->peer_vtag && 1663 cookie->tie_tag_my_vtag == 0 && 1664 cookie->tie_tag_peer_vtag == 0) { 1665 /* 1666 * case C in Section 5.2.4 Table 2: XMOO silently discard 1667 */ 1668 if (how_indx < sizeof(asoc->cookie_how)) 1669 asoc->cookie_how[how_indx] = 6; 1670 return (NULL); 1671 } 1672 /* 1673 * If nat support, and the below and stcb is established, send back 1674 * a ABORT(colliding state) if we are established. 1675 */ 1676 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) && 1677 (asoc->peer_supports_nat) && 1678 ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1679 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) || 1680 (asoc->peer_vtag == 0)))) { 1681 /* 1682 * Special case - Peer's support nat. We may have two init's 1683 * that we gave out the same tag on since one was not 1684 * established.. i.e. we get INIT from host-1 behind the nat 1685 * and we respond tag-a, we get a INIT from host-2 behind 1686 * the nat and we get tag-a again. Then we bring up host-1 1687 * (or 2's) assoc, Then comes the cookie from hsot-2 (or 1). 1688 * Now we have colliding state. We must send an abort here 1689 * with colliding state indication. 
1690 */ 1691 op_err = sctp_generate_cause(SCTP_CAUSE_NAT_COLLIDING_STATE, ""); 1692 sctp_send_abort(m, iphlen, src, dst, sh, 0, op_err, 1693 use_mflowid, mflowid, 1694 vrf_id, port); 1695 return (NULL); 1696 } 1697 if ((ntohl(initack_cp->init.initiate_tag) == asoc->my_vtag) && 1698 ((ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) || 1699 (asoc->peer_vtag == 0))) { 1700 /* 1701 * case B in Section 5.2.4 Table 2: MXAA or MOAA my info 1702 * should be ok, re-accept peer info 1703 */ 1704 if (ntohl(initack_cp->init.initial_tsn) != asoc->init_seq_number) { 1705 /* 1706 * Extension of case C. If we hit this, then the 1707 * random number generator returned the same vtag 1708 * when we first sent our INIT-ACK and when we later 1709 * sent our INIT. The side with the seq numbers that 1710 * are different will be the one that normnally 1711 * would have hit case C. This in effect "extends" 1712 * our vtags in this collision case to be 64 bits. 1713 * The same collision could occur aka you get both 1714 * vtag and seq number the same twice in a row.. but 1715 * is much less likely. If it did happen then we 1716 * would proceed through and bring up the assoc.. we 1717 * may end up with the wrong stream setup however.. 1718 * which would be bad.. but there is no way to 1719 * tell.. 
until we send on a stream that does not 1720 * exist :-) 1721 */ 1722 if (how_indx < sizeof(asoc->cookie_how)) 1723 asoc->cookie_how[how_indx] = 7; 1724 1725 return (NULL); 1726 } 1727 if (how_indx < sizeof(asoc->cookie_how)) 1728 asoc->cookie_how[how_indx] = 8; 1729 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_14); 1730 sctp_stop_all_cookie_timers(stcb); 1731 /* 1732 * since we did not send a HB make sure we don't double 1733 * things 1734 */ 1735 net->hb_responded = 1; 1736 if (stcb->asoc.sctp_autoclose_ticks && 1737 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1738 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1739 NULL); 1740 } 1741 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1742 asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams); 1743 1744 if (ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) { 1745 /* 1746 * Ok the peer probably discarded our data (if we 1747 * echoed a cookie+data). So anything on the 1748 * sent_queue should be marked for retransmit, we 1749 * may not get something to kick us so it COULD 1750 * still take a timeout to move these.. but it can't 1751 * hurt to mark them. 
1752 */ 1753 struct sctp_tmit_chunk *chk; 1754 1755 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 1756 if (chk->sent < SCTP_DATAGRAM_RESEND) { 1757 chk->sent = SCTP_DATAGRAM_RESEND; 1758 sctp_flight_size_decrease(chk); 1759 sctp_total_flight_decrease(stcb, chk); 1760 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1761 spec_flag++; 1762 } 1763 } 1764 1765 } 1766 /* process the INIT info (peer's info) */ 1767 retval = sctp_process_init(init_cp, stcb); 1768 if (retval < 0) { 1769 if (how_indx < sizeof(asoc->cookie_how)) 1770 asoc->cookie_how[how_indx] = 9; 1771 return (NULL); 1772 } 1773 if (sctp_load_addresses_from_init(stcb, m, 1774 init_offset + sizeof(struct sctp_init_chunk), 1775 initack_offset, src, dst, init_src)) { 1776 if (how_indx < sizeof(asoc->cookie_how)) 1777 asoc->cookie_how[how_indx] = 10; 1778 return (NULL); 1779 } 1780 if ((asoc->state & SCTP_STATE_COOKIE_WAIT) || 1781 (asoc->state & SCTP_STATE_COOKIE_ECHOED)) { 1782 *notification = SCTP_NOTIFY_ASSOC_UP; 1783 1784 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 1785 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) && 1786 (inp->sctp_socket->so_qlimit == 0)) { 1787#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1788 struct socket *so; 1789 1790#endif 1791 stcb->sctp_ep->sctp_flags |= 1792 SCTP_PCB_FLAGS_CONNECTED; 1793#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1794 so = SCTP_INP_SO(stcb->sctp_ep); 1795 atomic_add_int(&stcb->asoc.refcnt, 1); 1796 SCTP_TCB_UNLOCK(stcb); 1797 SCTP_SOCKET_LOCK(so, 1); 1798 SCTP_TCB_LOCK(stcb); 1799 atomic_add_int(&stcb->asoc.refcnt, -1); 1800 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1801 SCTP_SOCKET_UNLOCK(so, 1); 1802 return (NULL); 1803 } 1804#endif 1805 soisconnected(stcb->sctp_socket); 1806#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1807 SCTP_SOCKET_UNLOCK(so, 1); 1808#endif 1809 } 1810 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) 1811 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 
1812 else 1813 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1814 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1815 } else if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1816 SCTP_STAT_INCR_COUNTER32(sctps_restartestab); 1817 } else { 1818 SCTP_STAT_INCR_COUNTER32(sctps_collisionestab); 1819 } 1820 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1821 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1822 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1823 stcb->sctp_ep, stcb, asoc->primary_destination); 1824 } 1825 sctp_stop_all_cookie_timers(stcb); 1826 sctp_toss_old_cookies(stcb, asoc); 1827 sctp_send_cookie_ack(stcb); 1828 if (spec_flag) { 1829 /* 1830 * only if we have retrans set do we do this. What 1831 * this call does is get only the COOKIE-ACK out and 1832 * then when we return the normal call to 1833 * sctp_chunk_output will get the retrans out behind 1834 * this. 1835 */ 1836 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_COOKIE_ACK, SCTP_SO_NOT_LOCKED); 1837 } 1838 if (how_indx < sizeof(asoc->cookie_how)) 1839 asoc->cookie_how[how_indx] = 11; 1840 1841 return (stcb); 1842 } 1843 if ((ntohl(initack_cp->init.initiate_tag) != asoc->my_vtag && 1844 ntohl(init_cp->init.initiate_tag) != asoc->peer_vtag) && 1845 cookie->tie_tag_my_vtag == asoc->my_vtag_nonce && 1846 cookie->tie_tag_peer_vtag == asoc->peer_vtag_nonce && 1847 cookie->tie_tag_peer_vtag != 0) { 1848 struct sctpasochead *head; 1849 1850#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1851 struct socket *so; 1852 1853#endif 1854 1855 if (asoc->peer_supports_nat) { 1856 /* 1857 * This is a gross gross hack. Just call the 1858 * cookie_new code since we are allowing a duplicate 1859 * association. I hope this works... 
1860 */ 1861 return (sctp_process_cookie_new(m, iphlen, offset, src, dst, 1862 sh, cookie, cookie_len, 1863 inp, netp, init_src, notification, 1864 auth_skipped, auth_offset, auth_len, 1865 use_mflowid, mflowid, 1866 vrf_id, port)); 1867 } 1868 /* 1869 * case A in Section 5.2.4 Table 2: XXMM (peer restarted) 1870 */ 1871 /* temp code */ 1872 if (how_indx < sizeof(asoc->cookie_how)) 1873 asoc->cookie_how[how_indx] = 12; 1874 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_15); 1875 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, inp, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16); 1876 1877 /* notify upper layer */ 1878 *notification = SCTP_NOTIFY_ASSOC_RESTART; 1879 atomic_add_int(&stcb->asoc.refcnt, 1); 1880 if ((SCTP_GET_STATE(asoc) != SCTP_STATE_OPEN) && 1881 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_RECEIVED) && 1882 (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT)) { 1883 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 1884 } 1885 if (SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) { 1886 SCTP_STAT_INCR_GAUGE32(sctps_restartestab); 1887 } else if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1888 SCTP_STAT_INCR_GAUGE32(sctps_collisionestab); 1889 } 1890 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 1891 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1892 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1893 stcb->sctp_ep, stcb, asoc->primary_destination); 1894 1895 } else if (!(asoc->state & SCTP_STATE_SHUTDOWN_SENT)) { 1896 /* move to OPEN state, if not in SHUTDOWN_SENT */ 1897 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 1898 } 1899 asoc->pre_open_streams = 1900 ntohs(initack_cp->init.num_outbound_streams); 1901 asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn); 1902 asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number; 1903 asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1; 1904 1905 asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1; 1906 1907 asoc->str_reset_seq_in 
= asoc->init_seq_number; 1908 1909 asoc->advanced_peer_ack_point = asoc->last_acked_seq; 1910 if (asoc->mapping_array) { 1911 memset(asoc->mapping_array, 0, 1912 asoc->mapping_array_size); 1913 } 1914 if (asoc->nr_mapping_array) { 1915 memset(asoc->nr_mapping_array, 0, 1916 asoc->mapping_array_size); 1917 } 1918 SCTP_TCB_UNLOCK(stcb); 1919#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1920 so = SCTP_INP_SO(stcb->sctp_ep); 1921 SCTP_SOCKET_LOCK(so, 1); 1922#endif 1923 SCTP_INP_INFO_WLOCK(); 1924 SCTP_INP_WLOCK(stcb->sctp_ep); 1925 SCTP_TCB_LOCK(stcb); 1926 atomic_add_int(&stcb->asoc.refcnt, -1); 1927 /* send up all the data */ 1928 SCTP_TCB_SEND_LOCK(stcb); 1929 1930 sctp_report_all_outbound(stcb, 0, 1, SCTP_SO_LOCKED); 1931 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1932 stcb->asoc.strmout[i].chunks_on_queues = 0; 1933 stcb->asoc.strmout[i].stream_no = i; 1934 stcb->asoc.strmout[i].next_sequence_send = 0; 1935 stcb->asoc.strmout[i].last_msg_incomplete = 0; 1936 } 1937 /* process the INIT-ACK info (my info) */ 1938 asoc->my_vtag = ntohl(initack_cp->init.initiate_tag); 1939 asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd); 1940 1941 /* pull from vtag hash */ 1942 LIST_REMOVE(stcb, sctp_asocs); 1943 /* re-insert to new vtag position */ 1944 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(stcb->asoc.my_vtag, 1945 SCTP_BASE_INFO(hashasocmark))]; 1946 /* 1947 * put it in the bucket in the vtag hash of assoc's for the 1948 * system 1949 */ 1950 LIST_INSERT_HEAD(head, stcb, sctp_asocs); 1951 1952 SCTP_TCB_SEND_UNLOCK(stcb); 1953 SCTP_INP_WUNLOCK(stcb->sctp_ep); 1954 SCTP_INP_INFO_WUNLOCK(); 1955#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1956 SCTP_SOCKET_UNLOCK(so, 1); 1957#endif 1958 asoc->total_flight = 0; 1959 asoc->total_flight_count = 0; 1960 /* process the INIT info (peer's info) */ 1961 retval = sctp_process_init(init_cp, stcb); 1962 if (retval < 0) { 1963 if (how_indx < sizeof(asoc->cookie_how)) 1964 asoc->cookie_how[how_indx] = 13; 

			return (NULL);
		}
		/*
		 * since we did not send a HB make sure we don't double
		 * things
		 */
		net->hb_responded = 1;

		if (sctp_load_addresses_from_init(stcb, m,
		    init_offset + sizeof(struct sctp_init_chunk),
		    initack_offset, src, dst, init_src)) {
			/* peer address list in the embedded INIT was bad */
			if (how_indx < sizeof(asoc->cookie_how))
				asoc->cookie_how[how_indx] = 14;

			return (NULL);
		}
		/* respond with a COOKIE-ACK */
		sctp_stop_all_cookie_timers(stcb);
		sctp_toss_old_cookies(stcb, asoc);
		sctp_send_cookie_ack(stcb);
		if (how_indx < sizeof(asoc->cookie_how))
			asoc->cookie_how[how_indx] = 15;

		return (stcb);
	}
	if (how_indx < sizeof(asoc->cookie_how))
		asoc->cookie_how[how_indx] = 16;
	/* all other cases... */
	return (NULL);
}


/*
 * Handle a state cookie for a NEW association.
 *
 * m:      input packet mbuf chain -- assumes a pullup of the
 *         IP/SCTP/COOKIE-ECHO chunk.  Note: this is a "split" mbuf and the
 *         cookie signature does not exist in it.
 * offset: offset into the mbuf of the cookie-echo chunk.
 * cookie/cookie_len: the state cookie (length here excludes the signature).
 * netp:   if non-NULL, receives the sctp_nets entry for init_src.
 * init_src: the address the original INIT came from.
 * notification: set to SCTP_NOTIFY_ASSOC_UP on success so the caller can
 *         notify the ULP once locks permit.
 *
 * Returns a new TCB on success; returns NULL on any failure (the error
 * paths abort and free the half-built association themselves).
 */
static struct sctp_tcb *
sctp_process_cookie_new(struct mbuf *m, int iphlen, int offset,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_state_cookie *cookie, int cookie_len,
    struct sctp_inpcb *inp, struct sctp_nets **netp,
    struct sockaddr *init_src, int *notification,
    int auth_skipped, uint32_t auth_offset, uint32_t auth_len,
    uint8_t use_mflowid, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
{
	struct sctp_tcb *stcb;
	struct sctp_init_chunk *init_cp, init_buf;
	struct sctp_init_ack_chunk *initack_cp, initack_buf;
	struct sockaddr_storage sa_store;
	struct sockaddr *initack_src = (struct sockaddr *)&sa_store;
	struct sctp_association *asoc;
	int init_offset, initack_offset, initack_limit;
	int retval;
	int error = 0;
	uint8_t auth_chunk_buf[SCTP_PARAM_BUFFER_SIZE];

#ifdef INET
	struct sockaddr_in *sin;

#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;

#endif
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
	struct socket *so;

	so = SCTP_INP_SO(inp);
#endif

	/*
	 * find and validate the INIT chunk in the cookie (peer's info) the
	 * INIT should start after the cookie-echo header struct (chunk
	 * header, state cookie header struct)
	 */
	init_offset = offset + sizeof(struct sctp_cookie_echo_chunk);
	init_cp = (struct sctp_init_chunk *)
	    sctp_m_getptr(m, init_offset, sizeof(struct sctp_init_chunk),
	    (uint8_t *) & init_buf);
	if (init_cp == NULL) {
		/* could not pull a INIT chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: could not pull INIT chunk hdr\n");
		return (NULL);
	}
	if (init_cp->ch.chunk_type != SCTP_INITIATION) {
		SCTPDBG(SCTP_DEBUG_INPUT1, "HUH? process_cookie_new: could not find INIT chunk!\n");
		return (NULL);
	}
	initack_offset = init_offset + SCTP_SIZE32(ntohs(init_cp->ch.chunk_length));
	/*
	 * find and validate the INIT-ACK chunk in the cookie (my info) the
	 * INIT-ACK follows the INIT chunk
	 */
	initack_cp = (struct sctp_init_ack_chunk *)
	    sctp_m_getptr(m, initack_offset, sizeof(struct sctp_init_ack_chunk),
	    (uint8_t *) & initack_buf);
	if (initack_cp == NULL) {
		/* could not pull INIT-ACK chunk in cookie */
		SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: could not pull INIT-ACK chunk hdr\n");
		return (NULL);
	}
	if (initack_cp->ch.chunk_type != SCTP_INITIATION_ACK) {
		return (NULL);
	}
	/*
	 * NOTE: We can't use the INIT_ACK's chk_length to determine the
	 * "initack_limit" value.  This is because the chk_length field
	 * includes the length of the cookie, but the cookie is omitted when
	 * the INIT and INIT_ACK are tacked onto the cookie...
	 */
	initack_limit = offset + cookie_len;

	/*
	 * now that we know the INIT/INIT-ACK are in place, create a new TCB
	 * and populate
	 */

	/*
	 * Here we do a trick, we set in NULL for the proc/thread argument.
	 * We do this since in effect we only use the p argument when the
	 * socket is unbound and we must do an implicit bind. Since we are
	 * getting a cookie, we cannot be unbound.
	 */
	stcb = sctp_aloc_assoc(inp, init_src, &error,
	    ntohl(initack_cp->init.initiate_tag), vrf_id,
	    (struct thread *)NULL
	    );
	if (stcb == NULL) {
		struct mbuf *op_err;

		/* memory problem? */
		SCTPDBG(SCTP_DEBUG_INPUT1,
		    "process_cookie_new: no room for another TCB!\n");
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
		return (NULL);
	}
	/* get the correct sctp_nets */
	if (netp)
		*netp = sctp_findnet(stcb, init_src);

	asoc = &stcb->asoc;
	/* get scope variables out of cookie */
	asoc->scope.ipv4_local_scope = cookie->ipv4_scope;
	asoc->scope.site_scope = cookie->site_scope;
	asoc->scope.local_scope = cookie->local_scope;
	asoc->scope.loopback_scope = cookie->loopback_scope;

	if ((asoc->scope.ipv4_addr_legal != cookie->ipv4_addr_legal) ||
	    (asoc->scope.ipv6_addr_legal != cookie->ipv6_addr_legal)) {
		struct mbuf *op_err;

		/*
		 * Houston we have a problem. The EP changed while the
		 * cookie was in flight. Only recourse is to abort the
		 * association.
		 */
		/*
		 * Hold a temporary reference across the (possible) socket
		 * lock juggling below so the tcb cannot vanish before
		 * sctp_free_assoc() runs.
		 */
		atomic_add_int(&stcb->asoc.refcnt, 1);
		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
		sctp_abort_association(inp, (struct sctp_tcb *)NULL, m, iphlen,
		    src, dst, sh, op_err,
		    use_mflowid, mflowid,
		    vrf_id, port);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* process the INIT-ACK info (my info) */
	asoc->my_vtag = ntohl(initack_cp->init.initiate_tag);
	asoc->my_rwnd = ntohl(initack_cp->init.a_rwnd);
	asoc->pre_open_streams = ntohs(initack_cp->init.num_outbound_streams);
	asoc->init_seq_number = ntohl(initack_cp->init.initial_tsn);
	asoc->sending_seq = asoc->asconf_seq_out = asoc->str_reset_seq_out = asoc->init_seq_number;
	asoc->asconf_seq_out_acked = asoc->asconf_seq_out - 1;
	asoc->asconf_seq_in = asoc->last_acked_seq = asoc->init_seq_number - 1;
	asoc->str_reset_seq_in = asoc->init_seq_number;

	asoc->advanced_peer_ack_point = asoc->last_acked_seq;

	/* process the INIT info (peer's info) */
	if (netp)
		retval = sctp_process_init(init_cp, stcb);
	else
		retval = 0;
	if (retval < 0) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		/*
		 * NOTE(review): SCTP_LOC_16 is also used by the
		 * scope-mismatch abort path above; a unique location code
		 * here would make the debug trail unambiguous -- verify
		 * against sctp_constants.h before renumbering.
		 */
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_16);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/* load all addresses */
	if (sctp_load_addresses_from_init(stcb, m,
	    init_offset + sizeof(struct sctp_init_chunk), initack_offset,
	    src, dst, init_src)) {
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_17);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}
	/*
	 * verify any preceding AUTH chunk that was skipped
	 */
	/* pull the local authentication parameters from the cookie/init-ack */
	sctp_auth_get_cookie_params(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)));
	if (auth_skipped) {
		struct sctp_auth_chunk *auth;

		auth = (struct sctp_auth_chunk *)
		    sctp_m_getptr(m, auth_offset, auth_len, auth_chunk_buf);
		if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, auth_offset)) {
			/* auth HMAC failed, dump the assoc and packet */
			SCTPDBG(SCTP_DEBUG_AUTH1,
			    "COOKIE-ECHO: AUTH failed\n");
			atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_TCB_UNLOCK(stcb);
			SCTP_SOCKET_LOCK(so, 1);
			SCTP_TCB_LOCK(stcb);
#endif
			(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_18);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
			SCTP_SOCKET_UNLOCK(so, 1);
#endif
			atomic_subtract_int(&stcb->asoc.refcnt, 1);
			return (NULL);
		} else {
			/* remaining chunks checked... good to go */
			stcb->asoc.authenticated = 1;
		}
	}
	/* update current state */
	SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n");
	SCTP_SET_STATE(asoc, SCTP_STATE_OPEN);
	if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) {
		sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
		    stcb->sctp_ep, stcb, asoc->primary_destination);
	}
	sctp_stop_all_cookie_timers(stcb);
	SCTP_STAT_INCR_COUNTER32(sctps_passiveestab);
	SCTP_STAT_INCR_GAUGE32(sctps_currestab);

	/*
	 * if we're doing ASCONFs, check to see if we have any new local
	 * addresses that need to get added to the peer (eg. addresses
	 * changed while cookie echo in flight). This needs to be done
	 * after we go to the OPEN state to do the correct asconf
	 * processing. else, make sure we have the correct addresses in our
	 * lists
	 */

	/* warning, we re-use sin, sin6, sa_store here! */
	/* pull in local_address (our "from" address) */
	switch (cookie->laddr_type) {
#ifdef INET
	case SCTP_IPV4_ADDRESS:
		/* source addr is IPv4 */
		sin = (struct sockaddr_in *)initack_src;
		memset(sin, 0, sizeof(*sin));
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(struct sockaddr_in);
		sin->sin_addr.s_addr = cookie->laddress[0];
		break;
#endif
#ifdef INET6
	case SCTP_IPV6_ADDRESS:
		/* source addr is IPv6 */
		sin6 = (struct sockaddr_in6 *)initack_src;
		memset(sin6, 0, sizeof(*sin6));
		sin6->sin6_family = AF_INET6;
		sin6->sin6_len = sizeof(struct sockaddr_in6);
		sin6->sin6_scope_id = cookie->scope_id;
		memcpy(&sin6->sin6_addr, cookie->laddress,
		    sizeof(sin6->sin6_addr));
		break;
#endif
	default:
		/* unknown local address type in the cookie: tear down */
		atomic_add_int(&stcb->asoc.refcnt, 1);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
#endif
		(void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC,
		    SCTP_FROM_SCTP_INPUT + SCTP_LOC_19);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		return (NULL);
	}

	/* set up to notify upper layer */
	*notification = SCTP_NOTIFY_ASSOC_UP;
	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) ||
	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) &&
	    (inp->sctp_socket->so_qlimit == 0)) {
		/*
		 * This is an endpoint that called connect() how it got a
		 * cookie that is NEW is a bit of a mystery. It must be that
		 * the INIT was sent, but before it got there.. a complete
		 * INIT/INIT-ACK/COOKIE arrived. But of course then it
		 * should have went to the other code.. not here.. oh well..
		 * a bit of protection is worth having..
		 */
		stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED;
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		atomic_add_int(&stcb->asoc.refcnt, 1);
		SCTP_TCB_UNLOCK(stcb);
		SCTP_SOCKET_LOCK(so, 1);
		SCTP_TCB_LOCK(stcb);
		atomic_subtract_int(&stcb->asoc.refcnt, 1);
		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
			SCTP_SOCKET_UNLOCK(so, 1);
			return (NULL);
		}
#endif
		soisconnected(stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
		SCTP_SOCKET_UNLOCK(so, 1);
#endif
	} else if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) &&
	    (inp->sctp_socket->so_qlimit)) {
		/*
		 * We don't want to do anything with this one. Since it is
		 * the listening guy. The timer will get started for
		 * accepted connections in the caller.
		 */
		;
	}
	/* since we did not send a HB make sure we don't double things */
	if ((netp) && (*netp))
		(*netp)->hb_responded = 1;

	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, NULL);
	}
	/* calculate the RTT */
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_entered);
	if ((netp) && (*netp)) {
		(*netp)->RTO = sctp_calculate_rto(stcb, asoc, *netp,
		    &cookie->time_entered, sctp_align_unsafe_makecopy,
		    SCTP_RTT_FROM_NON_DATA);
	}
	/* respond with a COOKIE-ACK */
	sctp_send_cookie_ack(stcb);

	/*
	 * check the address lists for any ASCONFs that need to be sent
	 * AFTER the cookie-ack is sent
	 */
	sctp_check_address_list(stcb, m,
	    initack_offset + sizeof(struct sctp_init_ack_chunk),
	    initack_limit - (initack_offset + sizeof(struct sctp_init_ack_chunk)),
	    initack_src, cookie->local_scope, cookie->site_scope,
	    cookie->ipv4_scope, cookie->loopback_scope);


	return (stcb);
}

/*
 * CODE LIKE THIS NEEDS TO RUN IF the peer supports the NAT extension, i.e
 * we NEED to make sure we are not already using the vtag. If so we
 * need to send back an ABORT-TRY-AGAIN-WITH-NEW-TAG No middle box bit!
2365 head = &SCTP_BASE_INFO(sctp_asochash)[SCTP_PCBHASH_ASOC(tag, 2366 SCTP_BASE_INFO(hashasocmark))]; 2367 LIST_FOREACH(stcb, head, sctp_asocs) { 2368 if ((stcb->asoc.my_vtag == tag) && (stcb->rport == rport) && (inp == stcb->sctp_ep)) { 2369 -- SEND ABORT - TRY AGAIN -- 2370 } 2371 } 2372*/ 2373 2374/* 2375 * handles a COOKIE-ECHO message stcb: modified to either a new or left as 2376 * existing (non-NULL) TCB 2377 */ 2378static struct mbuf * 2379sctp_handle_cookie_echo(struct mbuf *m, int iphlen, int offset, 2380 struct sockaddr *src, struct sockaddr *dst, 2381 struct sctphdr *sh, struct sctp_cookie_echo_chunk *cp, 2382 struct sctp_inpcb **inp_p, struct sctp_tcb **stcb, struct sctp_nets **netp, 2383 int auth_skipped, uint32_t auth_offset, uint32_t auth_len, 2384 struct sctp_tcb **locked_tcb, 2385 uint8_t use_mflowid, uint32_t mflowid, 2386 uint32_t vrf_id, uint16_t port) 2387{ 2388 struct sctp_state_cookie *cookie; 2389 struct sctp_tcb *l_stcb = *stcb; 2390 struct sctp_inpcb *l_inp; 2391 struct sockaddr *to; 2392 struct sctp_pcb *ep; 2393 struct mbuf *m_sig; 2394 uint8_t calc_sig[SCTP_SIGNATURE_SIZE], tmp_sig[SCTP_SIGNATURE_SIZE]; 2395 uint8_t *sig; 2396 uint8_t cookie_ok = 0; 2397 unsigned int sig_offset, cookie_offset; 2398 unsigned int cookie_len; 2399 struct timeval now; 2400 struct timeval time_expires; 2401 int notification = 0; 2402 struct sctp_nets *netl; 2403 int had_a_existing_tcb = 0; 2404 int send_int_conf = 0; 2405 2406#ifdef INET 2407 struct sockaddr_in sin; 2408 2409#endif 2410#ifdef INET6 2411 struct sockaddr_in6 sin6; 2412 2413#endif 2414 2415 SCTPDBG(SCTP_DEBUG_INPUT2, 2416 "sctp_handle_cookie: handling COOKIE-ECHO\n"); 2417 2418 if (inp_p == NULL) { 2419 return (NULL); 2420 } 2421 cookie = &cp->cookie; 2422 cookie_offset = offset + sizeof(struct sctp_chunkhdr); 2423 cookie_len = ntohs(cp->ch.chunk_length); 2424 2425 if ((cookie->peerport != sh->src_port) && 2426 (cookie->myport != sh->dest_port) && 2427 (cookie->my_vtag != sh->v_tag)) { 2428 
/* 2429 * invalid ports or bad tag. Note that we always leave the 2430 * v_tag in the header in network order and when we stored 2431 * it in the my_vtag slot we also left it in network order. 2432 * This maintains the match even though it may be in the 2433 * opposite byte order of the machine :-> 2434 */ 2435 return (NULL); 2436 } 2437 if (cookie_len < sizeof(struct sctp_cookie_echo_chunk) + 2438 sizeof(struct sctp_init_chunk) + 2439 sizeof(struct sctp_init_ack_chunk) + SCTP_SIGNATURE_SIZE) { 2440 /* cookie too small */ 2441 return (NULL); 2442 } 2443 /* 2444 * split off the signature into its own mbuf (since it should not be 2445 * calculated in the sctp_hmac_m() call). 2446 */ 2447 sig_offset = offset + cookie_len - SCTP_SIGNATURE_SIZE; 2448 m_sig = m_split(m, sig_offset, M_NOWAIT); 2449 if (m_sig == NULL) { 2450 /* out of memory or ?? */ 2451 return (NULL); 2452 } 2453#ifdef SCTP_MBUF_LOGGING 2454 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2455 struct mbuf *mat; 2456 2457 for (mat = m_sig; mat; mat = SCTP_BUF_NEXT(mat)) { 2458 if (SCTP_BUF_IS_EXTENDED(mat)) { 2459 sctp_log_mb(mat, SCTP_MBUF_SPLIT); 2460 } 2461 } 2462 } 2463#endif 2464 2465 /* 2466 * compute the signature/digest for the cookie 2467 */ 2468 ep = &(*inp_p)->sctp_ep; 2469 l_inp = *inp_p; 2470 if (l_stcb) { 2471 SCTP_TCB_UNLOCK(l_stcb); 2472 } 2473 SCTP_INP_RLOCK(l_inp); 2474 if (l_stcb) { 2475 SCTP_TCB_LOCK(l_stcb); 2476 } 2477 /* which cookie is it? 
*/ 2478 if ((cookie->time_entered.tv_sec < (long)ep->time_of_secret_change) && 2479 (ep->current_secret_number != ep->last_secret_number)) { 2480 /* it's the old cookie */ 2481 (void)sctp_hmac_m(SCTP_HMAC, 2482 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2483 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2484 } else { 2485 /* it's the current cookie */ 2486 (void)sctp_hmac_m(SCTP_HMAC, 2487 (uint8_t *) ep->secret_key[(int)ep->current_secret_number], 2488 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2489 } 2490 /* get the signature */ 2491 SCTP_INP_RUNLOCK(l_inp); 2492 sig = (uint8_t *) sctp_m_getptr(m_sig, 0, SCTP_SIGNATURE_SIZE, (uint8_t *) & tmp_sig); 2493 if (sig == NULL) { 2494 /* couldn't find signature */ 2495 sctp_m_freem(m_sig); 2496 return (NULL); 2497 } 2498 /* compare the received digest with the computed digest */ 2499 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) != 0) { 2500 /* try the old cookie? */ 2501 if ((cookie->time_entered.tv_sec == (long)ep->time_of_secret_change) && 2502 (ep->current_secret_number != ep->last_secret_number)) { 2503 /* compute digest with old */ 2504 (void)sctp_hmac_m(SCTP_HMAC, 2505 (uint8_t *) ep->secret_key[(int)ep->last_secret_number], 2506 SCTP_SECRET_SIZE, m, cookie_offset, calc_sig, 0); 2507 /* compare */ 2508 if (memcmp(calc_sig, sig, SCTP_SIGNATURE_SIZE) == 0) 2509 cookie_ok = 1; 2510 } 2511 } else { 2512 cookie_ok = 1; 2513 } 2514 2515 /* 2516 * Now before we continue we must reconstruct our mbuf so that 2517 * normal processing of any other chunks will work. 
2518 */ 2519 { 2520 struct mbuf *m_at; 2521 2522 m_at = m; 2523 while (SCTP_BUF_NEXT(m_at) != NULL) { 2524 m_at = SCTP_BUF_NEXT(m_at); 2525 } 2526 SCTP_BUF_NEXT(m_at) = m_sig; 2527 } 2528 2529 if (cookie_ok == 0) { 2530 SCTPDBG(SCTP_DEBUG_INPUT2, "handle_cookie_echo: cookie signature validation failed!\n"); 2531 SCTPDBG(SCTP_DEBUG_INPUT2, 2532 "offset = %u, cookie_offset = %u, sig_offset = %u\n", 2533 (uint32_t) offset, cookie_offset, sig_offset); 2534 return (NULL); 2535 } 2536 /* 2537 * check the cookie timestamps to be sure it's not stale 2538 */ 2539 (void)SCTP_GETTIME_TIMEVAL(&now); 2540 /* Expire time is in Ticks, so we convert to seconds */ 2541 time_expires.tv_sec = cookie->time_entered.tv_sec + TICKS_TO_SEC(cookie->cookie_life); 2542 time_expires.tv_usec = cookie->time_entered.tv_usec; 2543 /* 2544 * TODO sctp_constants.h needs alternative time macros when _KERNEL 2545 * is undefined. 2546 */ 2547 if (timevalcmp(&now, &time_expires, >)) { 2548 /* cookie is stale! */ 2549 struct mbuf *op_err; 2550 struct sctp_stale_cookie_msg *scm; 2551 uint32_t tim; 2552 2553 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_stale_cookie_msg), 2554 0, M_NOWAIT, 1, MT_DATA); 2555 if (op_err == NULL) { 2556 /* FOOBAR */ 2557 return (NULL); 2558 } 2559 /* Set the len */ 2560 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_stale_cookie_msg); 2561 scm = mtod(op_err, struct sctp_stale_cookie_msg *); 2562 scm->ph.param_type = htons(SCTP_CAUSE_STALE_COOKIE); 2563 scm->ph.param_length = htons((sizeof(struct sctp_paramhdr) + 2564 (sizeof(uint32_t)))); 2565 /* seconds to usec */ 2566 tim = (now.tv_sec - time_expires.tv_sec) * 1000000; 2567 /* add in usec */ 2568 if (tim == 0) 2569 tim = now.tv_usec - cookie->time_entered.tv_usec; 2570 scm->time_usec = htonl(tim); 2571 sctp_send_operr_to(src, dst, sh, cookie->peers_vtag, op_err, 2572 use_mflowid, mflowid, 2573 vrf_id, port); 2574 return (NULL); 2575 } 2576 /* 2577 * Now we must see with the lookup address if we have an existing 2578 * asoc. 
This will only happen if we were in the COOKIE-WAIT state 2579 * and a INIT collided with us and somewhere the peer sent the 2580 * cookie on another address besides the single address our assoc 2581 * had for him. In this case we will have one of the tie-tags set at 2582 * least AND the address field in the cookie can be used to look it 2583 * up. 2584 */ 2585 to = NULL; 2586 switch (cookie->addr_type) { 2587#ifdef INET6 2588 case SCTP_IPV6_ADDRESS: 2589 memset(&sin6, 0, sizeof(sin6)); 2590 sin6.sin6_family = AF_INET6; 2591 sin6.sin6_len = sizeof(sin6); 2592 sin6.sin6_port = sh->src_port; 2593 sin6.sin6_scope_id = cookie->scope_id; 2594 memcpy(&sin6.sin6_addr.s6_addr, cookie->address, 2595 sizeof(sin6.sin6_addr.s6_addr)); 2596 to = (struct sockaddr *)&sin6; 2597 break; 2598#endif 2599#ifdef INET 2600 case SCTP_IPV4_ADDRESS: 2601 memset(&sin, 0, sizeof(sin)); 2602 sin.sin_family = AF_INET; 2603 sin.sin_len = sizeof(sin); 2604 sin.sin_port = sh->src_port; 2605 sin.sin_addr.s_addr = cookie->address[0]; 2606 to = (struct sockaddr *)&sin; 2607 break; 2608#endif 2609 default: 2610 /* This should not happen */ 2611 return (NULL); 2612 } 2613 if ((*stcb == NULL) && to) { 2614 /* Yep, lets check */ 2615 *stcb = sctp_findassociation_ep_addr(inp_p, to, netp, dst, NULL); 2616 if (*stcb == NULL) { 2617 /* 2618 * We should have only got back the same inp. If we 2619 * got back a different ep we have a problem. The 2620 * original findep got back l_inp and now 2621 */ 2622 if (l_inp != *inp_p) { 2623 SCTP_PRINTF("Bad problem find_ep got a diff inp then special_locate?\n"); 2624 } 2625 } else { 2626 if (*locked_tcb == NULL) { 2627 /* 2628 * In this case we found the assoc only 2629 * after we locked the create lock. This 2630 * means we are in a colliding case and we 2631 * must make sure that we unlock the tcb if 2632 * its one of the cases where we throw away 2633 * the incoming packets. 
2634 */ 2635 *locked_tcb = *stcb; 2636 2637 /* 2638 * We must also increment the inp ref count 2639 * since the ref_count flags was set when we 2640 * did not find the TCB, now we found it 2641 * which reduces the refcount.. we must 2642 * raise it back out to balance it all :-) 2643 */ 2644 SCTP_INP_INCR_REF((*stcb)->sctp_ep); 2645 if ((*stcb)->sctp_ep != l_inp) { 2646 SCTP_PRINTF("Huh? ep:%p diff then l_inp:%p?\n", 2647 (void *)(*stcb)->sctp_ep, (void *)l_inp); 2648 } 2649 } 2650 } 2651 } 2652 if (to == NULL) { 2653 return (NULL); 2654 } 2655 cookie_len -= SCTP_SIGNATURE_SIZE; 2656 if (*stcb == NULL) { 2657 /* this is the "normal" case... get a new TCB */ 2658 *stcb = sctp_process_cookie_new(m, iphlen, offset, src, dst, sh, 2659 cookie, cookie_len, *inp_p, 2660 netp, to, ¬ification, 2661 auth_skipped, auth_offset, auth_len, 2662 use_mflowid, mflowid, 2663 vrf_id, port); 2664 } else { 2665 /* this is abnormal... cookie-echo on existing TCB */ 2666 had_a_existing_tcb = 1; 2667 *stcb = sctp_process_cookie_existing(m, iphlen, offset, 2668 src, dst, sh, 2669 cookie, cookie_len, *inp_p, *stcb, netp, to, 2670 ¬ification, auth_skipped, auth_offset, auth_len, 2671 use_mflowid, mflowid, 2672 vrf_id, port); 2673 } 2674 2675 if (*stcb == NULL) { 2676 /* still no TCB... must be bad cookie-echo */ 2677 return (NULL); 2678 } 2679 if ((*netp != NULL) && (use_mflowid != 0)) { 2680 (*netp)->flowid = mflowid; 2681#ifdef INVARIANTS 2682 (*netp)->flowidset = 1; 2683#endif 2684 } 2685 /* 2686 * Ok, we built an association so confirm the address we sent the 2687 * INIT-ACK to. 2688 */ 2689 netl = sctp_findnet(*stcb, to); 2690 /* 2691 * This code should in theory NOT run but 2692 */ 2693 if (netl == NULL) { 2694 /* TSNH! Huh, why do I need to add this address here? 
*/ 2695 if (sctp_add_remote_addr(*stcb, to, NULL, SCTP_DONOT_SETSCOPE, SCTP_IN_COOKIE_PROC)) { 2696 return (NULL); 2697 } 2698 netl = sctp_findnet(*stcb, to); 2699 } 2700 if (netl) { 2701 if (netl->dest_state & SCTP_ADDR_UNCONFIRMED) { 2702 netl->dest_state &= ~SCTP_ADDR_UNCONFIRMED; 2703 (void)sctp_set_primary_addr((*stcb), (struct sockaddr *)NULL, 2704 netl); 2705 send_int_conf = 1; 2706 } 2707 } 2708 sctp_start_net_timers(*stcb); 2709 if ((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) { 2710 if (!had_a_existing_tcb || 2711 (((*inp_p)->sctp_flags & SCTP_PCB_FLAGS_CONNECTED) == 0)) { 2712 /* 2713 * If we have a NEW cookie or the connect never 2714 * reached the connected state during collision we 2715 * must do the TCP accept thing. 2716 */ 2717 struct socket *so, *oso; 2718 struct sctp_inpcb *inp; 2719 2720 if (notification == SCTP_NOTIFY_ASSOC_RESTART) { 2721 /* 2722 * For a restart we will keep the same 2723 * socket, no need to do anything. I THINK!! 2724 */ 2725 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2726 if (send_int_conf) { 2727 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2728 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2729 } 2730 return (m); 2731 } 2732 oso = (*inp_p)->sctp_socket; 2733 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2734 SCTP_TCB_UNLOCK((*stcb)); 2735 CURVNET_SET(oso->so_vnet); 2736 so = sonewconn(oso, 0 2737 ); 2738 CURVNET_RESTORE(); 2739 SCTP_TCB_LOCK((*stcb)); 2740 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2741 2742 if (so == NULL) { 2743 struct mbuf *op_err; 2744 2745#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2746 struct socket *pcb_so; 2747 2748#endif 2749 /* Too many sockets */ 2750 SCTPDBG(SCTP_DEBUG_INPUT1, "process_cookie_new: no room for another socket!\n"); 2751 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 2752 sctp_abort_association(*inp_p, NULL, m, iphlen, 2753 src, dst, sh, op_err, 2754 use_mflowid, mflowid, 2755 vrf_id, port); 2756#if defined(__APPLE__) || 
defined(SCTP_SO_LOCK_TESTING) 2757 pcb_so = SCTP_INP_SO(*inp_p); 2758 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2759 SCTP_TCB_UNLOCK((*stcb)); 2760 SCTP_SOCKET_LOCK(pcb_so, 1); 2761 SCTP_TCB_LOCK((*stcb)); 2762 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2763#endif 2764 (void)sctp_free_assoc(*inp_p, *stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_20); 2765#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2766 SCTP_SOCKET_UNLOCK(pcb_so, 1); 2767#endif 2768 return (NULL); 2769 } 2770 inp = (struct sctp_inpcb *)so->so_pcb; 2771 SCTP_INP_INCR_REF(inp); 2772 /* 2773 * We add the unbound flag here so that if we get an 2774 * soabort() before we get the move_pcb done, we 2775 * will properly cleanup. 2776 */ 2777 inp->sctp_flags = (SCTP_PCB_FLAGS_TCPTYPE | 2778 SCTP_PCB_FLAGS_CONNECTED | 2779 SCTP_PCB_FLAGS_IN_TCPPOOL | 2780 SCTP_PCB_FLAGS_UNBOUND | 2781 (SCTP_PCB_COPY_FLAGS & (*inp_p)->sctp_flags) | 2782 SCTP_PCB_FLAGS_DONT_WAKE); 2783 inp->sctp_features = (*inp_p)->sctp_features; 2784 inp->sctp_mobility_features = (*inp_p)->sctp_mobility_features; 2785 inp->sctp_socket = so; 2786 inp->sctp_frag_point = (*inp_p)->sctp_frag_point; 2787 inp->sctp_cmt_on_off = (*inp_p)->sctp_cmt_on_off; 2788 inp->ecn_supported = (*inp_p)->ecn_supported; 2789 inp->prsctp_supported = (*inp_p)->prsctp_supported; 2790 inp->partial_delivery_point = (*inp_p)->partial_delivery_point; 2791 inp->sctp_context = (*inp_p)->sctp_context; 2792 inp->local_strreset_support = (*inp_p)->local_strreset_support; 2793 inp->inp_starting_point_for_iterator = NULL; 2794 /* 2795 * copy in the authentication parameters from the 2796 * original endpoint 2797 */ 2798 if (inp->sctp_ep.local_hmacs) 2799 sctp_free_hmaclist(inp->sctp_ep.local_hmacs); 2800 inp->sctp_ep.local_hmacs = 2801 sctp_copy_hmaclist((*inp_p)->sctp_ep.local_hmacs); 2802 if (inp->sctp_ep.local_auth_chunks) 2803 sctp_free_chunklist(inp->sctp_ep.local_auth_chunks); 2804 inp->sctp_ep.local_auth_chunks = 2805 
sctp_copy_chunklist((*inp_p)->sctp_ep.local_auth_chunks); 2806 2807 /* 2808 * Now we must move it from one hash table to 2809 * another and get the tcb in the right place. 2810 */ 2811 2812 /* 2813 * This is where the one-2-one socket is put into 2814 * the accept state waiting for the accept! 2815 */ 2816 if (*stcb) { 2817 (*stcb)->asoc.state |= SCTP_STATE_IN_ACCEPT_QUEUE; 2818 } 2819 sctp_move_pcb_and_assoc(*inp_p, inp, *stcb); 2820 2821 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2822 SCTP_TCB_UNLOCK((*stcb)); 2823 2824 sctp_pull_off_control_to_new_inp((*inp_p), inp, *stcb, 2825 0); 2826 SCTP_TCB_LOCK((*stcb)); 2827 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2828 2829 2830 /* 2831 * now we must check to see if we were aborted while 2832 * the move was going on and the lock/unlock 2833 * happened. 2834 */ 2835 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 2836 /* 2837 * yep it was, we leave the assoc attached 2838 * to the socket since the sctp_inpcb_free() 2839 * call will send an abort for us. 
2840 */ 2841 SCTP_INP_DECR_REF(inp); 2842 return (NULL); 2843 } 2844 SCTP_INP_DECR_REF(inp); 2845 /* Switch over to the new guy */ 2846 *inp_p = inp; 2847 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2848 if (send_int_conf) { 2849 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2850 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2851 } 2852 /* 2853 * Pull it from the incomplete queue and wake the 2854 * guy 2855 */ 2856#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2857 atomic_add_int(&(*stcb)->asoc.refcnt, 1); 2858 SCTP_TCB_UNLOCK((*stcb)); 2859 SCTP_SOCKET_LOCK(so, 1); 2860#endif 2861 soisconnected(so); 2862#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2863 SCTP_TCB_LOCK((*stcb)); 2864 atomic_subtract_int(&(*stcb)->asoc.refcnt, 1); 2865 SCTP_SOCKET_UNLOCK(so, 1); 2866#endif 2867 return (m); 2868 } 2869 } 2870 if (notification) { 2871 sctp_ulp_notify(notification, *stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2872 } 2873 if (send_int_conf) { 2874 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_CONFIRMED, 2875 (*stcb), 0, (void *)netl, SCTP_SO_NOT_LOCKED); 2876 } 2877 return (m); 2878} 2879 2880static void 2881sctp_handle_cookie_ack(struct sctp_cookie_ack_chunk *cp SCTP_UNUSED, 2882 struct sctp_tcb *stcb, struct sctp_nets *net) 2883{ 2884 /* cp must not be used, others call this without a c-ack :-) */ 2885 struct sctp_association *asoc; 2886 2887 SCTPDBG(SCTP_DEBUG_INPUT2, 2888 "sctp_handle_cookie_ack: handling COOKIE-ACK\n"); 2889 if (stcb == NULL) 2890 return; 2891 2892 asoc = &stcb->asoc; 2893 2894 sctp_stop_all_cookie_timers(stcb); 2895 /* process according to association state */ 2896 if (SCTP_GET_STATE(asoc) == SCTP_STATE_COOKIE_ECHOED) { 2897 /* state change only needed when I am in right state */ 2898 SCTPDBG(SCTP_DEBUG_INPUT2, "moving to OPEN state\n"); 2899 SCTP_SET_STATE(asoc, SCTP_STATE_OPEN); 2900 sctp_start_net_timers(stcb); 2901 if (asoc->state & SCTP_STATE_SHUTDOWN_PENDING) { 2902 
sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 2903 stcb->sctp_ep, stcb, asoc->primary_destination); 2904 2905 } 2906 /* update RTO */ 2907 SCTP_STAT_INCR_COUNTER32(sctps_activeestab); 2908 SCTP_STAT_INCR_GAUGE32(sctps_currestab); 2909 if (asoc->overall_error_count == 0) { 2910 net->RTO = sctp_calculate_rto(stcb, asoc, net, 2911 &asoc->time_entered, sctp_align_safe_nocopy, 2912 SCTP_RTT_FROM_NON_DATA); 2913 } 2914 (void)SCTP_GETTIME_TIMEVAL(&asoc->time_entered); 2915 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_UP, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 2916 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) || 2917 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_IN_TCPPOOL)) { 2918#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2919 struct socket *so; 2920 2921#endif 2922 stcb->sctp_ep->sctp_flags |= SCTP_PCB_FLAGS_CONNECTED; 2923#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2924 so = SCTP_INP_SO(stcb->sctp_ep); 2925 atomic_add_int(&stcb->asoc.refcnt, 1); 2926 SCTP_TCB_UNLOCK(stcb); 2927 SCTP_SOCKET_LOCK(so, 1); 2928 SCTP_TCB_LOCK(stcb); 2929 atomic_subtract_int(&stcb->asoc.refcnt, 1); 2930#endif 2931 if ((stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) == 0) { 2932 soisconnected(stcb->sctp_socket); 2933 } 2934#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 2935 SCTP_SOCKET_UNLOCK(so, 1); 2936#endif 2937 } 2938 /* 2939 * since we did not send a HB make sure we don't double 2940 * things 2941 */ 2942 net->hb_responded = 1; 2943 2944 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 2945 /* 2946 * We don't need to do the asconf thing, nor hb or 2947 * autoclose if the socket is closed. 
2948 */ 2949 goto closed_socket; 2950 } 2951 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, 2952 stcb, net); 2953 2954 2955 if (stcb->asoc.sctp_autoclose_ticks && 2956 sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_AUTOCLOSE)) { 2957 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, 2958 stcb->sctp_ep, stcb, NULL); 2959 } 2960 /* 2961 * send ASCONF if parameters are pending and ASCONFs are 2962 * allowed (eg. addresses changed when init/cookie echo were 2963 * in flight) 2964 */ 2965 if ((sctp_is_feature_on(stcb->sctp_ep, SCTP_PCB_FLAGS_DO_ASCONF)) && 2966 (stcb->asoc.peer_supports_asconf) && 2967 (!TAILQ_EMPTY(&stcb->asoc.asconf_queue))) { 2968#ifdef SCTP_TIMER_BASED_ASCONF 2969 sctp_timer_start(SCTP_TIMER_TYPE_ASCONF, 2970 stcb->sctp_ep, stcb, 2971 stcb->asoc.primary_destination); 2972#else 2973 sctp_send_asconf(stcb, stcb->asoc.primary_destination, 2974 SCTP_ADDR_NOT_LOCKED); 2975#endif 2976 } 2977 } 2978closed_socket: 2979 /* Toss the cookie if I can */ 2980 sctp_toss_old_cookies(stcb, asoc); 2981 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 2982 /* Restart the timer if we have pending data */ 2983 struct sctp_tmit_chunk *chk; 2984 2985 chk = TAILQ_FIRST(&asoc->sent_queue); 2986 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 2987 } 2988} 2989 2990static void 2991sctp_handle_ecn_echo(struct sctp_ecne_chunk *cp, 2992 struct sctp_tcb *stcb) 2993{ 2994 struct sctp_nets *net; 2995 struct sctp_tmit_chunk *lchk; 2996 struct sctp_ecne_chunk bkup; 2997 uint8_t override_bit; 2998 uint32_t tsn, window_data_tsn; 2999 int len; 3000 unsigned int pkt_cnt; 3001 3002 len = ntohs(cp->ch.chunk_length); 3003 if ((len != sizeof(struct sctp_ecne_chunk)) && 3004 (len != sizeof(struct old_sctp_ecne_chunk))) { 3005 return; 3006 } 3007 if (len == sizeof(struct old_sctp_ecne_chunk)) { 3008 /* Its the old format */ 3009 memcpy(&bkup, cp, sizeof(struct old_sctp_ecne_chunk)); 3010 bkup.num_pkts_since_cwr = htonl(1); 3011 cp = &bkup; 3012 } 3013 
SCTP_STAT_INCR(sctps_recvecne); 3014 tsn = ntohl(cp->tsn); 3015 pkt_cnt = ntohl(cp->num_pkts_since_cwr); 3016 lchk = TAILQ_LAST(&stcb->asoc.send_queue, sctpchunk_listhead); 3017 if (lchk == NULL) { 3018 window_data_tsn = stcb->asoc.sending_seq - 1; 3019 } else { 3020 window_data_tsn = lchk->rec.data.TSN_seq; 3021 } 3022 3023 /* Find where it was sent to if possible. */ 3024 net = NULL; 3025 TAILQ_FOREACH(lchk, &stcb->asoc.sent_queue, sctp_next) { 3026 if (lchk->rec.data.TSN_seq == tsn) { 3027 net = lchk->whoTo; 3028 net->ecn_prev_cwnd = lchk->rec.data.cwnd_at_send; 3029 break; 3030 } 3031 if (SCTP_TSN_GT(lchk->rec.data.TSN_seq, tsn)) { 3032 break; 3033 } 3034 } 3035 if (net == NULL) { 3036 /* 3037 * What to do. A previous send of a CWR was possibly lost. 3038 * See how old it is, we may have it marked on the actual 3039 * net. 3040 */ 3041 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 3042 if (tsn == net->last_cwr_tsn) { 3043 /* Found him, send it off */ 3044 break; 3045 } 3046 } 3047 if (net == NULL) { 3048 /* 3049 * If we reach here, we need to send a special CWR 3050 * that says hey, we did this a long time ago and 3051 * you lost the response. 3052 */ 3053 net = TAILQ_FIRST(&stcb->asoc.nets); 3054 if (net == NULL) { 3055 /* TSNH */ 3056 return; 3057 } 3058 override_bit = SCTP_CWR_REDUCE_OVERRIDE; 3059 } else { 3060 override_bit = 0; 3061 } 3062 } else { 3063 override_bit = 0; 3064 } 3065 if (SCTP_TSN_GT(tsn, net->cwr_window_tsn) && 3066 ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) { 3067 /* 3068 * JRS - Use the congestion control given in the pluggable 3069 * CC module 3070 */ 3071 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 0, pkt_cnt); 3072 /* 3073 * We reduce once every RTT. So we will only lower cwnd at 3074 * the next sending seq i.e. 
the window_data_tsn 3075 */ 3076 net->cwr_window_tsn = window_data_tsn; 3077 net->ecn_ce_pkt_cnt += pkt_cnt; 3078 net->lost_cnt = pkt_cnt; 3079 net->last_cwr_tsn = tsn; 3080 } else { 3081 override_bit |= SCTP_CWR_IN_SAME_WINDOW; 3082 if (SCTP_TSN_GT(tsn, net->last_cwr_tsn) && 3083 ((override_bit & SCTP_CWR_REDUCE_OVERRIDE) == 0)) { 3084 /* 3085 * Another loss in the same window update how many 3086 * marks/packets lost we have had. 3087 */ 3088 int cnt = 1; 3089 3090 if (pkt_cnt > net->lost_cnt) { 3091 /* Should be the case */ 3092 cnt = (pkt_cnt - net->lost_cnt); 3093 net->ecn_ce_pkt_cnt += cnt; 3094 } 3095 net->lost_cnt = pkt_cnt; 3096 net->last_cwr_tsn = tsn; 3097 /* 3098 * Most CC functions will ignore this call, since we 3099 * are in-window yet of the initial CE the peer saw. 3100 */ 3101 stcb->asoc.cc_functions.sctp_cwnd_update_after_ecn_echo(stcb, net, 1, cnt); 3102 } 3103 } 3104 /* 3105 * We always send a CWR this way if our previous one was lost our 3106 * peer will get an update, or if it is not time again to reduce we 3107 * still get the cwr to the peer. Note we set the override when we 3108 * could not find the TSN on the chunk or the destination network. 3109 */ 3110 sctp_send_cwr(stcb, net, net->last_cwr_tsn, override_bit); 3111} 3112 3113static void 3114sctp_handle_ecn_cwr(struct sctp_cwr_chunk *cp, struct sctp_tcb *stcb, struct sctp_nets *net) 3115{ 3116 /* 3117 * Here we get a CWR from the peer. We must look in the outqueue and 3118 * make sure that we have a covered ECNE in the control chunk part. 3119 * If so remove it. 
3120 */ 3121 struct sctp_tmit_chunk *chk; 3122 struct sctp_ecne_chunk *ecne; 3123 int override; 3124 uint32_t cwr_tsn; 3125 3126 cwr_tsn = ntohl(cp->tsn); 3127 3128 override = cp->ch.chunk_flags & SCTP_CWR_REDUCE_OVERRIDE; 3129 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 3130 if (chk->rec.chunk_id.id != SCTP_ECN_ECHO) { 3131 continue; 3132 } 3133 if ((override == 0) && (chk->whoTo != net)) { 3134 /* Must be from the right src unless override is set */ 3135 continue; 3136 } 3137 ecne = mtod(chk->data, struct sctp_ecne_chunk *); 3138 if (SCTP_TSN_GE(cwr_tsn, ntohl(ecne->tsn))) { 3139 /* this covers this ECNE, we can remove it */ 3140 stcb->asoc.ecn_echo_cnt_onq--; 3141 TAILQ_REMOVE(&stcb->asoc.control_send_queue, chk, 3142 sctp_next); 3143 if (chk->data) { 3144 sctp_m_freem(chk->data); 3145 chk->data = NULL; 3146 } 3147 stcb->asoc.ctrl_queue_cnt--; 3148 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 3149 if (override == 0) { 3150 break; 3151 } 3152 } 3153 } 3154} 3155 3156static void 3157sctp_handle_shutdown_complete(struct sctp_shutdown_complete_chunk *cp SCTP_UNUSED, 3158 struct sctp_tcb *stcb, struct sctp_nets *net) 3159{ 3160 struct sctp_association *asoc; 3161 3162#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3163 struct socket *so; 3164 3165#endif 3166 3167 SCTPDBG(SCTP_DEBUG_INPUT2, 3168 "sctp_handle_shutdown_complete: handling SHUTDOWN-COMPLETE\n"); 3169 if (stcb == NULL) 3170 return; 3171 3172 asoc = &stcb->asoc; 3173 /* process according to association state */ 3174 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_ACK_SENT) { 3175 /* unexpected SHUTDOWN-COMPLETE... so ignore... 
*/ 3176 SCTPDBG(SCTP_DEBUG_INPUT2, 3177 "sctp_handle_shutdown_complete: not in SCTP_STATE_SHUTDOWN_ACK_SENT --- ignore\n"); 3178 SCTP_TCB_UNLOCK(stcb); 3179 return; 3180 } 3181 /* notify upper layer protocol */ 3182 if (stcb->sctp_socket) { 3183 sctp_ulp_notify(SCTP_NOTIFY_ASSOC_DOWN, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 3184 } 3185#ifdef INVARIANTS 3186 if (!TAILQ_EMPTY(&asoc->send_queue) || 3187 !TAILQ_EMPTY(&asoc->sent_queue) || 3188 !stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, asoc)) { 3189 panic("Queues are not empty when handling SHUTDOWN-COMPLETE"); 3190 } 3191#endif 3192 /* stop the timer */ 3193 sctp_timer_stop(SCTP_TIMER_TYPE_SHUTDOWNACK, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_22); 3194 SCTP_STAT_INCR_COUNTER32(sctps_shutdown); 3195 /* free the TCB */ 3196 SCTPDBG(SCTP_DEBUG_INPUT2, 3197 "sctp_handle_shutdown_complete: calls free-asoc\n"); 3198#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3199 so = SCTP_INP_SO(stcb->sctp_ep); 3200 atomic_add_int(&stcb->asoc.refcnt, 1); 3201 SCTP_TCB_UNLOCK(stcb); 3202 SCTP_SOCKET_LOCK(so, 1); 3203 SCTP_TCB_LOCK(stcb); 3204 atomic_subtract_int(&stcb->asoc.refcnt, 1); 3205#endif 3206 (void)sctp_free_assoc(stcb->sctp_ep, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_23); 3207#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 3208 SCTP_SOCKET_UNLOCK(so, 1); 3209#endif 3210 return; 3211} 3212 3213static int 3214process_chunk_drop(struct sctp_tcb *stcb, struct sctp_chunk_desc *desc, 3215 struct sctp_nets *net, uint8_t flg) 3216{ 3217 switch (desc->chunk_type) { 3218 case SCTP_DATA: 3219 /* find the tsn to resend (possibly */ 3220 { 3221 uint32_t tsn; 3222 struct sctp_tmit_chunk *tp1; 3223 3224 tsn = ntohl(desc->tsn_ifany); 3225 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 3226 if (tp1->rec.data.TSN_seq == tsn) { 3227 /* found it */ 3228 break; 3229 } 3230 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, tsn)) { 3231 /* not found */ 3232 tp1 = NULL; 3233 break; 3234 } 3235 } 3236 if 
(tp1 == NULL) { 3237 /* 3238 * Do it the other way , aka without paying 3239 * attention to queue seq order. 3240 */ 3241 SCTP_STAT_INCR(sctps_pdrpdnfnd); 3242 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 3243 if (tp1->rec.data.TSN_seq == tsn) { 3244 /* found it */ 3245 break; 3246 } 3247 } 3248 } 3249 if (tp1 == NULL) { 3250 SCTP_STAT_INCR(sctps_pdrptsnnf); 3251 } 3252 if ((tp1) && (tp1->sent < SCTP_DATAGRAM_ACKED)) { 3253 uint8_t *ddp; 3254 3255 if (((flg & SCTP_BADCRC) == 0) && 3256 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) { 3257 return (0); 3258 } 3259 if ((stcb->asoc.peers_rwnd == 0) && 3260 ((flg & SCTP_FROM_MIDDLE_BOX) == 0)) { 3261 SCTP_STAT_INCR(sctps_pdrpdiwnp); 3262 return (0); 3263 } 3264 if (stcb->asoc.peers_rwnd == 0 && 3265 (flg & SCTP_FROM_MIDDLE_BOX)) { 3266 SCTP_STAT_INCR(sctps_pdrpdizrw); 3267 return (0); 3268 } 3269 ddp = (uint8_t *) (mtod(tp1->data, caddr_t)+ 3270 sizeof(struct sctp_data_chunk)); 3271 { 3272 unsigned int iii; 3273 3274 for (iii = 0; iii < sizeof(desc->data_bytes); 3275 iii++) { 3276 if (ddp[iii] != desc->data_bytes[iii]) { 3277 SCTP_STAT_INCR(sctps_pdrpbadd); 3278 return (-1); 3279 } 3280 } 3281 } 3282 3283 if (tp1->do_rtt) { 3284 /* 3285 * this guy had a RTO calculation 3286 * pending on it, cancel it 3287 */ 3288 if (tp1->whoTo->rto_needed == 0) { 3289 tp1->whoTo->rto_needed = 1; 3290 } 3291 tp1->do_rtt = 0; 3292 } 3293 SCTP_STAT_INCR(sctps_pdrpmark); 3294 if (tp1->sent != SCTP_DATAGRAM_RESEND) 3295 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3296 /* 3297 * mark it as if we were doing a FR, since 3298 * we will be getting gap ack reports behind 3299 * the info from the router. 3300 */ 3301 tp1->rec.data.doing_fast_retransmit = 1; 3302 /* 3303 * mark the tsn with what sequences can 3304 * cause a new FR. 
3305 */ 3306 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 3307 tp1->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 3308 } else { 3309 tp1->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 3310 } 3311 3312 /* restart the timer */ 3313 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 3314 stcb, tp1->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_24); 3315 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 3316 stcb, tp1->whoTo); 3317 3318 /* fix counts and things */ 3319 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3320 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_PDRP, 3321 tp1->whoTo->flight_size, 3322 tp1->book_size, 3323 (uintptr_t) stcb, 3324 tp1->rec.data.TSN_seq); 3325 } 3326 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3327 sctp_flight_size_decrease(tp1); 3328 sctp_total_flight_decrease(stcb, tp1); 3329 } 3330 tp1->sent = SCTP_DATAGRAM_RESEND; 3331 } { 3332 /* audit code */ 3333 unsigned int audit; 3334 3335 audit = 0; 3336 TAILQ_FOREACH(tp1, &stcb->asoc.sent_queue, sctp_next) { 3337 if (tp1->sent == SCTP_DATAGRAM_RESEND) 3338 audit++; 3339 } 3340 TAILQ_FOREACH(tp1, &stcb->asoc.control_send_queue, 3341 sctp_next) { 3342 if (tp1->sent == SCTP_DATAGRAM_RESEND) 3343 audit++; 3344 } 3345 if (audit != stcb->asoc.sent_queue_retran_cnt) { 3346 SCTP_PRINTF("**Local Audit finds cnt:%d asoc cnt:%d\n", 3347 audit, stcb->asoc.sent_queue_retran_cnt); 3348#ifndef SCTP_AUDITING_ENABLED 3349 stcb->asoc.sent_queue_retran_cnt = audit; 3350#endif 3351 } 3352 } 3353 } 3354 break; 3355 case SCTP_ASCONF: 3356 { 3357 struct sctp_tmit_chunk *asconf; 3358 3359 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue, 3360 sctp_next) { 3361 if (asconf->rec.chunk_id.id == SCTP_ASCONF) { 3362 break; 3363 } 3364 } 3365 if (asconf) { 3366 if (asconf->sent != SCTP_DATAGRAM_RESEND) 3367 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3368 asconf->sent = SCTP_DATAGRAM_RESEND; 3369 asconf->snd_count--; 3370 } 3371 } 3372 break; 3373 case 
SCTP_INITIATION: 3374 /* resend the INIT */ 3375 stcb->asoc.dropped_special_cnt++; 3376 if (stcb->asoc.dropped_special_cnt < SCTP_RETRY_DROPPED_THRESH) { 3377 /* 3378 * If we can get it in, in a few attempts we do 3379 * this, otherwise we let the timer fire. 3380 */ 3381 sctp_timer_stop(SCTP_TIMER_TYPE_INIT, stcb->sctp_ep, 3382 stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_25); 3383 sctp_send_initiate(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 3384 } 3385 break; 3386 case SCTP_SELECTIVE_ACK: 3387 case SCTP_NR_SELECTIVE_ACK: 3388 /* resend the sack */ 3389 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 3390 break; 3391 case SCTP_HEARTBEAT_REQUEST: 3392 /* resend a demand HB */ 3393 if ((stcb->asoc.overall_error_count + 3) < stcb->asoc.max_send_times) { 3394 /* 3395 * Only retransmit if we KNOW we wont destroy the 3396 * tcb 3397 */ 3398 sctp_send_hb(stcb, net, SCTP_SO_NOT_LOCKED); 3399 } 3400 break; 3401 case SCTP_SHUTDOWN: 3402 sctp_send_shutdown(stcb, net); 3403 break; 3404 case SCTP_SHUTDOWN_ACK: 3405 sctp_send_shutdown_ack(stcb, net); 3406 break; 3407 case SCTP_COOKIE_ECHO: 3408 { 3409 struct sctp_tmit_chunk *cookie; 3410 3411 cookie = NULL; 3412 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, 3413 sctp_next) { 3414 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 3415 break; 3416 } 3417 } 3418 if (cookie) { 3419 if (cookie->sent != SCTP_DATAGRAM_RESEND) 3420 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3421 cookie->sent = SCTP_DATAGRAM_RESEND; 3422 sctp_stop_all_cookie_timers(stcb); 3423 } 3424 } 3425 break; 3426 case SCTP_COOKIE_ACK: 3427 sctp_send_cookie_ack(stcb); 3428 break; 3429 case SCTP_ASCONF_ACK: 3430 /* resend last asconf ack */ 3431 sctp_send_asconf_ack(stcb); 3432 break; 3433 case SCTP_FORWARD_CUM_TSN: 3434 send_forward_tsn(stcb, &stcb->asoc); 3435 break; 3436 /* can't do anything with these */ 3437 case SCTP_PACKET_DROPPED: 3438 case SCTP_INITIATION_ACK: /* this should not happen */ 3439 case SCTP_HEARTBEAT_ACK: 3440 case 
SCTP_ABORT_ASSOCIATION: 3441 case SCTP_OPERATION_ERROR: 3442 case SCTP_SHUTDOWN_COMPLETE: 3443 case SCTP_ECN_ECHO: 3444 case SCTP_ECN_CWR: 3445 default: 3446 break; 3447 } 3448 return (0); 3449} 3450 3451void 3452sctp_reset_in_stream(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t * list) 3453{ 3454 uint32_t i; 3455 uint16_t temp; 3456 3457 /* 3458 * We set things to 0xffff since this is the last delivered sequence 3459 * and we will be sending in 0 after the reset. 3460 */ 3461 3462 if (number_entries) { 3463 for (i = 0; i < number_entries; i++) { 3464 temp = ntohs(list[i]); 3465 if (temp >= stcb->asoc.streamincnt) { 3466 continue; 3467 } 3468 stcb->asoc.strmin[temp].last_sequence_delivered = 0xffff; 3469 } 3470 } else { 3471 list = NULL; 3472 for (i = 0; i < stcb->asoc.streamincnt; i++) { 3473 stcb->asoc.strmin[i].last_sequence_delivered = 0xffff; 3474 } 3475 } 3476 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_RECV, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3477} 3478 3479static void 3480sctp_reset_out_streams(struct sctp_tcb *stcb, uint32_t number_entries, uint16_t * list) 3481{ 3482 uint32_t i; 3483 uint16_t temp; 3484 3485 if (number_entries > 0) { 3486 for (i = 0; i < number_entries; i++) { 3487 temp = ntohs(list[i]); 3488 if (temp >= stcb->asoc.streamoutcnt) { 3489 /* no such stream */ 3490 continue; 3491 } 3492 stcb->asoc.strmout[temp].next_sequence_send = 0; 3493 } 3494 } else { 3495 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 3496 stcb->asoc.strmout[i].next_sequence_send = 0; 3497 } 3498 } 3499 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_SEND, stcb, number_entries, (void *)list, SCTP_SO_NOT_LOCKED); 3500} 3501 3502 3503struct sctp_stream_reset_out_request * 3504sctp_find_stream_reset(struct sctp_tcb *stcb, uint32_t seq, struct sctp_tmit_chunk **bchk) 3505{ 3506 struct sctp_association *asoc; 3507 struct sctp_chunkhdr *ch; 3508 struct sctp_stream_reset_out_request *r; 3509 struct sctp_tmit_chunk *chk; 3510 int len, clen; 3511 3512 asoc = 
&stcb->asoc; 3513 if (TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 3514 asoc->stream_reset_outstanding = 0; 3515 return (NULL); 3516 } 3517 if (stcb->asoc.str_reset == NULL) { 3518 asoc->stream_reset_outstanding = 0; 3519 return (NULL); 3520 } 3521 chk = stcb->asoc.str_reset; 3522 if (chk->data == NULL) { 3523 return (NULL); 3524 } 3525 if (bchk) { 3526 /* he wants a copy of the chk pointer */ 3527 *bchk = chk; 3528 } 3529 clen = chk->send_size; 3530 ch = mtod(chk->data, struct sctp_chunkhdr *); 3531 r = (struct sctp_stream_reset_out_request *)(ch + 1); 3532 if (ntohl(r->request_seq) == seq) { 3533 /* found it */ 3534 return (r); 3535 } 3536 len = SCTP_SIZE32(ntohs(r->ph.param_length)); 3537 if (clen > (len + (int)sizeof(struct sctp_chunkhdr))) { 3538 /* move to the next one, there can only be a max of two */ 3539 r = (struct sctp_stream_reset_out_request *)((caddr_t)r + len); 3540 if (ntohl(r->request_seq) == seq) { 3541 return (r); 3542 } 3543 } 3544 /* that seq is not here */ 3545 return (NULL); 3546} 3547 3548static void 3549sctp_clean_up_stream_reset(struct sctp_tcb *stcb) 3550{ 3551 struct sctp_association *asoc; 3552 struct sctp_tmit_chunk *chk = stcb->asoc.str_reset; 3553 3554 if (stcb->asoc.str_reset == NULL) { 3555 return; 3556 } 3557 asoc = &stcb->asoc; 3558 3559 sctp_timer_stop(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo, SCTP_FROM_SCTP_INPUT + SCTP_LOC_26); 3560 TAILQ_REMOVE(&asoc->control_send_queue, 3561 chk, 3562 sctp_next); 3563 if (chk->data) { 3564 sctp_m_freem(chk->data); 3565 chk->data = NULL; 3566 } 3567 asoc->ctrl_queue_cnt--; 3568 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 3569 /* sa_ignore NO_NULL_CHK */ 3570 stcb->asoc.str_reset = NULL; 3571} 3572 3573 3574static int 3575sctp_handle_stream_reset_response(struct sctp_tcb *stcb, 3576 uint32_t seq, uint32_t action, 3577 struct sctp_stream_reset_response *respin) 3578{ 3579 uint16_t type; 3580 int lparm_len; 3581 struct sctp_association *asoc = &stcb->asoc; 3582 struct 
sctp_tmit_chunk *chk; 3583 struct sctp_stream_reset_out_request *srparam; 3584 uint32_t number_entries; 3585 3586 if (asoc->stream_reset_outstanding == 0) { 3587 /* duplicate */ 3588 return (0); 3589 } 3590 if (seq == stcb->asoc.str_reset_seq_out) { 3591 srparam = sctp_find_stream_reset(stcb, seq, &chk); 3592 if (srparam) { 3593 stcb->asoc.str_reset_seq_out++; 3594 type = ntohs(srparam->ph.param_type); 3595 lparm_len = ntohs(srparam->ph.param_length); 3596 if (type == SCTP_STR_RESET_OUT_REQUEST) { 3597 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t); 3598 asoc->stream_reset_out_is_outstanding = 0; 3599 if (asoc->stream_reset_outstanding) 3600 asoc->stream_reset_outstanding--; 3601 if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) { 3602 /* do it */ 3603 sctp_reset_out_streams(stcb, number_entries, srparam->list_of_streams); 3604 } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) { 3605 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3606 } else { 3607 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_OUT, stcb, number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3608 } 3609 } else if (type == SCTP_STR_RESET_IN_REQUEST) { 3610 /* Answered my request */ 3611 number_entries = (lparm_len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t); 3612 if (asoc->stream_reset_outstanding) 3613 asoc->stream_reset_outstanding--; 3614 if (action == SCTP_STREAM_RESET_RESULT_DENIED) { 3615 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_DENIED_IN, stcb, 3616 number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3617 } else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) { 3618 sctp_ulp_notify(SCTP_NOTIFY_STR_RESET_FAILED_IN, stcb, 3619 number_entries, srparam->list_of_streams, SCTP_SO_NOT_LOCKED); 3620 } 3621 } else if (type == SCTP_STR_RESET_ADD_OUT_STREAMS) { 3622 /* Ok we now may have more streams */ 3623 int num_stream; 3624 3625 
num_stream = stcb->asoc.strm_pending_add_size; 3626 if (num_stream > (stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt)) { 3627 /* TSNH */ 3628 num_stream = stcb->asoc.strm_realoutsize - stcb->asoc.streamoutcnt; 3629 } 3630 stcb->asoc.strm_pending_add_size = 0; 3631 if (asoc->stream_reset_outstanding) 3632 asoc->stream_reset_outstanding--; 3633 if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) { 3634 /* Put the new streams into effect */ 3635 stcb->asoc.streamoutcnt += num_stream; 3636 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0); 3637 } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) { 3638 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 3639 SCTP_STREAM_CHANGE_DENIED); 3640 } else { 3641 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 3642 SCTP_STREAM_CHANGE_FAILED); 3643 } 3644 } else if (type == SCTP_STR_RESET_ADD_IN_STREAMS) { 3645 if (asoc->stream_reset_outstanding) 3646 asoc->stream_reset_outstanding--; 3647 if (action == SCTP_STREAM_RESET_RESULT_DENIED) { 3648 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 3649 SCTP_STREAM_CHANGE_DENIED); 3650 } else if (action != SCTP_STREAM_RESET_RESULT_PERFORMED) { 3651 sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 3652 SCTP_STREAM_CHANGE_FAILED); 3653 } 3654 } else if (type == SCTP_STR_RESET_TSN_REQUEST) { 3655 /** 3656 * a) Adopt the new in tsn. 3657 * b) reset the map 3658 * c) Adopt the new out-tsn 3659 */ 3660 struct sctp_stream_reset_response_tsn *resp; 3661 struct sctp_forward_tsn_chunk fwdtsn; 3662 int abort_flag = 0; 3663 3664 if (respin == NULL) { 3665 /* huh ? 
*/ 3666 return (0); 3667 } 3668 if (action == SCTP_STREAM_RESET_RESULT_PERFORMED) { 3669 resp = (struct sctp_stream_reset_response_tsn *)respin; 3670 asoc->stream_reset_outstanding--; 3671 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3672 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3673 fwdtsn.new_cumulative_tsn = htonl(ntohl(resp->senders_next_tsn) - 1); 3674 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3675 if (abort_flag) { 3676 return (1); 3677 } 3678 stcb->asoc.highest_tsn_inside_map = (ntohl(resp->senders_next_tsn) - 1); 3679 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 3680 sctp_log_map(0, 7, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 3681 } 3682 stcb->asoc.tsn_last_delivered = stcb->asoc.cumulative_tsn = stcb->asoc.highest_tsn_inside_map; 3683 stcb->asoc.mapping_array_base_tsn = ntohl(resp->senders_next_tsn); 3684 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size); 3685 3686 stcb->asoc.highest_tsn_inside_nr_map = stcb->asoc.highest_tsn_inside_map; 3687 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size); 3688 3689 stcb->asoc.sending_seq = ntohl(resp->receivers_next_tsn); 3690 stcb->asoc.last_acked_seq = stcb->asoc.cumulative_tsn; 3691 3692 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 3693 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 3694 sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 0); 3695 } else if (action == SCTP_STREAM_RESET_RESULT_DENIED) { 3696 sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 3697 SCTP_ASSOC_RESET_DENIED); 3698 } else { 3699 sctp_notify_stream_reset_tsn(stcb, stcb->asoc.sending_seq, (stcb->asoc.mapping_array_base_tsn + 1), 3700 SCTP_ASSOC_RESET_FAILED); 3701 } 3702 } 3703 /* get rid of the request and get the request flags */ 3704 if (asoc->stream_reset_outstanding == 0) { 3705 
sctp_clean_up_stream_reset(stcb); 3706 } 3707 } 3708 } 3709 return (0); 3710} 3711 3712static void 3713sctp_handle_str_reset_request_in(struct sctp_tcb *stcb, 3714 struct sctp_tmit_chunk *chk, 3715 struct sctp_stream_reset_in_request *req, int trunc) 3716{ 3717 uint32_t seq; 3718 int len, i; 3719 int number_entries; 3720 uint16_t temp; 3721 3722 /* 3723 * peer wants me to send a str-reset to him for my outgoing seq's if 3724 * seq_in is right. 3725 */ 3726 struct sctp_association *asoc = &stcb->asoc; 3727 3728 seq = ntohl(req->request_seq); 3729 if (asoc->str_reset_seq_in == seq) { 3730 asoc->last_reset_action[1] = asoc->last_reset_action[0]; 3731 if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) { 3732 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 3733 } else if (trunc) { 3734 /* Can't do it, since they exceeded our buffer size */ 3735 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 3736 } else if (stcb->asoc.stream_reset_out_is_outstanding == 0) { 3737 len = ntohs(req->ph.param_length); 3738 number_entries = ((len - sizeof(struct sctp_stream_reset_in_request)) / sizeof(uint16_t)); 3739 for (i = 0; i < number_entries; i++) { 3740 temp = ntohs(req->list_of_streams[i]); 3741 req->list_of_streams[i] = temp; 3742 } 3743 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED; 3744 sctp_add_stream_reset_out(chk, number_entries, req->list_of_streams, 3745 asoc->str_reset_seq_out, 3746 seq, (asoc->sending_seq - 1)); 3747 asoc->stream_reset_out_is_outstanding = 1; 3748 asoc->str_reset = chk; 3749 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, stcb->sctp_ep, stcb, chk->whoTo); 3750 stcb->asoc.stream_reset_outstanding++; 3751 } else { 3752 /* Can't do it, since we have sent one out */ 3753 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS; 3754 } 3755 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3756 asoc->str_reset_seq_in++; 3757 } else if (asoc->str_reset_seq_in - 1 == seq) { 
3758 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]); 3759 } else if (asoc->str_reset_seq_in - 2 == seq) { 3760 sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]); 3761 } else { 3762 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO); 3763 } 3764} 3765 3766static int 3767sctp_handle_str_reset_request_tsn(struct sctp_tcb *stcb, 3768 struct sctp_tmit_chunk *chk, 3769 struct sctp_stream_reset_tsn_request *req) 3770{ 3771 /* reset all in and out and update the tsn */ 3772 /* 3773 * A) reset my str-seq's on in and out. B) Select a receive next, 3774 * and set cum-ack to it. Also process this selected number as a 3775 * fwd-tsn as well. C) set in the response my next sending seq. 3776 */ 3777 struct sctp_forward_tsn_chunk fwdtsn; 3778 struct sctp_association *asoc = &stcb->asoc; 3779 int abort_flag = 0; 3780 uint32_t seq; 3781 3782 seq = ntohl(req->request_seq); 3783 if (asoc->str_reset_seq_in == seq) { 3784 asoc->last_reset_action[1] = stcb->asoc.last_reset_action[0]; 3785 if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) { 3786 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED; 3787 } else { 3788 fwdtsn.ch.chunk_length = htons(sizeof(struct sctp_forward_tsn_chunk)); 3789 fwdtsn.ch.chunk_type = SCTP_FORWARD_CUM_TSN; 3790 fwdtsn.ch.chunk_flags = 0; 3791 fwdtsn.new_cumulative_tsn = htonl(stcb->asoc.highest_tsn_inside_map + 1); 3792 sctp_handle_forward_tsn(stcb, &fwdtsn, &abort_flag, NULL, 0); 3793 if (abort_flag) { 3794 return (1); 3795 } 3796 asoc->highest_tsn_inside_map += SCTP_STREAM_RESET_TSN_DELTA; 3797 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 3798 sctp_log_map(0, 10, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 3799 } 3800 asoc->tsn_last_delivered = asoc->cumulative_tsn = asoc->highest_tsn_inside_map; 3801 asoc->mapping_array_base_tsn = asoc->highest_tsn_inside_map + 1; 3802 memset(asoc->mapping_array, 0, asoc->mapping_array_size); 3803 
asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map; 3804 memset(asoc->nr_mapping_array, 0, asoc->mapping_array_size); 3805 atomic_add_int(&asoc->sending_seq, 1); 3806 /* save off historical data for retrans */ 3807 asoc->last_sending_seq[1] = asoc->last_sending_seq[0]; 3808 asoc->last_sending_seq[0] = asoc->sending_seq; 3809 asoc->last_base_tsnsent[1] = asoc->last_base_tsnsent[0]; 3810 asoc->last_base_tsnsent[0] = asoc->mapping_array_base_tsn; 3811 sctp_reset_out_streams(stcb, 0, (uint16_t *) NULL); 3812 sctp_reset_in_stream(stcb, 0, (uint16_t *) NULL); 3813 asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED; 3814 sctp_notify_stream_reset_tsn(stcb, asoc->sending_seq, (asoc->mapping_array_base_tsn + 1), 0); 3815 } 3816 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0], 3817 asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]); 3818 asoc->str_reset_seq_in++; 3819 } else if (asoc->str_reset_seq_in - 1 == seq) { 3820 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[0], 3821 asoc->last_sending_seq[0], asoc->last_base_tsnsent[0]); 3822 } else if (asoc->str_reset_seq_in - 2 == seq) { 3823 sctp_add_stream_reset_result_tsn(chk, seq, asoc->last_reset_action[1], 3824 asoc->last_sending_seq[1], asoc->last_base_tsnsent[1]); 3825 } else { 3826 sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO); 3827 } 3828 return (0); 3829} 3830 3831static void 3832sctp_handle_str_reset_request_out(struct sctp_tcb *stcb, 3833 struct sctp_tmit_chunk *chk, 3834 struct sctp_stream_reset_out_request *req, int trunc) 3835{ 3836 uint32_t seq, tsn; 3837 int number_entries, len; 3838 struct sctp_association *asoc = &stcb->asoc; 3839 3840 seq = ntohl(req->request_seq); 3841 3842 /* now if its not a duplicate we process it */ 3843 if (asoc->str_reset_seq_in == seq) { 3844 len = ntohs(req->ph.param_length); 3845 number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t)); 3846 /* 3847 
/*
 * Handle an incoming OUT-request: the peer is resetting its outgoing
 * streams (our incoming ones).  If the cumulative TSN has already passed
 * the peer's send_reset_at_tsn we reset immediately; otherwise the request
 * is queued on asoc->resetHead and triggered later by data processing.
 * Duplicate sequence numbers re-echo the cached result.
 */
static void
sctp_handle_str_reset_request_out(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_out_request *req, int trunc)
{
	uint32_t seq, tsn;
	int number_entries, len;
	struct sctp_association *asoc = &stcb->asoc;

	seq = ntohl(req->request_seq);

	/* now if its not a duplicate we process it */
	if (asoc->str_reset_seq_in == seq) {
		/* Stream count is implied by the parameter length. */
		len = ntohs(req->ph.param_length);
		number_entries = ((len - sizeof(struct sctp_stream_reset_out_request)) / sizeof(uint16_t));
		/*
		 * the sender is resetting, handle the list issue.. we must
		 * a) verify if we can do the reset, if so no problem b) If
		 * we can't do the reset we must copy the request. c) queue
		 * it, and setup the data in processor to trigger it off
		 * when needed and dequeue all the queued data.
		 */
		tsn = ntohl(req->send_reset_at_tsn);

		/* move the reset action back one */
		asoc->last_reset_action[1] = asoc->last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_RESET_STREAM_REQ)) {
			/* Feature not enabled locally. */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (trunc) {
			/* Request list did not fit our parameter buffer. */
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
			/* we can do it now */
			sctp_reset_in_stream(stcb, number_entries, req->list_of_streams);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		} else {
			/*
			 * we must queue it up and thus wait for the TSN's
			 * to arrive that are at or before tsn
			 */
			struct sctp_stream_reset_list *liste;
			int siz;

			siz = sizeof(struct sctp_stream_reset_list) + (number_entries * sizeof(uint16_t));
			SCTP_MALLOC(liste, struct sctp_stream_reset_list *,
			    siz, SCTP_M_STRESET);
			if (liste == NULL) {
				/* gak out of memory */
				asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
				sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
				return;
			}
			liste->tsn = tsn;
			liste->number_entries = number_entries;
			/* List stays in network byte order until applied. */
			memcpy(&liste->list_of_streams, req->list_of_streams, number_entries * sizeof(uint16_t));
			TAILQ_INSERT_TAIL(&asoc->resetHead, liste, next_resp);
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
}
/*
 * Handle a peer request to add *incoming* streams on our side (the peer
 * wants more outgoing streams toward us).  Grows asoc->strmin, migrating
 * any queued data, if the new total stays within max_inbound_streams and
 * the 16-bit stream-id space.  Duplicates re-echo the cached result.
 * NOTE(review): despite the name, this services SCTP_STR_RESET_ADD_OUT_STREAMS
 * parameters (the peer's "out" is our "in") — see sctp_handle_stream_reset().
 */
static void
sctp_handle_str_reset_add_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams. If its within our
	 * max-streams we will allow it.
	 */
	uint32_t num_stream, i;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;
	struct sctp_queued_to_read *ctl, *nctl;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		num_stream += stcb->asoc.streamincnt;
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if ((num_stream > stcb->asoc.max_inbound_streams) ||
		    (num_stream > 0xffff)) {
			/* We must reject it they ask for to many */
	denied:
			/* Shared deny path (also reached on allocation failure). */
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else {
			/* Ok, we can do that :-) */
			struct sctp_stream_in *oldstrm;

			/* save off the old */
			oldstrm = stcb->asoc.strmin;
			SCTP_MALLOC(stcb->asoc.strmin, struct sctp_stream_in *,
			    (num_stream * sizeof(struct sctp_stream_in)),
			    SCTP_M_STRMI);
			if (stcb->asoc.strmin == NULL) {
				/* Restore the old array before denying. */
				stcb->asoc.strmin = oldstrm;
				goto denied;
			}
			/* copy off the old data */
			for (i = 0; i < stcb->asoc.streamincnt; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				stcb->asoc.strmin[i].stream_no = i;
				stcb->asoc.strmin[i].last_sequence_delivered = oldstrm[i].last_sequence_delivered;
				stcb->asoc.strmin[i].delivery_started = oldstrm[i].delivery_started;
				/* now anything on those queues? */
				TAILQ_FOREACH_SAFE(ctl, &oldstrm[i].inqueue, next, nctl) {
					TAILQ_REMOVE(&oldstrm[i].inqueue, ctl, next);
					TAILQ_INSERT_TAIL(&stcb->asoc.strmin[i].inqueue, ctl, next);
				}
			}
			/* Init the new streams */
			for (i = stcb->asoc.streamincnt; i < num_stream; i++) {
				TAILQ_INIT(&stcb->asoc.strmin[i].inqueue);
				stcb->asoc.strmin[i].stream_no = i;
				/* 0xffff == "nothing delivered yet" sentinel. */
				stcb->asoc.strmin[i].last_sequence_delivered = 0xffff;
				stcb->asoc.strmin[i].delivery_started = 0;
			}
			SCTP_FREE(oldstrm, SCTP_M_STRMI);
			/* update the size */
			stcb->asoc.streamincnt = num_stream;
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
			sctp_notify_stream_reset_add(stcb, stcb->asoc.streamincnt, stcb->asoc.streamoutcnt, 0);
		}
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);

	}
}
/*
 * Handle a peer request that *we* add outgoing streams (the peer wants more
 * incoming streams).  If nothing is pending and the total stays below
 * 0x10000, we issue our own ADD-streams request via sctp_send_str_reset_req().
 * Duplicates re-echo the cached result.
 * NOTE(review): services SCTP_STR_RESET_ADD_IN_STREAMS parameters — the
 * in/out naming is mirrored relative to the wire type; see the dispatch in
 * sctp_handle_stream_reset().
 */
static void
sctp_handle_str_reset_add_out_strm(struct sctp_tcb *stcb, struct sctp_tmit_chunk *chk,
    struct sctp_stream_reset_add_strm *str_add)
{
	/*
	 * Peer is requesting to add more streams. If its within our
	 * max-streams we will allow it.
	 */
	uint16_t num_stream;
	uint32_t seq;
	struct sctp_association *asoc = &stcb->asoc;

	/* Get the number. */
	seq = ntohl(str_add->request_seq);
	num_stream = ntohs(str_add->number_of_streams);
	/* Now what would be the new total? */
	if (asoc->str_reset_seq_in == seq) {
		stcb->asoc.last_reset_action[1] = stcb->asoc.last_reset_action[0];
		if (!(asoc->local_strreset_support & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
			asoc->last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
		} else if (stcb->asoc.stream_reset_outstanding) {
			/* We must reject it we have something pending */
			stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_ERR_IN_PROGRESS;
		} else {
			/* Ok, we can do that :-) */
			int mychk;

			/* int arithmetic so the sum can't wrap before the check. */
			mychk = stcb->asoc.streamoutcnt;
			mychk += num_stream;
			if (mychk < 0x10000) {
				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_PERFORMED;
				/* Non-zero return == request could not be sent. */
				if (sctp_send_str_reset_req(stcb, 0, NULL, 0, 0, 0, 1, num_stream, 0, 1)) {
					stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
				}
			} else {
				stcb->asoc.last_reset_action[0] = SCTP_STREAM_RESET_RESULT_DENIED;
			}
		}
		sctp_add_stream_reset_result(chk, seq, stcb->asoc.last_reset_action[0]);
		asoc->str_reset_seq_in++;
	} else if ((asoc->str_reset_seq_in - 1) == seq) {
		/*
		 * one seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[0]);
	} else if ((asoc->str_reset_seq_in - 2) == seq) {
		/*
		 * two seq back, just echo back last action since my
		 * response was lost.
		 */
		sctp_add_stream_reset_result(chk, seq, asoc->last_reset_action[1]);
	} else {
		sctp_add_stream_reset_result(chk, seq, SCTP_STREAM_RESET_RESULT_ERR_BAD_SEQNO);
	}
}
/*
 * Parse a RE-CONFIG (STREAM-RESET) chunk: walk every embedded parameter,
 * dispatch each request/response type to its handler, and accumulate the
 * responses into a single reply chunk that is linked onto the control
 * send queue.  Returns non-zero only when a TSN-reset or response handler
 * reports the association was aborted.
 */
#ifdef __GNUC__
__attribute__((noinline))
#endif
	static int
	sctp_handle_stream_reset(struct sctp_tcb *stcb, struct mbuf *m, int offset,
	    struct sctp_chunkhdr *ch_req)
{
	int chk_length, param_len, ptype;
	struct sctp_paramhdr pstore;	/* scratch for peeking a param header */
	uint8_t cstore[SCTP_CHUNK_BUFFER_SIZE];	/* scratch for a full param */
	uint32_t seq = 0;
	int num_req = 0;
	int trunc = 0;
	struct sctp_tmit_chunk *chk;
	struct sctp_chunkhdr *ch;
	struct sctp_paramhdr *ph;
	int ret_code = 0;
	int num_param = 0;

	/* now it may be a reset or a reset-response */
	chk_length = ntohs(ch_req->chunk_length);

	/* setup for adding the response */
	sctp_alloc_a_chunk(stcb, chk);
	if (chk == NULL) {
		return (ret_code);
	}
	chk->rec.chunk_id.id = SCTP_STREAM_RESET;
	chk->rec.chunk_id.can_take_data = 0;
	chk->asoc = &stcb->asoc;
	chk->no_fr_allowed = 0;
	chk->book_size = chk->send_size = sizeof(struct sctp_chunkhdr);
	chk->book_size_scale = 0;
	chk->data = sctp_get_mbuf_for_msg(MCLBYTES, 0, M_NOWAIT, 1, MT_DATA);
	if (chk->data == NULL) {
strres_nochunk:
		/* Common cleanup: drop the reply chunk and bail. */
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
		return (ret_code);
	}
	SCTP_BUF_RESV_UF(chk->data, SCTP_MIN_OVERHEAD);

	/* setup chunk parameters */
	chk->sent = SCTP_DATAGRAM_UNSENT;
	chk->snd_count = 0;
	chk->whoTo = NULL;

	ch = mtod(chk->data, struct sctp_chunkhdr *);
	ch->chunk_type = SCTP_STREAM_RESET;
	ch->chunk_flags = 0;
	ch->chunk_length = htons(chk->send_size);
	SCTP_BUF_LEN(chk->data) = SCTP_SIZE32(chk->send_size);
	offset += sizeof(struct sctp_chunkhdr);
	/*
	 * sctp_stream_reset_tsn_request is the smallest parameter; anything
	 * shorter left in chk_length cannot be a valid parameter.
	 */
	while ((size_t)chk_length >= sizeof(struct sctp_stream_reset_tsn_request)) {
		/* First peek just the parameter header... */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, sizeof(pstore), (uint8_t *) & pstore);
		if (ph == NULL)
			break;
		param_len = ntohs(ph->param_length);
		if (param_len < (int)sizeof(struct sctp_stream_reset_tsn_request)) {
			/* bad param */
			break;
		}
		/* ...then pull as much of the parameter as fits in cstore. */
		ph = (struct sctp_paramhdr *)sctp_m_getptr(m, offset, min(param_len, (int)sizeof(cstore)),
		    (uint8_t *) & cstore);
		ptype = ntohs(ph->param_type);
		num_param++;
		if (param_len > (int)sizeof(cstore)) {
			/* Handlers are told the stream list was truncated. */
			trunc = 1;
		} else {
			trunc = 0;
		}
		if (num_param > SCTP_MAX_RESET_PARAMS) {
			/* hit the max of parameters already sorry.. */
			break;
		}
		if (ptype == SCTP_STR_RESET_OUT_REQUEST) {
			struct sctp_stream_reset_out_request *req_out;

			req_out = (struct sctp_stream_reset_out_request *)ph;
			num_req++;
			if (stcb->asoc.stream_reset_outstanding) {
				seq = ntohl(req_out->response_seq);
				if (seq == stcb->asoc.str_reset_seq_out) {
					/* implicit ack */
					(void)sctp_handle_stream_reset_response(stcb, seq, SCTP_STREAM_RESET_RESULT_PERFORMED, NULL);
				}
			}
			sctp_handle_str_reset_request_out(stcb, chk, req_out, trunc);
		} else if (ptype == SCTP_STR_RESET_ADD_OUT_STREAMS) {
			/* Peer adds its out streams == our in streams. */
			struct sctp_stream_reset_add_strm *str_add;

			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_ADD_IN_STREAMS) {
			/* Peer wants more in streams == our out streams. */
			struct sctp_stream_reset_add_strm *str_add;

			str_add = (struct sctp_stream_reset_add_strm *)ph;
			num_req++;
			sctp_handle_str_reset_add_out_strm(stcb, chk, str_add);
		} else if (ptype == SCTP_STR_RESET_IN_REQUEST) {
			struct sctp_stream_reset_in_request *req_in;

			num_req++;
			req_in = (struct sctp_stream_reset_in_request *)ph;
			sctp_handle_str_reset_request_in(stcb, chk, req_in, trunc);
		} else if (ptype == SCTP_STR_RESET_TSN_REQUEST) {
			struct sctp_stream_reset_tsn_request *req_tsn;

			num_req++;
			req_tsn = (struct sctp_stream_reset_tsn_request *)ph;
			if (sctp_handle_str_reset_request_tsn(stcb, chk, req_tsn)) {
				/* Association aborted during forward-TSN. */
				ret_code = 1;
				goto strres_nochunk;
			}
			/* no more */
			break;
		} else if (ptype == SCTP_STR_RESET_RESPONSE) {
			struct sctp_stream_reset_response *resp;
			uint32_t result;

			resp = (struct sctp_stream_reset_response *)ph;
			seq = ntohl(resp->response_seq);
			result = ntohl(resp->result);
			if (sctp_handle_stream_reset_response(stcb, seq, result, resp)) {
				ret_code = 1;
				goto strres_nochunk;
			}
		} else {
			/* Unknown parameter type: stop parsing. */
			break;
		}
		offset += SCTP_SIZE32(param_len);
		chk_length -= SCTP_SIZE32(param_len);
	}
	if (num_req == 0) {
		/* we have no response free the stuff */
		goto strres_nochunk;
	}
	/* ok we have a chunk to link in */
	TAILQ_INSERT_TAIL(&stcb->asoc.control_send_queue,
	    chk,
	    sctp_next);
	stcb->asoc.ctrl_queue_cnt++;
	return (ret_code);
}
4203 */ 4204static void 4205sctp_handle_packet_dropped(struct sctp_pktdrop_chunk *cp, 4206 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t limit) 4207{ 4208 uint32_t bottle_bw, on_queue; 4209 uint16_t trunc_len; 4210 unsigned int chlen; 4211 unsigned int at; 4212 struct sctp_chunk_desc desc; 4213 struct sctp_chunkhdr *ch; 4214 4215 chlen = ntohs(cp->ch.chunk_length); 4216 chlen -= sizeof(struct sctp_pktdrop_chunk); 4217 /* XXX possible chlen underflow */ 4218 if (chlen == 0) { 4219 ch = NULL; 4220 if (cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) 4221 SCTP_STAT_INCR(sctps_pdrpbwrpt); 4222 } else { 4223 ch = (struct sctp_chunkhdr *)(cp->data + sizeof(struct sctphdr)); 4224 chlen -= sizeof(struct sctphdr); 4225 /* XXX possible chlen underflow */ 4226 memset(&desc, 0, sizeof(desc)); 4227 } 4228 trunc_len = (uint16_t) ntohs(cp->trunc_len); 4229 if (trunc_len > limit) { 4230 trunc_len = limit; 4231 } 4232 /* now the chunks themselves */ 4233 while ((ch != NULL) && (chlen >= sizeof(struct sctp_chunkhdr))) { 4234 desc.chunk_type = ch->chunk_type; 4235 /* get amount we need to move */ 4236 at = ntohs(ch->chunk_length); 4237 if (at < sizeof(struct sctp_chunkhdr)) { 4238 /* corrupt chunk, maybe at the end? */ 4239 SCTP_STAT_INCR(sctps_pdrpcrupt); 4240 break; 4241 } 4242 if (trunc_len == 0) { 4243 /* we are supposed to have all of it */ 4244 if (at > chlen) { 4245 /* corrupt skip it */ 4246 SCTP_STAT_INCR(sctps_pdrpcrupt); 4247 break; 4248 } 4249 } else { 4250 /* is there enough of it left ? */ 4251 if (desc.chunk_type == SCTP_DATA) { 4252 if (chlen < (sizeof(struct sctp_data_chunk) + 4253 sizeof(desc.data_bytes))) { 4254 break; 4255 } 4256 } else { 4257 if (chlen < sizeof(struct sctp_chunkhdr)) { 4258 break; 4259 } 4260 } 4261 } 4262 if (desc.chunk_type == SCTP_DATA) { 4263 /* can we get out the tsn? 
*/ 4264 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 4265 SCTP_STAT_INCR(sctps_pdrpmbda); 4266 4267 if (chlen >= (sizeof(struct sctp_data_chunk) + sizeof(uint32_t))) { 4268 /* yep */ 4269 struct sctp_data_chunk *dcp; 4270 uint8_t *ddp; 4271 unsigned int iii; 4272 4273 dcp = (struct sctp_data_chunk *)ch; 4274 ddp = (uint8_t *) (dcp + 1); 4275 for (iii = 0; iii < sizeof(desc.data_bytes); iii++) { 4276 desc.data_bytes[iii] = ddp[iii]; 4277 } 4278 desc.tsn_ifany = dcp->dp.tsn; 4279 } else { 4280 /* nope we are done. */ 4281 SCTP_STAT_INCR(sctps_pdrpnedat); 4282 break; 4283 } 4284 } else { 4285 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX)) 4286 SCTP_STAT_INCR(sctps_pdrpmbct); 4287 } 4288 4289 if (process_chunk_drop(stcb, &desc, net, cp->ch.chunk_flags)) { 4290 SCTP_STAT_INCR(sctps_pdrppdbrk); 4291 break; 4292 } 4293 if (SCTP_SIZE32(at) > chlen) { 4294 break; 4295 } 4296 chlen -= SCTP_SIZE32(at); 4297 if (chlen < sizeof(struct sctp_chunkhdr)) { 4298 /* done, none left */ 4299 break; 4300 } 4301 ch = (struct sctp_chunkhdr *)((caddr_t)ch + SCTP_SIZE32(at)); 4302 } 4303 /* Now update any rwnd --- possibly */ 4304 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) == 0) { 4305 /* From a peer, we get a rwnd report */ 4306 uint32_t a_rwnd; 4307 4308 SCTP_STAT_INCR(sctps_pdrpfehos); 4309 4310 bottle_bw = ntohl(cp->bottle_bw); 4311 on_queue = ntohl(cp->current_onq); 4312 if (bottle_bw && on_queue) { 4313 /* a rwnd report is in here */ 4314 if (bottle_bw > on_queue) 4315 a_rwnd = bottle_bw - on_queue; 4316 else 4317 a_rwnd = 0; 4318 4319 if (a_rwnd == 0) 4320 stcb->asoc.peers_rwnd = 0; 4321 else { 4322 if (a_rwnd > stcb->asoc.total_flight) { 4323 stcb->asoc.peers_rwnd = 4324 a_rwnd - stcb->asoc.total_flight; 4325 } else { 4326 stcb->asoc.peers_rwnd = 0; 4327 } 4328 if (stcb->asoc.peers_rwnd < 4329 stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4330 /* SWS sender side engages */ 4331 stcb->asoc.peers_rwnd = 0; 4332 } 4333 } 4334 } 4335 } else { 4336 SCTP_STAT_INCR(sctps_pdrpfmbox); 
4337 } 4338 4339 /* now middle boxes in sat networks get a cwnd bump */ 4340 if ((cp->ch.chunk_flags & SCTP_FROM_MIDDLE_BOX) && 4341 (stcb->asoc.sat_t3_loss_recovery == 0) && 4342 (stcb->asoc.sat_network)) { 4343 /* 4344 * This is debateable but for sat networks it makes sense 4345 * Note if a T3 timer has went off, we will prohibit any 4346 * changes to cwnd until we exit the t3 loss recovery. 4347 */ 4348 stcb->asoc.cc_functions.sctp_cwnd_update_after_packet_dropped(stcb, 4349 net, cp, &bottle_bw, &on_queue); 4350 } 4351} 4352 4353/* 4354 * handles all control chunks in a packet inputs: - m: mbuf chain, assumed to 4355 * still contain IP/SCTP header - stcb: is the tcb found for this packet - 4356 * offset: offset into the mbuf chain to first chunkhdr - length: is the 4357 * length of the complete packet outputs: - length: modified to remaining 4358 * length after control processing - netp: modified to new sctp_nets after 4359 * cookie-echo processing - return NULL to discard the packet (ie. no asoc, 4360 * bad packet,...) otherwise return the tcb for this packet 4361 */ 4362#ifdef __GNUC__ 4363__attribute__((noinline)) 4364#endif 4365 static struct sctp_tcb * 4366 sctp_process_control(struct mbuf *m, int iphlen, int *offset, int length, 4367 struct sockaddr *src, struct sockaddr *dst, 4368 struct sctphdr *sh, struct sctp_chunkhdr *ch, struct sctp_inpcb *inp, 4369 struct sctp_tcb *stcb, struct sctp_nets **netp, int *fwd_tsn_seen, 4370 uint8_t use_mflowid, uint32_t mflowid, 4371 uint32_t vrf_id, uint16_t port) 4372{ 4373 struct sctp_association *asoc; 4374 struct mbuf *op_err; 4375 char msg[SCTP_DIAG_INFO_LEN]; 4376 uint32_t vtag_in; 4377 int num_chunks = 0; /* number of control chunks processed */ 4378 uint32_t chk_length; 4379 int ret; 4380 int abort_no_unlock = 0; 4381 int ecne_seen = 0; 4382 4383 /* 4384 * How big should this be, and should it be alloc'd? Lets try the 4385 * d-mtu-ceiling for now (2k) and that should hopefully work ... 
4386 * until we get into jumbo grams and such.. 4387 */ 4388 uint8_t chunk_buf[SCTP_CHUNK_BUFFER_SIZE]; 4389 struct sctp_tcb *locked_tcb = stcb; 4390 int got_auth = 0; 4391 uint32_t auth_offset = 0, auth_len = 0; 4392 int auth_skipped = 0; 4393 int asconf_cnt = 0; 4394 4395#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4396 struct socket *so; 4397 4398#endif 4399 4400 SCTPDBG(SCTP_DEBUG_INPUT1, "sctp_process_control: iphlen=%u, offset=%u, length=%u stcb:%p\n", 4401 iphlen, *offset, length, (void *)stcb); 4402 4403 /* validate chunk header length... */ 4404 if (ntohs(ch->chunk_length) < sizeof(*ch)) { 4405 SCTPDBG(SCTP_DEBUG_INPUT1, "Invalid header length %d\n", 4406 ntohs(ch->chunk_length)); 4407 if (locked_tcb) { 4408 SCTP_TCB_UNLOCK(locked_tcb); 4409 } 4410 return (NULL); 4411 } 4412 /* 4413 * validate the verification tag 4414 */ 4415 vtag_in = ntohl(sh->v_tag); 4416 4417 if (locked_tcb) { 4418 SCTP_TCB_LOCK_ASSERT(locked_tcb); 4419 } 4420 if (ch->chunk_type == SCTP_INITIATION) { 4421 SCTPDBG(SCTP_DEBUG_INPUT1, "Its an INIT of len:%d vtag:%x\n", 4422 ntohs(ch->chunk_length), vtag_in); 4423 if (vtag_in != 0) { 4424 /* protocol error- silently discard... */ 4425 SCTP_STAT_INCR(sctps_badvtag); 4426 if (locked_tcb) { 4427 SCTP_TCB_UNLOCK(locked_tcb); 4428 } 4429 return (NULL); 4430 } 4431 } else if (ch->chunk_type != SCTP_COOKIE_ECHO) { 4432 /* 4433 * If there is no stcb, skip the AUTH chunk and process 4434 * later after a stcb is found (to validate the lookup was 4435 * valid. 
4436 */ 4437 if ((ch->chunk_type == SCTP_AUTHENTICATION) && 4438 (stcb == NULL) && 4439 !SCTP_BASE_SYSCTL(sctp_auth_disable)) { 4440 /* save this chunk for later processing */ 4441 auth_skipped = 1; 4442 auth_offset = *offset; 4443 auth_len = ntohs(ch->chunk_length); 4444 4445 /* (temporarily) move past this chunk */ 4446 *offset += SCTP_SIZE32(auth_len); 4447 if (*offset >= length) { 4448 /* no more data left in the mbuf chain */ 4449 *offset = length; 4450 if (locked_tcb) { 4451 SCTP_TCB_UNLOCK(locked_tcb); 4452 } 4453 return (NULL); 4454 } 4455 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4456 sizeof(struct sctp_chunkhdr), chunk_buf); 4457 } 4458 if (ch == NULL) { 4459 /* Help */ 4460 *offset = length; 4461 if (locked_tcb) { 4462 SCTP_TCB_UNLOCK(locked_tcb); 4463 } 4464 return (NULL); 4465 } 4466 if (ch->chunk_type == SCTP_COOKIE_ECHO) { 4467 goto process_control_chunks; 4468 } 4469 /* 4470 * first check if it's an ASCONF with an unknown src addr we 4471 * need to look inside to find the association 4472 */ 4473 if (ch->chunk_type == SCTP_ASCONF && stcb == NULL) { 4474 struct sctp_chunkhdr *asconf_ch = ch; 4475 uint32_t asconf_offset = 0, asconf_len = 0; 4476 4477 /* inp's refcount may be reduced */ 4478 SCTP_INP_INCR_REF(inp); 4479 4480 asconf_offset = *offset; 4481 do { 4482 asconf_len = ntohs(asconf_ch->chunk_length); 4483 if (asconf_len < sizeof(struct sctp_asconf_paramhdr)) 4484 break; 4485 stcb = sctp_findassociation_ep_asconf(m, 4486 *offset, 4487 dst, 4488 sh, &inp, netp, vrf_id); 4489 if (stcb != NULL) 4490 break; 4491 asconf_offset += SCTP_SIZE32(asconf_len); 4492 asconf_ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, asconf_offset, 4493 sizeof(struct sctp_chunkhdr), chunk_buf); 4494 } while (asconf_ch != NULL && asconf_ch->chunk_type == SCTP_ASCONF); 4495 if (stcb == NULL) { 4496 /* 4497 * reduce inp's refcount if not reduced in 4498 * sctp_findassociation_ep_asconf(). 
4499 */ 4500 SCTP_INP_DECR_REF(inp); 4501 } else { 4502 locked_tcb = stcb; 4503 } 4504 4505 /* now go back and verify any auth chunk to be sure */ 4506 if (auth_skipped && (stcb != NULL)) { 4507 struct sctp_auth_chunk *auth; 4508 4509 auth = (struct sctp_auth_chunk *) 4510 sctp_m_getptr(m, auth_offset, 4511 auth_len, chunk_buf); 4512 got_auth = 1; 4513 auth_skipped = 0; 4514 if ((auth == NULL) || sctp_handle_auth(stcb, auth, m, 4515 auth_offset)) { 4516 /* auth HMAC failed so dump it */ 4517 *offset = length; 4518 if (locked_tcb) { 4519 SCTP_TCB_UNLOCK(locked_tcb); 4520 } 4521 return (NULL); 4522 } else { 4523 /* remaining chunks are HMAC checked */ 4524 stcb->asoc.authenticated = 1; 4525 } 4526 } 4527 } 4528 if (stcb == NULL) { 4529 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__); 4530 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 4531 msg); 4532 /* no association, so it's out of the blue... */ 4533 sctp_handle_ootb(m, iphlen, *offset, src, dst, sh, inp, op_err, 4534 use_mflowid, mflowid, 4535 vrf_id, port); 4536 *offset = length; 4537 if (locked_tcb) { 4538 SCTP_TCB_UNLOCK(locked_tcb); 4539 } 4540 return (NULL); 4541 } 4542 asoc = &stcb->asoc; 4543 /* ABORT and SHUTDOWN can use either v_tag... */ 4544 if ((ch->chunk_type == SCTP_ABORT_ASSOCIATION) || 4545 (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) || 4546 (ch->chunk_type == SCTP_PACKET_DROPPED)) { 4547 /* Take the T-bit always into account. */ 4548 if ((((ch->chunk_flags & SCTP_HAD_NO_TCB) == 0) && 4549 (vtag_in == asoc->my_vtag)) || 4550 (((ch->chunk_flags & SCTP_HAD_NO_TCB) == SCTP_HAD_NO_TCB) && 4551 (vtag_in == asoc->peer_vtag))) { 4552 /* this is valid */ 4553 } else { 4554 /* drop this packet... 
*/ 4555 SCTP_STAT_INCR(sctps_badvtag); 4556 if (locked_tcb) { 4557 SCTP_TCB_UNLOCK(locked_tcb); 4558 } 4559 return (NULL); 4560 } 4561 } else if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 4562 if (vtag_in != asoc->my_vtag) { 4563 /* 4564 * this could be a stale SHUTDOWN-ACK or the 4565 * peer never got the SHUTDOWN-COMPLETE and 4566 * is still hung; we have started a new asoc 4567 * but it won't complete until the shutdown 4568 * is completed 4569 */ 4570 if (locked_tcb) { 4571 SCTP_TCB_UNLOCK(locked_tcb); 4572 } 4573 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__); 4574 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 4575 msg); 4576 sctp_handle_ootb(m, iphlen, *offset, src, dst, 4577 sh, inp, op_err, 4578 use_mflowid, mflowid, 4579 vrf_id, port); 4580 return (NULL); 4581 } 4582 } else { 4583 /* for all other chunks, vtag must match */ 4584 if (vtag_in != asoc->my_vtag) { 4585 /* invalid vtag... */ 4586 SCTPDBG(SCTP_DEBUG_INPUT3, 4587 "invalid vtag: %xh, expect %xh\n", 4588 vtag_in, asoc->my_vtag); 4589 SCTP_STAT_INCR(sctps_badvtag); 4590 if (locked_tcb) { 4591 SCTP_TCB_UNLOCK(locked_tcb); 4592 } 4593 *offset = length; 4594 return (NULL); 4595 } 4596 } 4597 } /* end if !SCTP_COOKIE_ECHO */ 4598 /* 4599 * process all control chunks... 4600 */ 4601 if (((ch->chunk_type == SCTP_SELECTIVE_ACK) || 4602 (ch->chunk_type == SCTP_NR_SELECTIVE_ACK) || 4603 (ch->chunk_type == SCTP_HEARTBEAT_REQUEST)) && 4604 (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED)) { 4605 /* implied cookie-ack.. 
we must have lost the ack */ 4606 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4607 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4608 stcb->asoc.overall_error_count, 4609 0, 4610 SCTP_FROM_SCTP_INPUT, 4611 __LINE__); 4612 } 4613 stcb->asoc.overall_error_count = 0; 4614 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, 4615 *netp); 4616 } 4617process_control_chunks: 4618 while (IS_SCTP_CONTROL(ch)) { 4619 /* validate chunk length */ 4620 chk_length = ntohs(ch->chunk_length); 4621 SCTPDBG(SCTP_DEBUG_INPUT2, "sctp_process_control: processing a chunk type=%u, len=%u\n", 4622 ch->chunk_type, chk_length); 4623 SCTP_LTRACE_CHK(inp, stcb, ch->chunk_type, chk_length); 4624 if (chk_length < sizeof(*ch) || 4625 (*offset + (int)chk_length) > length) { 4626 *offset = length; 4627 if (locked_tcb) { 4628 SCTP_TCB_UNLOCK(locked_tcb); 4629 } 4630 return (NULL); 4631 } 4632 SCTP_STAT_INCR_COUNTER64(sctps_incontrolchunks); 4633 /* 4634 * INIT-ACK only gets the init ack "header" portion only 4635 * because we don't have to process the peer's COOKIE. All 4636 * others get a complete chunk. 4637 */ 4638 if ((ch->chunk_type == SCTP_INITIATION_ACK) || 4639 (ch->chunk_type == SCTP_INITIATION)) { 4640 /* get an init-ack chunk */ 4641 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4642 sizeof(struct sctp_init_ack_chunk), chunk_buf); 4643 if (ch == NULL) { 4644 *offset = length; 4645 if (locked_tcb) { 4646 SCTP_TCB_UNLOCK(locked_tcb); 4647 } 4648 return (NULL); 4649 } 4650 } else { 4651 /* For cookies and all other chunks. */ 4652 if (chk_length > sizeof(chunk_buf)) { 4653 /* 4654 * use just the size of the chunk buffer so 4655 * the front part of our chunks fit in 4656 * contiguous space up to the chunk buffer 4657 * size (508 bytes). For chunks that need to 4658 * get more than that they must use the 4659 * sctp_m_getptr() function or other means 4660 * (e.g. know how to parse mbuf chains). 4661 * Cookies do this already. 
4662 */ 4663 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4664 (sizeof(chunk_buf) - 4), 4665 chunk_buf); 4666 if (ch == NULL) { 4667 *offset = length; 4668 if (locked_tcb) { 4669 SCTP_TCB_UNLOCK(locked_tcb); 4670 } 4671 return (NULL); 4672 } 4673 } else { 4674 /* We can fit it all */ 4675 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 4676 chk_length, chunk_buf); 4677 if (ch == NULL) { 4678 SCTP_PRINTF("sctp_process_control: Can't get the all data....\n"); 4679 *offset = length; 4680 if (locked_tcb) { 4681 SCTP_TCB_UNLOCK(locked_tcb); 4682 } 4683 return (NULL); 4684 } 4685 } 4686 } 4687 num_chunks++; 4688 /* Save off the last place we got a control from */ 4689 if (stcb != NULL) { 4690 if (((netp != NULL) && (*netp != NULL)) || (ch->chunk_type == SCTP_ASCONF)) { 4691 /* 4692 * allow last_control to be NULL if 4693 * ASCONF... ASCONF processing will find the 4694 * right net later 4695 */ 4696 if ((netp != NULL) && (*netp != NULL)) 4697 stcb->asoc.last_control_chunk_from = *netp; 4698 } 4699 } 4700#ifdef SCTP_AUDITING_ENABLED 4701 sctp_audit_log(0xB0, ch->chunk_type); 4702#endif 4703 4704 /* check to see if this chunk required auth, but isn't */ 4705 if ((stcb != NULL) && 4706 !SCTP_BASE_SYSCTL(sctp_auth_disable) && 4707 sctp_auth_is_required_chunk(ch->chunk_type, stcb->asoc.local_auth_chunks) && 4708 !stcb->asoc.authenticated) { 4709 /* "silently" ignore */ 4710 SCTP_STAT_INCR(sctps_recvauthmissing); 4711 goto next_chunk; 4712 } 4713 switch (ch->chunk_type) { 4714 case SCTP_INITIATION: 4715 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT\n"); 4716 /* The INIT chunk must be the only chunk. 
*/ 4717 if ((num_chunks > 1) || 4718 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 4719 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 4720 "INIT not the only chunk"); 4721 sctp_abort_association(inp, stcb, m, iphlen, 4722 src, dst, sh, op_err, 4723 use_mflowid, mflowid, 4724 vrf_id, port); 4725 *offset = length; 4726 return (NULL); 4727 } 4728 /* Honor our resource limit. */ 4729 if (chk_length > SCTP_LARGEST_INIT_ACCEPTED) { 4730 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 4731 sctp_abort_association(inp, stcb, m, iphlen, 4732 src, dst, sh, op_err, 4733 use_mflowid, mflowid, 4734 vrf_id, port); 4735 *offset = length; 4736 return (NULL); 4737 } 4738 sctp_handle_init(m, iphlen, *offset, src, dst, sh, 4739 (struct sctp_init_chunk *)ch, inp, 4740 stcb, &abort_no_unlock, 4741 use_mflowid, mflowid, 4742 vrf_id, port); 4743 *offset = length; 4744 if ((!abort_no_unlock) && (locked_tcb)) { 4745 SCTP_TCB_UNLOCK(locked_tcb); 4746 } 4747 return (NULL); 4748 break; 4749 case SCTP_PAD_CHUNK: 4750 break; 4751 case SCTP_INITIATION_ACK: 4752 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_INIT-ACK\n"); 4753 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 4754 /* We are not interested anymore */ 4755 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 4756 ; 4757 } else { 4758 if (locked_tcb != stcb) { 4759 /* Very unlikely */ 4760 SCTP_TCB_UNLOCK(locked_tcb); 4761 } 4762 *offset = length; 4763 if (stcb) { 4764#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4765 so = SCTP_INP_SO(inp); 4766 atomic_add_int(&stcb->asoc.refcnt, 1); 4767 SCTP_TCB_UNLOCK(stcb); 4768 SCTP_SOCKET_LOCK(so, 1); 4769 SCTP_TCB_LOCK(stcb); 4770 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4771#endif 4772 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27); 4773#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4774 SCTP_SOCKET_UNLOCK(so, 1); 4775#endif 4776 } 4777 return (NULL); 4778 } 4779 } 4780 /* The INIT-ACK chunk must be the 
only chunk. */ 4781 if ((num_chunks > 1) || 4782 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 4783 *offset = length; 4784 if (locked_tcb) { 4785 SCTP_TCB_UNLOCK(locked_tcb); 4786 } 4787 return (NULL); 4788 } 4789 if ((netp) && (*netp)) { 4790 ret = sctp_handle_init_ack(m, iphlen, *offset, 4791 src, dst, sh, 4792 (struct sctp_init_ack_chunk *)ch, 4793 stcb, *netp, 4794 &abort_no_unlock, 4795 use_mflowid, mflowid, 4796 vrf_id); 4797 } else { 4798 ret = -1; 4799 } 4800 *offset = length; 4801 if (abort_no_unlock) { 4802 return (NULL); 4803 } 4804 /* 4805 * Special case, I must call the output routine to 4806 * get the cookie echoed 4807 */ 4808 if ((stcb != NULL) && (ret == 0)) { 4809 sctp_chunk_output(stcb->sctp_ep, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 4810 } 4811 if (locked_tcb) { 4812 SCTP_TCB_UNLOCK(locked_tcb); 4813 } 4814 return (NULL); 4815 break; 4816 case SCTP_SELECTIVE_ACK: 4817 { 4818 struct sctp_sack_chunk *sack; 4819 int abort_now = 0; 4820 uint32_t a_rwnd, cum_ack; 4821 uint16_t num_seg, num_dup; 4822 uint8_t flags; 4823 int offset_seg, offset_dup; 4824 4825 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK\n"); 4826 SCTP_STAT_INCR(sctps_recvsacks); 4827 if (stcb == NULL) { 4828 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing SACK chunk\n"); 4829 break; 4830 } 4831 if (chk_length < sizeof(struct sctp_sack_chunk)) { 4832 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on SACK chunk, too small\n"); 4833 break; 4834 } 4835 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 4836 /*- 4837 * If we have sent a shutdown-ack, we will pay no 4838 * attention to a sack sent in to us since 4839 * we don't care anymore. 
4840 */ 4841 break; 4842 } 4843 sack = (struct sctp_sack_chunk *)ch; 4844 flags = ch->chunk_flags; 4845 cum_ack = ntohl(sack->sack.cum_tsn_ack); 4846 num_seg = ntohs(sack->sack.num_gap_ack_blks); 4847 num_dup = ntohs(sack->sack.num_dup_tsns); 4848 a_rwnd = (uint32_t) ntohl(sack->sack.a_rwnd); 4849 if (sizeof(struct sctp_sack_chunk) + 4850 num_seg * sizeof(struct sctp_gap_ack_block) + 4851 num_dup * sizeof(uint32_t) != chk_length) { 4852 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of SACK chunk\n"); 4853 break; 4854 } 4855 offset_seg = *offset + sizeof(struct sctp_sack_chunk); 4856 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block); 4857 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n", 4858 cum_ack, num_seg, a_rwnd); 4859 stcb->asoc.seen_a_sack_this_pkt = 1; 4860 if ((stcb->asoc.pr_sctp_cnt == 0) && 4861 (num_seg == 0) && 4862 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) && 4863 (stcb->asoc.saw_sack_with_frags == 0) && 4864 (stcb->asoc.saw_sack_with_nr_frags == 0) && 4865 (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) 4866 ) { 4867 /* 4868 * We have a SIMPLE sack having no 4869 * prior segments and data on sent 4870 * queue to be acked.. Use the 4871 * faster path sack processing. We 4872 * also allow window update sacks 4873 * with no missing segments to go 4874 * this way too. 
4875 */ 4876 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, &abort_now, ecne_seen); 4877 } else { 4878 if (netp && *netp) 4879 sctp_handle_sack(m, offset_seg, offset_dup, stcb, 4880 num_seg, 0, num_dup, &abort_now, flags, 4881 cum_ack, a_rwnd, ecne_seen); 4882 } 4883 if (abort_now) { 4884 /* ABORT signal from sack processing */ 4885 *offset = length; 4886 return (NULL); 4887 } 4888 if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 4889 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 4890 (stcb->asoc.stream_queue_cnt == 0)) { 4891 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 4892 } 4893 } 4894 break; 4895 /* 4896 * EY - nr_sack: If the received chunk is an 4897 * nr_sack chunk 4898 */ 4899 case SCTP_NR_SELECTIVE_ACK: 4900 { 4901 struct sctp_nr_sack_chunk *nr_sack; 4902 int abort_now = 0; 4903 uint32_t a_rwnd, cum_ack; 4904 uint16_t num_seg, num_nr_seg, num_dup; 4905 uint8_t flags; 4906 int offset_seg, offset_dup; 4907 4908 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK\n"); 4909 SCTP_STAT_INCR(sctps_recvsacks); 4910 if (stcb == NULL) { 4911 SCTPDBG(SCTP_DEBUG_INDATA1, "No stcb when processing NR-SACK chunk\n"); 4912 break; 4913 } 4914 if ((stcb->asoc.sctp_nr_sack_on_off == 0) || 4915 (stcb->asoc.peer_supports_nr_sack == 0)) { 4916 goto unknown_chunk; 4917 } 4918 if (chk_length < sizeof(struct sctp_nr_sack_chunk)) { 4919 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size on NR-SACK chunk, too small\n"); 4920 break; 4921 } 4922 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_SHUTDOWN_ACK_SENT) { 4923 /*- 4924 * If we have sent a shutdown-ack, we will pay no 4925 * attention to a sack sent in to us since 4926 * we don't care anymore. 
4927 */ 4928 break; 4929 } 4930 nr_sack = (struct sctp_nr_sack_chunk *)ch; 4931 flags = ch->chunk_flags; 4932 cum_ack = ntohl(nr_sack->nr_sack.cum_tsn_ack); 4933 num_seg = ntohs(nr_sack->nr_sack.num_gap_ack_blks); 4934 num_nr_seg = ntohs(nr_sack->nr_sack.num_nr_gap_ack_blks); 4935 num_dup = ntohs(nr_sack->nr_sack.num_dup_tsns); 4936 a_rwnd = (uint32_t) ntohl(nr_sack->nr_sack.a_rwnd); 4937 if (sizeof(struct sctp_nr_sack_chunk) + 4938 (num_seg + num_nr_seg) * sizeof(struct sctp_gap_ack_block) + 4939 num_dup * sizeof(uint32_t) != chk_length) { 4940 SCTPDBG(SCTP_DEBUG_INDATA1, "Bad size of NR_SACK chunk\n"); 4941 break; 4942 } 4943 offset_seg = *offset + sizeof(struct sctp_nr_sack_chunk); 4944 offset_dup = offset_seg + num_seg * sizeof(struct sctp_gap_ack_block); 4945 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_NR_SACK process cum_ack:%x num_seg:%d a_rwnd:%d\n", 4946 cum_ack, num_seg, a_rwnd); 4947 stcb->asoc.seen_a_sack_this_pkt = 1; 4948 if ((stcb->asoc.pr_sctp_cnt == 0) && 4949 (num_seg == 0) && (num_nr_seg == 0) && 4950 SCTP_TSN_GE(cum_ack, stcb->asoc.last_acked_seq) && 4951 (stcb->asoc.saw_sack_with_frags == 0) && 4952 (stcb->asoc.saw_sack_with_nr_frags == 0) && 4953 (!TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 4954 /* 4955 * We have a SIMPLE sack having no 4956 * prior segments and data on sent 4957 * queue to be acked. Use the faster 4958 * path sack processing. We also 4959 * allow window update sacks with no 4960 * missing segments to go this way 4961 * too. 
4962 */ 4963 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, 4964 &abort_now, ecne_seen); 4965 } else { 4966 if (netp && *netp) 4967 sctp_handle_sack(m, offset_seg, offset_dup, stcb, 4968 num_seg, num_nr_seg, num_dup, &abort_now, flags, 4969 cum_ack, a_rwnd, ecne_seen); 4970 } 4971 if (abort_now) { 4972 /* ABORT signal from sack processing */ 4973 *offset = length; 4974 return (NULL); 4975 } 4976 if (TAILQ_EMPTY(&stcb->asoc.send_queue) && 4977 TAILQ_EMPTY(&stcb->asoc.sent_queue) && 4978 (stcb->asoc.stream_queue_cnt == 0)) { 4979 sctp_ulp_notify(SCTP_NOTIFY_SENDER_DRY, stcb, 0, NULL, SCTP_SO_NOT_LOCKED); 4980 } 4981 } 4982 break; 4983 4984 case SCTP_HEARTBEAT_REQUEST: 4985 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT\n"); 4986 if ((stcb) && netp && *netp) { 4987 SCTP_STAT_INCR(sctps_recvheartbeat); 4988 sctp_send_heartbeat_ack(stcb, m, *offset, 4989 chk_length, *netp); 4990 4991 /* He's alive so give him credit */ 4992 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4993 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4994 stcb->asoc.overall_error_count, 4995 0, 4996 SCTP_FROM_SCTP_INPUT, 4997 __LINE__); 4998 } 4999 stcb->asoc.overall_error_count = 0; 5000 } 5001 break; 5002 case SCTP_HEARTBEAT_ACK: 5003 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_HEARTBEAT-ACK\n"); 5004 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_heartbeat_chunk))) { 5005 /* Its not ours */ 5006 *offset = length; 5007 if (locked_tcb) { 5008 SCTP_TCB_UNLOCK(locked_tcb); 5009 } 5010 return (NULL); 5011 } 5012 /* He's alive so give him credit */ 5013 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5014 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5015 stcb->asoc.overall_error_count, 5016 0, 5017 SCTP_FROM_SCTP_INPUT, 5018 __LINE__); 5019 } 5020 stcb->asoc.overall_error_count = 0; 5021 SCTP_STAT_INCR(sctps_recvheartbeatack); 5022 if (netp && *netp) 5023 sctp_handle_heartbeat_ack((struct sctp_heartbeat_chunk *)ch, 5024 stcb, *netp); 5025 break; 5026 case SCTP_ABORT_ASSOCIATION: 
5027 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ABORT, stcb %p\n", 5028 (void *)stcb); 5029 if ((stcb) && netp && *netp) 5030 sctp_handle_abort((struct sctp_abort_chunk *)ch, 5031 stcb, *netp); 5032 *offset = length; 5033 return (NULL); 5034 break; 5035 case SCTP_SHUTDOWN: 5036 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN, stcb %p\n", 5037 (void *)stcb); 5038 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_shutdown_chunk))) { 5039 *offset = length; 5040 if (locked_tcb) { 5041 SCTP_TCB_UNLOCK(locked_tcb); 5042 } 5043 return (NULL); 5044 } 5045 if (netp && *netp) { 5046 int abort_flag = 0; 5047 5048 sctp_handle_shutdown((struct sctp_shutdown_chunk *)ch, 5049 stcb, *netp, &abort_flag); 5050 if (abort_flag) { 5051 *offset = length; 5052 return (NULL); 5053 } 5054 } 5055 break; 5056 case SCTP_SHUTDOWN_ACK: 5057 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-ACK, stcb %p\n", (void *)stcb); 5058 if ((stcb) && (netp) && (*netp)) 5059 sctp_handle_shutdown_ack((struct sctp_shutdown_ack_chunk *)ch, stcb, *netp); 5060 *offset = length; 5061 return (NULL); 5062 break; 5063 5064 case SCTP_OPERATION_ERROR: 5065 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_OP-ERR\n"); 5066 if ((stcb) && netp && *netp && sctp_handle_error(ch, stcb, *netp) < 0) { 5067 *offset = length; 5068 return (NULL); 5069 } 5070 break; 5071 case SCTP_COOKIE_ECHO: 5072 SCTPDBG(SCTP_DEBUG_INPUT3, 5073 "SCTP_COOKIE-ECHO, stcb %p\n", (void *)stcb); 5074 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 5075 ; 5076 } else { 5077 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5078 /* We are not interested anymore */ 5079 abend: 5080 if (stcb) { 5081 SCTP_TCB_UNLOCK(stcb); 5082 } 5083 *offset = length; 5084 return (NULL); 5085 } 5086 } 5087 /* 5088 * First are we accepting? We do this again here 5089 * since it is possible that a previous endpoint WAS 5090 * listening responded to a INIT-ACK and then 5091 * closed. We opened and bound.. and are now no 5092 * longer listening. 
5093 */ 5094 5095 if ((stcb == NULL) && (inp->sctp_socket->so_qlen >= inp->sctp_socket->so_qlimit)) { 5096 if ((inp->sctp_flags & SCTP_PCB_FLAGS_TCPTYPE) && 5097 (SCTP_BASE_SYSCTL(sctp_abort_if_one_2_one_hits_limit))) { 5098 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 5099 sctp_abort_association(inp, stcb, m, iphlen, 5100 src, dst, sh, op_err, 5101 use_mflowid, mflowid, 5102 vrf_id, port); 5103 } 5104 *offset = length; 5105 return (NULL); 5106 } else { 5107 struct mbuf *ret_buf; 5108 struct sctp_inpcb *linp; 5109 5110 if (stcb) { 5111 linp = NULL; 5112 } else { 5113 linp = inp; 5114 } 5115 5116 if (linp) { 5117 SCTP_ASOC_CREATE_LOCK(linp); 5118 if ((inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 5119 (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE)) { 5120 SCTP_ASOC_CREATE_UNLOCK(linp); 5121 goto abend; 5122 } 5123 } 5124 if (netp) { 5125 ret_buf = 5126 sctp_handle_cookie_echo(m, iphlen, 5127 *offset, 5128 src, dst, 5129 sh, 5130 (struct sctp_cookie_echo_chunk *)ch, 5131 &inp, &stcb, netp, 5132 auth_skipped, 5133 auth_offset, 5134 auth_len, 5135 &locked_tcb, 5136 use_mflowid, 5137 mflowid, 5138 vrf_id, 5139 port); 5140 } else { 5141 ret_buf = NULL; 5142 } 5143 if (linp) { 5144 SCTP_ASOC_CREATE_UNLOCK(linp); 5145 } 5146 if (ret_buf == NULL) { 5147 if (locked_tcb) { 5148 SCTP_TCB_UNLOCK(locked_tcb); 5149 } 5150 SCTPDBG(SCTP_DEBUG_INPUT3, 5151 "GAK, null buffer\n"); 5152 *offset = length; 5153 return (NULL); 5154 } 5155 /* if AUTH skipped, see if it verified... 
*/ 5156 if (auth_skipped) { 5157 got_auth = 1; 5158 auth_skipped = 0; 5159 } 5160 if (!TAILQ_EMPTY(&stcb->asoc.sent_queue)) { 5161 /* 5162 * Restart the timer if we have 5163 * pending data 5164 */ 5165 struct sctp_tmit_chunk *chk; 5166 5167 chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 5168 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, chk->whoTo); 5169 } 5170 } 5171 break; 5172 case SCTP_COOKIE_ACK: 5173 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_COOKIE-ACK, stcb %p\n", (void *)stcb); 5174 if ((stcb == NULL) || chk_length != sizeof(struct sctp_cookie_ack_chunk)) { 5175 if (locked_tcb) { 5176 SCTP_TCB_UNLOCK(locked_tcb); 5177 } 5178 return (NULL); 5179 } 5180 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5181 /* We are not interested anymore */ 5182 if ((stcb) && (stcb->asoc.total_output_queue_size)) { 5183 ; 5184 } else if (stcb) { 5185#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5186 so = SCTP_INP_SO(inp); 5187 atomic_add_int(&stcb->asoc.refcnt, 1); 5188 SCTP_TCB_UNLOCK(stcb); 5189 SCTP_SOCKET_LOCK(so, 1); 5190 SCTP_TCB_LOCK(stcb); 5191 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5192#endif 5193 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_27); 5194#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5195 SCTP_SOCKET_UNLOCK(so, 1); 5196#endif 5197 *offset = length; 5198 return (NULL); 5199 } 5200 } 5201 /* He's alive so give him credit */ 5202 if ((stcb) && netp && *netp) { 5203 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5204 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5205 stcb->asoc.overall_error_count, 5206 0, 5207 SCTP_FROM_SCTP_INPUT, 5208 __LINE__); 5209 } 5210 stcb->asoc.overall_error_count = 0; 5211 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, *netp); 5212 } 5213 break; 5214 case SCTP_ECN_ECHO: 5215 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-ECHO\n"); 5216 /* He's alive so give him credit */ 5217 if ((stcb == NULL) || (chk_length != sizeof(struct 
sctp_ecne_chunk))) { 5218 /* Its not ours */ 5219 if (locked_tcb) { 5220 SCTP_TCB_UNLOCK(locked_tcb); 5221 } 5222 *offset = length; 5223 return (NULL); 5224 } 5225 if (stcb) { 5226 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5227 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5228 stcb->asoc.overall_error_count, 5229 0, 5230 SCTP_FROM_SCTP_INPUT, 5231 __LINE__); 5232 } 5233 stcb->asoc.overall_error_count = 0; 5234 sctp_handle_ecn_echo((struct sctp_ecne_chunk *)ch, 5235 stcb); 5236 ecne_seen = 1; 5237 } 5238 break; 5239 case SCTP_ECN_CWR: 5240 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ECN-CWR\n"); 5241 /* He's alive so give him credit */ 5242 if ((stcb == NULL) || (chk_length != sizeof(struct sctp_cwr_chunk))) { 5243 /* Its not ours */ 5244 if (locked_tcb) { 5245 SCTP_TCB_UNLOCK(locked_tcb); 5246 } 5247 *offset = length; 5248 return (NULL); 5249 } 5250 if (stcb) { 5251 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5252 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5253 stcb->asoc.overall_error_count, 5254 0, 5255 SCTP_FROM_SCTP_INPUT, 5256 __LINE__); 5257 } 5258 stcb->asoc.overall_error_count = 0; 5259 sctp_handle_ecn_cwr((struct sctp_cwr_chunk *)ch, stcb, *netp); 5260 } 5261 break; 5262 case SCTP_SHUTDOWN_COMPLETE: 5263 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_SHUTDOWN-COMPLETE, stcb %p\n", (void *)stcb); 5264 /* must be first and only chunk */ 5265 if ((num_chunks > 1) || 5266 (length - *offset > (int)SCTP_SIZE32(chk_length))) { 5267 *offset = length; 5268 if (locked_tcb) { 5269 SCTP_TCB_UNLOCK(locked_tcb); 5270 } 5271 return (NULL); 5272 } 5273 if ((stcb) && netp && *netp) { 5274 sctp_handle_shutdown_complete((struct sctp_shutdown_complete_chunk *)ch, 5275 stcb, *netp); 5276 } 5277 *offset = length; 5278 return (NULL); 5279 break; 5280 case SCTP_ASCONF: 5281 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF\n"); 5282 /* He's alive so give him credit */ 5283 if (stcb) { 5284 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5285 
sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5286 stcb->asoc.overall_error_count, 5287 0, 5288 SCTP_FROM_SCTP_INPUT, 5289 __LINE__); 5290 } 5291 stcb->asoc.overall_error_count = 0; 5292 sctp_handle_asconf(m, *offset, src, 5293 (struct sctp_asconf_chunk *)ch, stcb, asconf_cnt == 0); 5294 asconf_cnt++; 5295 } 5296 break; 5297 case SCTP_ASCONF_ACK: 5298 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_ASCONF-ACK\n"); 5299 if (chk_length < sizeof(struct sctp_asconf_ack_chunk)) { 5300 /* Its not ours */ 5301 if (locked_tcb) { 5302 SCTP_TCB_UNLOCK(locked_tcb); 5303 } 5304 *offset = length; 5305 return (NULL); 5306 } 5307 if ((stcb) && netp && *netp) { 5308 /* He's alive so give him credit */ 5309 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5310 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5311 stcb->asoc.overall_error_count, 5312 0, 5313 SCTP_FROM_SCTP_INPUT, 5314 __LINE__); 5315 } 5316 stcb->asoc.overall_error_count = 0; 5317 sctp_handle_asconf_ack(m, *offset, 5318 (struct sctp_asconf_ack_chunk *)ch, stcb, *netp, &abort_no_unlock); 5319 if (abort_no_unlock) 5320 return (NULL); 5321 } 5322 break; 5323 case SCTP_FORWARD_CUM_TSN: 5324 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_FWD-TSN\n"); 5325 if (chk_length < sizeof(struct sctp_forward_tsn_chunk)) { 5326 /* Its not ours */ 5327 if (locked_tcb) { 5328 SCTP_TCB_UNLOCK(locked_tcb); 5329 } 5330 *offset = length; 5331 return (NULL); 5332 } 5333 /* He's alive so give him credit */ 5334 if (stcb) { 5335 int abort_flag = 0; 5336 5337 stcb->asoc.overall_error_count = 0; 5338 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5339 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5340 stcb->asoc.overall_error_count, 5341 0, 5342 SCTP_FROM_SCTP_INPUT, 5343 __LINE__); 5344 } 5345 *fwd_tsn_seen = 1; 5346 if (inp->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 5347 /* We are not interested anymore */ 5348#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5349 so = SCTP_INP_SO(inp); 5350 atomic_add_int(&stcb->asoc.refcnt, 1); 5351 
SCTP_TCB_UNLOCK(stcb); 5352 SCTP_SOCKET_LOCK(so, 1); 5353 SCTP_TCB_LOCK(stcb); 5354 atomic_subtract_int(&stcb->asoc.refcnt, 1); 5355#endif 5356 (void)sctp_free_assoc(inp, stcb, SCTP_NORMAL_PROC, SCTP_FROM_SCTP_INPUT + SCTP_LOC_29); 5357#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 5358 SCTP_SOCKET_UNLOCK(so, 1); 5359#endif 5360 *offset = length; 5361 return (NULL); 5362 } 5363 sctp_handle_forward_tsn(stcb, 5364 (struct sctp_forward_tsn_chunk *)ch, &abort_flag, m, *offset); 5365 if (abort_flag) { 5366 *offset = length; 5367 return (NULL); 5368 } else { 5369 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5370 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5371 stcb->asoc.overall_error_count, 5372 0, 5373 SCTP_FROM_SCTP_INPUT, 5374 __LINE__); 5375 } 5376 stcb->asoc.overall_error_count = 0; 5377 } 5378 5379 } 5380 break; 5381 case SCTP_STREAM_RESET: 5382 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_STREAM_RESET\n"); 5383 if (((stcb == NULL) || (ch == NULL) || (chk_length < sizeof(struct sctp_stream_reset_tsn_req)))) { 5384 /* Its not ours */ 5385 if (locked_tcb) { 5386 SCTP_TCB_UNLOCK(locked_tcb); 5387 } 5388 *offset = length; 5389 return (NULL); 5390 } 5391 if (stcb->asoc.peer_supports_strreset == 0) { 5392 /* 5393 * hmm, peer should have announced this, but 5394 * we will turn it on since he is sending us 5395 * a stream reset. 
5396 */ 5397 stcb->asoc.peer_supports_strreset = 1; 5398 } 5399 if (sctp_handle_stream_reset(stcb, m, *offset, ch)) { 5400 /* stop processing */ 5401 *offset = length; 5402 return (NULL); 5403 } 5404 break; 5405 case SCTP_PACKET_DROPPED: 5406 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_PACKET_DROPPED\n"); 5407 /* re-get it all please */ 5408 if (chk_length < sizeof(struct sctp_pktdrop_chunk)) { 5409 /* Its not ours */ 5410 if (locked_tcb) { 5411 SCTP_TCB_UNLOCK(locked_tcb); 5412 } 5413 *offset = length; 5414 return (NULL); 5415 } 5416 if (ch && (stcb) && netp && (*netp)) { 5417 sctp_handle_packet_dropped((struct sctp_pktdrop_chunk *)ch, 5418 stcb, *netp, 5419 min(chk_length, (sizeof(chunk_buf) - 4))); 5420 5421 } 5422 break; 5423 5424 case SCTP_AUTHENTICATION: 5425 SCTPDBG(SCTP_DEBUG_INPUT3, "SCTP_AUTHENTICATION\n"); 5426 if (SCTP_BASE_SYSCTL(sctp_auth_disable)) 5427 goto unknown_chunk; 5428 5429 if (stcb == NULL) { 5430 /* save the first AUTH for later processing */ 5431 if (auth_skipped == 0) { 5432 auth_offset = *offset; 5433 auth_len = chk_length; 5434 auth_skipped = 1; 5435 } 5436 /* skip this chunk (temporarily) */ 5437 goto next_chunk; 5438 } 5439 if ((chk_length < (sizeof(struct sctp_auth_chunk))) || 5440 (chk_length > (sizeof(struct sctp_auth_chunk) + 5441 SCTP_AUTH_DIGEST_LEN_MAX))) { 5442 /* Its not ours */ 5443 if (locked_tcb) { 5444 SCTP_TCB_UNLOCK(locked_tcb); 5445 } 5446 *offset = length; 5447 return (NULL); 5448 } 5449 if (got_auth == 1) { 5450 /* skip this chunk... it's already auth'd */ 5451 goto next_chunk; 5452 } 5453 got_auth = 1; 5454 if ((ch == NULL) || sctp_handle_auth(stcb, (struct sctp_auth_chunk *)ch, 5455 m, *offset)) { 5456 /* auth HMAC failed so dump the packet */ 5457 *offset = length; 5458 return (stcb); 5459 } else { 5460 /* remaining chunks are HMAC checked */ 5461 stcb->asoc.authenticated = 1; 5462 } 5463 break; 5464 5465 default: 5466 unknown_chunk: 5467 /* it's an unknown chunk! 
*/ 5468 if ((ch->chunk_type & 0x40) && (stcb != NULL)) { 5469 struct mbuf *mm; 5470 struct sctp_paramhdr *phd; 5471 5472 mm = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr), 5473 0, M_NOWAIT, 1, MT_DATA); 5474 if (mm) { 5475 phd = mtod(mm, struct sctp_paramhdr *); 5476 /* 5477 * We cheat and use param type since 5478 * we did not bother to define a 5479 * error cause struct. They are the 5480 * same basic format with different 5481 * names. 5482 */ 5483 phd->param_type = htons(SCTP_CAUSE_UNRECOG_CHUNK); 5484 phd->param_length = htons(chk_length + sizeof(*phd)); 5485 SCTP_BUF_LEN(mm) = sizeof(*phd); 5486 SCTP_BUF_NEXT(mm) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); 5487 if (SCTP_BUF_NEXT(mm)) { 5488 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(mm), SCTP_SIZE32(chk_length) - chk_length, NULL) == NULL) { 5489 sctp_m_freem(mm); 5490 } else { 5491#ifdef SCTP_MBUF_LOGGING 5492 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 5493 struct mbuf *mat; 5494 5495 for (mat = SCTP_BUF_NEXT(mm); mat; mat = SCTP_BUF_NEXT(mat)) { 5496 if (SCTP_BUF_IS_EXTENDED(mat)) { 5497 sctp_log_mb(mat, SCTP_MBUF_ICOPY); 5498 } 5499 } 5500 } 5501#endif 5502 sctp_queue_op_err(stcb, mm); 5503 } 5504 } else { 5505 sctp_m_freem(mm); 5506 } 5507 } 5508 } 5509 if ((ch->chunk_type & 0x80) == 0) { 5510 /* discard this packet */ 5511 *offset = length; 5512 return (stcb); 5513 } /* else skip this bad chunk and continue... 
*/ 5514 break; 5515 } /* switch (ch->chunk_type) */ 5516 5517 5518next_chunk: 5519 /* get the next chunk */ 5520 *offset += SCTP_SIZE32(chk_length); 5521 if (*offset >= length) { 5522 /* no more data left in the mbuf chain */ 5523 break; 5524 } 5525 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 5526 sizeof(struct sctp_chunkhdr), chunk_buf); 5527 if (ch == NULL) { 5528 if (locked_tcb) { 5529 SCTP_TCB_UNLOCK(locked_tcb); 5530 } 5531 *offset = length; 5532 return (NULL); 5533 } 5534 } /* while */ 5535 5536 if (asconf_cnt > 0 && stcb != NULL) { 5537 sctp_send_asconf_ack(stcb); 5538 } 5539 return (stcb); 5540} 5541 5542 5543#ifdef INVARIANTS 5544#ifdef __GNUC__ 5545__attribute__((noinline)) 5546#endif 5547 void 5548 sctp_validate_no_locks(struct sctp_inpcb *inp) 5549{ 5550 struct sctp_tcb *lstcb; 5551 5552 LIST_FOREACH(lstcb, &inp->sctp_asoc_list, sctp_tcblist) { 5553 if (mtx_owned(&lstcb->tcb_mtx)) { 5554 panic("Own lock on stcb at return from input"); 5555 } 5556 } 5557 if (mtx_owned(&inp->inp_create_mtx)) { 5558 panic("Own create lock on inp"); 5559 } 5560 if (mtx_owned(&inp->inp_mtx)) { 5561 panic("Own inp lock on inp"); 5562 } 5563} 5564 5565#endif 5566 5567/* 5568 * common input chunk processing (v4 and v6) 5569 */ 5570void 5571sctp_common_input_processing(struct mbuf **mm, int iphlen, int offset, int length, 5572 struct sockaddr *src, struct sockaddr *dst, 5573 struct sctphdr *sh, struct sctp_chunkhdr *ch, 5574#if !defined(SCTP_WITH_NO_CSUM) 5575 uint8_t compute_crc, 5576#endif 5577 uint8_t ecn_bits, 5578 uint8_t use_mflowid, uint32_t mflowid, 5579 uint32_t vrf_id, uint16_t port) 5580{ 5581 uint32_t high_tsn; 5582 int fwd_tsn_seen = 0, data_processed = 0; 5583 struct mbuf *m = *mm, *op_err; 5584 char msg[SCTP_DIAG_INFO_LEN]; 5585 int un_sent; 5586 int cnt_ctrl_ready = 0; 5587 struct sctp_inpcb *inp = NULL, *inp_decr = NULL; 5588 struct sctp_tcb *stcb = NULL; 5589 struct sctp_nets *net = NULL; 5590 5591 SCTP_STAT_INCR(sctps_recvdatagrams); 5592#ifdef 
SCTP_AUDITING_ENABLED 5593 sctp_audit_log(0xE0, 1); 5594 sctp_auditing(0, inp, stcb, net); 5595#endif 5596#if !defined(SCTP_WITH_NO_CSUM) 5597 if (compute_crc != 0) { 5598 uint32_t check, calc_check; 5599 5600 check = sh->checksum; 5601 sh->checksum = 0; 5602 calc_check = sctp_calculate_cksum(m, iphlen); 5603 sh->checksum = check; 5604 if (calc_check != check) { 5605 SCTPDBG(SCTP_DEBUG_INPUT1, "Bad CSUM on SCTP packet calc_check:%x check:%x m:%p mlen:%d iphlen:%d\n", 5606 calc_check, check, (void *)m, length, iphlen); 5607 stcb = sctp_findassociation_addr(m, offset, src, dst, 5608 sh, ch, &inp, &net, vrf_id); 5609#if defined(INET) || defined(INET6) 5610 if ((net != NULL) && (port != 0)) { 5611 if (net->port == 0) { 5612 sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr)); 5613 } 5614 net->port = port; 5615 } 5616#endif 5617 if ((net != NULL) && (use_mflowid != 0)) { 5618 net->flowid = mflowid; 5619#ifdef INVARIANTS 5620 net->flowidset = 1; 5621#endif 5622 } 5623 if ((inp != NULL) && (stcb != NULL)) { 5624 sctp_send_packet_dropped(stcb, net, m, length, iphlen, 1); 5625 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_INPUT_ERROR, SCTP_SO_NOT_LOCKED); 5626 } else if ((inp != NULL) && (stcb == NULL)) { 5627 inp_decr = inp; 5628 } 5629 SCTP_STAT_INCR(sctps_badsum); 5630 SCTP_STAT_INCR_COUNTER32(sctps_checksumerrors); 5631 goto out; 5632 } 5633 } 5634#endif 5635 /* Destination port of 0 is illegal, based on RFC4960. 
*/ 5636 if (sh->dest_port == 0) { 5637 SCTP_STAT_INCR(sctps_hdrops); 5638 goto out; 5639 } 5640 stcb = sctp_findassociation_addr(m, offset, src, dst, 5641 sh, ch, &inp, &net, vrf_id); 5642#if defined(INET) || defined(INET6) 5643 if ((net != NULL) && (port != 0)) { 5644 if (net->port == 0) { 5645 sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr)); 5646 } 5647 net->port = port; 5648 } 5649#endif 5650 if ((net != NULL) && (use_mflowid != 0)) { 5651 net->flowid = mflowid; 5652#ifdef INVARIANTS 5653 net->flowidset = 1; 5654#endif 5655 } 5656 if (inp == NULL) { 5657 SCTP_STAT_INCR(sctps_noport); 5658 if (badport_bandlim(BANDLIM_SCTP_OOTB) < 0) { 5659 goto out; 5660 } 5661 if (ch->chunk_type == SCTP_SHUTDOWN_ACK) { 5662 sctp_send_shutdown_complete2(src, dst, sh, 5663 use_mflowid, mflowid, 5664 vrf_id, port); 5665 goto out; 5666 } 5667 if (ch->chunk_type == SCTP_SHUTDOWN_COMPLETE) { 5668 goto out; 5669 } 5670 if (ch->chunk_type != SCTP_ABORT_ASSOCIATION) { 5671 if ((SCTP_BASE_SYSCTL(sctp_blackhole) == 0) || 5672 ((SCTP_BASE_SYSCTL(sctp_blackhole) == 1) && 5673 (ch->chunk_type != SCTP_INIT))) { 5674 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 5675 "Out of the blue"); 5676 sctp_send_abort(m, iphlen, src, dst, 5677 sh, 0, op_err, 5678 use_mflowid, mflowid, 5679 vrf_id, port); 5680 } 5681 } 5682 goto out; 5683 } else if (stcb == NULL) { 5684 inp_decr = inp; 5685 } 5686#ifdef IPSEC 5687 /*- 5688 * I very much doubt any of the IPSEC stuff will work but I have no 5689 * idea, so I will leave it in place. 
5690 */ 5691 if (inp != NULL) { 5692 switch (dst->sa_family) { 5693#ifdef INET 5694 case AF_INET: 5695 if (ipsec4_in_reject(m, &inp->ip_inp.inp)) { 5696 IPSECSTAT_INC(ips_in_polvio); 5697 SCTP_STAT_INCR(sctps_hdrops); 5698 goto out; 5699 } 5700 break; 5701#endif 5702#ifdef INET6 5703 case AF_INET6: 5704 if (ipsec6_in_reject(m, &inp->ip_inp.inp)) { 5705 IPSEC6STAT_INC(ips_in_polvio); 5706 SCTP_STAT_INCR(sctps_hdrops); 5707 goto out; 5708 } 5709 break; 5710#endif 5711 default: 5712 break; 5713 } 5714 } 5715#endif 5716 SCTPDBG(SCTP_DEBUG_INPUT1, "Ok, Common input processing called, m:%p iphlen:%d offset:%d length:%d stcb:%p\n", 5717 (void *)m, iphlen, offset, length, (void *)stcb); 5718 if (stcb) { 5719 /* always clear this before beginning a packet */ 5720 stcb->asoc.authenticated = 0; 5721 stcb->asoc.seen_a_sack_this_pkt = 0; 5722 SCTPDBG(SCTP_DEBUG_INPUT1, "stcb:%p state:%x\n", 5723 (void *)stcb, stcb->asoc.state); 5724 5725 if ((stcb->asoc.state & SCTP_STATE_WAS_ABORTED) || 5726 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED)) { 5727 /*- 5728 * If we hit here, we had a ref count 5729 * up when the assoc was aborted and the 5730 * timer is clearing out the assoc, we should 5731 * NOT respond to any packet.. its OOTB. 
5732 */ 5733 SCTP_TCB_UNLOCK(stcb); 5734 stcb = NULL; 5735 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__); 5736 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 5737 msg); 5738 sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err, 5739 use_mflowid, mflowid, 5740 vrf_id, port); 5741 goto out; 5742 } 5743 } 5744 if (IS_SCTP_CONTROL(ch)) { 5745 /* process the control portion of the SCTP packet */ 5746 /* sa_ignore NO_NULL_CHK */ 5747 stcb = sctp_process_control(m, iphlen, &offset, length, 5748 src, dst, sh, ch, 5749 inp, stcb, &net, &fwd_tsn_seen, 5750 use_mflowid, mflowid, 5751 vrf_id, port); 5752 if (stcb) { 5753 /* 5754 * This covers us if the cookie-echo was there and 5755 * it changes our INP. 5756 */ 5757 inp = stcb->sctp_ep; 5758#if defined(INET) || defined(INET6) 5759 if ((net) && (port)) { 5760 if (net->port == 0) { 5761 sctp_pathmtu_adjustment(stcb, net->mtu - sizeof(struct udphdr)); 5762 } 5763 net->port = port; 5764 } 5765#endif 5766 } 5767 } else { 5768 /* 5769 * no control chunks, so pre-process DATA chunks (these 5770 * checks are taken care of by control processing) 5771 */ 5772 5773 /* 5774 * if DATA only packet, and auth is required, then punt... 
5775 * can't have authenticated without any AUTH (control) 5776 * chunks 5777 */ 5778 if ((stcb != NULL) && 5779 !SCTP_BASE_SYSCTL(sctp_auth_disable) && 5780 sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks)) { 5781 /* "silently" ignore */ 5782 SCTP_STAT_INCR(sctps_recvauthmissing); 5783 goto out; 5784 } 5785 if (stcb == NULL) { 5786 /* out of the blue DATA chunk */ 5787 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__); 5788 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 5789 msg); 5790 sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err, 5791 use_mflowid, mflowid, 5792 vrf_id, port); 5793 goto out; 5794 } 5795 if (stcb->asoc.my_vtag != ntohl(sh->v_tag)) { 5796 /* v_tag mismatch! */ 5797 SCTP_STAT_INCR(sctps_badvtag); 5798 goto out; 5799 } 5800 } 5801 5802 if (stcb == NULL) { 5803 /* 5804 * no valid TCB for this packet, or we found it's a bad 5805 * packet while processing control, or we're done with this 5806 * packet (done or skip rest of data), so we drop it... 5807 */ 5808 goto out; 5809 } 5810 /* 5811 * DATA chunk processing 5812 */ 5813 /* plow through the data chunks while length > offset */ 5814 5815 /* 5816 * Rest should be DATA only. Check authentication state if AUTH for 5817 * DATA is required. 5818 */ 5819 if ((length > offset) && 5820 (stcb != NULL) && 5821 !SCTP_BASE_SYSCTL(sctp_auth_disable) && 5822 sctp_auth_is_required_chunk(SCTP_DATA, stcb->asoc.local_auth_chunks) && 5823 !stcb->asoc.authenticated) { 5824 /* "silently" ignore */ 5825 SCTP_STAT_INCR(sctps_recvauthmissing); 5826 SCTPDBG(SCTP_DEBUG_AUTH1, 5827 "Data chunk requires AUTH, skipped\n"); 5828 goto trigger_send; 5829 } 5830 if (length > offset) { 5831 int retval; 5832 5833 /* 5834 * First check to make sure our state is correct. We would 5835 * not get here unless we really did have a tag, so we don't 5836 * abort if this happens, just dump the chunk silently. 
5837 */ 5838 switch (SCTP_GET_STATE(&stcb->asoc)) { 5839 case SCTP_STATE_COOKIE_ECHOED: 5840 /* 5841 * we consider data with valid tags in this state 5842 * shows us the cookie-ack was lost. Imply it was 5843 * there. 5844 */ 5845 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 5846 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 5847 stcb->asoc.overall_error_count, 5848 0, 5849 SCTP_FROM_SCTP_INPUT, 5850 __LINE__); 5851 } 5852 stcb->asoc.overall_error_count = 0; 5853 sctp_handle_cookie_ack((struct sctp_cookie_ack_chunk *)ch, stcb, net); 5854 break; 5855 case SCTP_STATE_COOKIE_WAIT: 5856 /* 5857 * We consider OOTB any data sent during asoc setup. 5858 */ 5859 snprintf(msg, sizeof(msg), "OOTB, %s:%d at %s\n", __FILE__, __LINE__, __FUNCTION__); 5860 op_err = sctp_generate_cause(SCTP_BASE_SYSCTL(sctp_diag_info_code), 5861 msg); 5862 sctp_handle_ootb(m, iphlen, offset, src, dst, sh, inp, op_err, 5863 use_mflowid, mflowid, 5864 vrf_id, port); 5865 goto out; 5866 /* sa_ignore NOTREACHED */ 5867 break; 5868 case SCTP_STATE_EMPTY: /* should not happen */ 5869 case SCTP_STATE_INUSE: /* should not happen */ 5870 case SCTP_STATE_SHUTDOWN_RECEIVED: /* This is a peer error */ 5871 case SCTP_STATE_SHUTDOWN_ACK_SENT: 5872 default: 5873 goto out; 5874 /* sa_ignore NOTREACHED */ 5875 break; 5876 case SCTP_STATE_OPEN: 5877 case SCTP_STATE_SHUTDOWN_SENT: 5878 break; 5879 } 5880 /* plow through the data chunks while length > offset */ 5881 retval = sctp_process_data(mm, iphlen, &offset, length, 5882 src, dst, sh, 5883 inp, stcb, net, &high_tsn, 5884 use_mflowid, mflowid, 5885 vrf_id, port); 5886 if (retval == 2) { 5887 /* 5888 * The association aborted, NO UNLOCK needed since 5889 * the association is destroyed. 
5890 */ 5891 stcb = NULL; 5892 goto out; 5893 } 5894 data_processed = 1; 5895 /* 5896 * Anything important needs to have been m_copy'ed in 5897 * process_data 5898 */ 5899 } 5900 /* take care of ecn */ 5901 if ((data_processed == 1) && 5902 (stcb->asoc.ecn_supported == 1) && 5903 ((ecn_bits & SCTP_CE_BITS) == SCTP_CE_BITS)) { 5904 /* Yep, we need to add a ECNE */ 5905 sctp_send_ecn_echo(stcb, net, high_tsn); 5906 } 5907 if ((data_processed == 0) && (fwd_tsn_seen)) { 5908 int was_a_gap; 5909 uint32_t highest_tsn; 5910 5911 if (SCTP_TSN_GT(stcb->asoc.highest_tsn_inside_nr_map, stcb->asoc.highest_tsn_inside_map)) { 5912 highest_tsn = stcb->asoc.highest_tsn_inside_nr_map; 5913 } else { 5914 highest_tsn = stcb->asoc.highest_tsn_inside_map; 5915 } 5916 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 5917 stcb->asoc.send_sack = 1; 5918 sctp_sack_check(stcb, was_a_gap); 5919 } else if (fwd_tsn_seen) { 5920 stcb->asoc.send_sack = 1; 5921 } 5922 /* trigger send of any chunks in queue... 
*/ 5923trigger_send: 5924#ifdef SCTP_AUDITING_ENABLED 5925 sctp_audit_log(0xE0, 2); 5926 sctp_auditing(1, inp, stcb, net); 5927#endif 5928 SCTPDBG(SCTP_DEBUG_INPUT1, 5929 "Check for chunk output prw:%d tqe:%d tf=%d\n", 5930 stcb->asoc.peers_rwnd, 5931 TAILQ_EMPTY(&stcb->asoc.control_send_queue), 5932 stcb->asoc.total_flight); 5933 un_sent = (stcb->asoc.total_output_queue_size - stcb->asoc.total_flight); 5934 if (!TAILQ_EMPTY(&stcb->asoc.control_send_queue)) { 5935 cnt_ctrl_ready = stcb->asoc.ctrl_queue_cnt - stcb->asoc.ecn_echo_cnt_onq; 5936 } 5937 if (cnt_ctrl_ready || 5938 ((un_sent) && 5939 (stcb->asoc.peers_rwnd > 0 || 5940 (stcb->asoc.peers_rwnd <= 0 && stcb->asoc.total_flight == 0)))) { 5941 SCTPDBG(SCTP_DEBUG_INPUT3, "Calling chunk OUTPUT\n"); 5942 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_CONTROL_PROC, SCTP_SO_NOT_LOCKED); 5943 SCTPDBG(SCTP_DEBUG_INPUT3, "chunk OUTPUT returns\n"); 5944 } 5945#ifdef SCTP_AUDITING_ENABLED 5946 sctp_audit_log(0xE0, 3); 5947 sctp_auditing(2, inp, stcb, net); 5948#endif 5949out: 5950 if (stcb != NULL) { 5951 SCTP_TCB_UNLOCK(stcb); 5952 } 5953 if (inp_decr != NULL) { 5954 /* reduce ref-count */ 5955 SCTP_INP_WLOCK(inp_decr); 5956 SCTP_INP_DECR_REF(inp_decr); 5957 SCTP_INP_WUNLOCK(inp_decr); 5958 } 5959#ifdef INVARIANTS 5960 if (inp != NULL) { 5961 sctp_validate_no_locks(inp); 5962 } 5963#endif 5964 return; 5965} 5966 5967#if 0 5968static void 5969sctp_print_mbuf_chain(struct mbuf *m) 5970{ 5971 for (; m; m = SCTP_BUF_NEXT(m)) { 5972 SCTP_PRINTF("%p: m_len = %ld\n", (void *)m, SCTP_BUF_LEN(m)); 5973 if (SCTP_BUF_IS_EXTENDED(m)) 5974 SCTP_PRINTF("%p: extend_size = %d\n", (void *)m, SCTP_BUF_EXTEND_SIZE(m)); 5975 } 5976} 5977 5978#endif 5979 5980#ifdef INET 5981void 5982sctp_input_with_port(struct mbuf *i_pak, int off, uint16_t port) 5983{ 5984 struct mbuf *m; 5985 int iphlen; 5986 uint32_t vrf_id = 0; 5987 uint8_t ecn_bits; 5988 struct sockaddr_in src, dst; 5989 struct ip *ip; 5990 struct sctphdr *sh; 5991 struct sctp_chunkhdr 
*ch; 5992 int length, offset; 5993 5994#if !defined(SCTP_WITH_NO_CSUM) 5995 uint8_t compute_crc; 5996 5997#endif 5998 uint32_t mflowid; 5999 uint8_t use_mflowid; 6000 6001 iphlen = off; 6002 if (SCTP_GET_PKT_VRFID(i_pak, vrf_id)) { 6003 SCTP_RELEASE_PKT(i_pak); 6004 return; 6005 } 6006 m = SCTP_HEADER_TO_CHAIN(i_pak); 6007#ifdef SCTP_MBUF_LOGGING 6008 /* Log in any input mbufs */ 6009 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 6010 struct mbuf *mat; 6011 6012 for (mat = m; mat; mat = SCTP_BUF_NEXT(mat)) { 6013 if (SCTP_BUF_IS_EXTENDED(mat)) { 6014 sctp_log_mb(mat, SCTP_MBUF_INPUT); 6015 } 6016 } 6017 } 6018#endif 6019#ifdef SCTP_PACKET_LOGGING 6020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LAST_PACKET_TRACING) { 6021 sctp_packet_log(m); 6022 } 6023#endif 6024 SCTPDBG(SCTP_DEBUG_CRCOFFLOAD, 6025 "sctp_input(): Packet of length %d received on %s with csum_flags 0x%b.\n", 6026 m->m_pkthdr.len, 6027 if_name(m->m_pkthdr.rcvif), 6028 (int)m->m_pkthdr.csum_flags, CSUM_BITS); 6029 if (m->m_flags & M_FLOWID) { 6030 mflowid = m->m_pkthdr.flowid; 6031 use_mflowid = 1; 6032 } else { 6033 mflowid = 0; 6034 use_mflowid = 0; 6035 } 6036 SCTP_STAT_INCR(sctps_recvpackets); 6037 SCTP_STAT_INCR_COUNTER64(sctps_inpackets); 6038 /* Get IP, SCTP, and first chunk header together in the first mbuf. 
*/ 6039 offset = iphlen + sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr); 6040 if (SCTP_BUF_LEN(m) < offset) { 6041 if ((m = m_pullup(m, offset)) == NULL) { 6042 SCTP_STAT_INCR(sctps_hdrops); 6043 return; 6044 } 6045 } 6046 ip = mtod(m, struct ip *); 6047 sh = (struct sctphdr *)((caddr_t)ip + iphlen); 6048 ch = (struct sctp_chunkhdr *)((caddr_t)sh + sizeof(struct sctphdr)); 6049 offset -= sizeof(struct sctp_chunkhdr); 6050 memset(&src, 0, sizeof(struct sockaddr_in)); 6051 src.sin_family = AF_INET; 6052 src.sin_len = sizeof(struct sockaddr_in); 6053 src.sin_port = sh->src_port; 6054 src.sin_addr = ip->ip_src; 6055 memset(&dst, 0, sizeof(struct sockaddr_in)); 6056 dst.sin_family = AF_INET; 6057 dst.sin_len = sizeof(struct sockaddr_in); 6058 dst.sin_port = sh->dest_port; 6059 dst.sin_addr = ip->ip_dst; 6060 length = ntohs(ip->ip_len); 6061 /* Validate mbuf chain length with IP payload length. */ 6062 if (SCTP_HEADER_LEN(m) != length) { 6063 SCTPDBG(SCTP_DEBUG_INPUT1, 6064 "sctp_input() length:%d reported length:%d\n", length, SCTP_HEADER_LEN(m)); 6065 SCTP_STAT_INCR(sctps_hdrops); 6066 goto out; 6067 } 6068 /* SCTP does not allow broadcasts or multicasts */ 6069 if (IN_MULTICAST(ntohl(dst.sin_addr.s_addr))) { 6070 goto out; 6071 } 6072 if (SCTP_IS_IT_BROADCAST(dst.sin_addr, m)) { 6073 goto out; 6074 } 6075 ecn_bits = ip->ip_tos; 6076#if defined(SCTP_WITH_NO_CSUM) 6077 SCTP_STAT_INCR(sctps_recvnocrc); 6078#else 6079 if (m->m_pkthdr.csum_flags & CSUM_SCTP_VALID) { 6080 SCTP_STAT_INCR(sctps_recvhwcrc); 6081 compute_crc = 0; 6082 } else { 6083 SCTP_STAT_INCR(sctps_recvswcrc); 6084 compute_crc = 1; 6085 } 6086#endif 6087 sctp_common_input_processing(&m, iphlen, offset, length, 6088 (struct sockaddr *)&src, 6089 (struct sockaddr *)&dst, 6090 sh, ch, 6091#if !defined(SCTP_WITH_NO_CSUM) 6092 compute_crc, 6093#endif 6094 ecn_bits, 6095 use_mflowid, mflowid, 6096 vrf_id, port); 6097out: 6098 if (m) { 6099 sctp_m_freem(m); 6100 } 6101 return; 6102} 6103 6104#if 
defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP) 6105extern int *sctp_cpuarry; 6106 6107#endif 6108 6109void 6110sctp_input(struct mbuf *m, int off) 6111{ 6112#if defined(__FreeBSD__) && defined(SCTP_MCORE_INPUT) && defined(SMP) 6113 struct ip *ip; 6114 struct sctphdr *sh; 6115 int offset; 6116 int cpu_to_use; 6117 uint32_t flowid, tag; 6118 6119 if (mp_ncpus > 1) { 6120 if (m->m_flags & M_FLOWID) { 6121 flowid = m->m_pkthdr.flowid; 6122 } else { 6123 /* 6124 * No flow id built by lower layers fix it so we 6125 * create one. 6126 */ 6127 offset = off + sizeof(struct sctphdr); 6128 if (SCTP_BUF_LEN(m) < offset) { 6129 if ((m = m_pullup(m, offset)) == NULL) { 6130 SCTP_STAT_INCR(sctps_hdrops); 6131 return; 6132 } 6133 } 6134 ip = mtod(m, struct ip *); 6135 sh = (struct sctphdr *)((caddr_t)ip + off); 6136 tag = htonl(sh->v_tag); 6137 flowid = tag ^ ntohs(sh->dest_port) ^ ntohs(sh->src_port); 6138 m->m_pkthdr.flowid = flowid; 6139 m->m_flags |= M_FLOWID; 6140 } 6141 cpu_to_use = sctp_cpuarry[flowid % mp_ncpus]; 6142 sctp_queue_to_mcore(m, off, cpu_to_use); 6143 return; 6144 } 6145#endif 6146 sctp_input_with_port(m, off, 0); 6147} 6148 6149#endif 6150