sctp_indata.c revision 347154
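Before the listing, a brief illustrative sketch that is not part of this revision: sctp_build_ctl_nchunk() in the file below packages per-message metadata into SCTP_RCVINFO ancillary data, and the userland fragment here shows how an application would enable and read that cmsg with recvmsg(). The descriptor fd, the buffer sizes, and the omitted error handling are assumptions for illustration only; the option and structure names follow RFC 6458 as used by this stack.

/*
 * Illustrative userland sketch (not part of sctp_indata.c): consume the
 * SCTP_RCVINFO ancillary data that sctp_build_ctl_nchunk() constructs.
 * Assumes an already-connected SCTP socket "fd"; error handling omitted.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netinet/sctp.h>
#include <string.h>
#include <stdio.h>

static void
recv_with_rcvinfo(int fd)
{
	struct sctp_rcvinfo rinfo;
	struct iovec iov;
	struct msghdr msg;
	struct cmsghdr *cmsg;
	char data[2048], cbuf[CMSG_SPACE(sizeof(rinfo))];
	int on = 1;
	ssize_t n;

	/* Ask the stack to attach SCTP_RCVINFO to each received message. */
	setsockopt(fd, IPPROTO_SCTP, SCTP_RECVRCVINFO, &on, sizeof(on));

	memset(&msg, 0, sizeof(msg));
	iov.iov_base = data;
	iov.iov_len = sizeof(data);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	n = recvmsg(fd, &msg, 0);
	if (n < 0)
		return;
	/* Walk the control messages and pick out the SCTP_RCVINFO entry. */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
	    cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_SCTP &&
		    cmsg->cmsg_type == SCTP_RCVINFO) {
			memcpy(&rinfo, CMSG_DATA(cmsg), sizeof(rinfo));
			/* rcv_ppid is carried in network byte order. */
			printf("sid=%u ssn=%u tsn=%u ppid=%u\n",
			    rinfo.rcv_sid, rinfo.rcv_ssn,
			    rinfo.rcv_tsn, ntohl(rinfo.rcv_ppid));
		}
	}
}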
1/*- 2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved. 4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions are met: 8 * 9 * a) Redistributions of source code must retain the above copyright notice, 10 * this list of conditions and the following disclaimer. 11 * 12 * b) Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in 14 * the documentation and/or other materials provided with the distribution. 15 * 16 * c) Neither the name of Cisco Systems, Inc. nor the names of its 17 * contributors may be used to endorse or promote products derived 18 * from this software without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 30 * THE POSSIBILITY OF SUCH DAMAGE. 31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: stable/11/sys/netinet/sctp_indata.c 347154 2019-05-05 12:28:39Z tuexen $"); 35 36#include <netinet/sctp_os.h> 37#include <sys/proc.h> 38#include <netinet/sctp_var.h> 39#include <netinet/sctp_sysctl.h> 40#include <netinet/sctp_header.h> 41#include <netinet/sctp_pcb.h> 42#include <netinet/sctputil.h> 43#include <netinet/sctp_output.h> 44#include <netinet/sctp_uio.h> 45#include <netinet/sctp_auth.h> 46#include <netinet/sctp_timer.h> 47#include <netinet/sctp_asconf.h> 48#include <netinet/sctp_indata.h> 49#include <netinet/sctp_bsd_addr.h> 50#include <netinet/sctp_input.h> 51#include <netinet/sctp_crc32.h> 52#include <netinet/sctp_lock_bsd.h> 53/* 54 * NOTES: On the outbound side of things I need to check the sack timer to 55 * see if I should generate a sack into the chunk queue (if I have data to 56 * send that is and will be sending it .. for bundling. 57 * 58 * The callback in sctp_usrreq.c will get called when the socket is read from. 59 * This will cause sctp_service_queues() to get called on the top entry in 60 * the list. 61 */ 62static uint32_t 63sctp_add_chk_to_control(struct sctp_queued_to_read *control, 64 struct sctp_stream_in *strm, 65 struct sctp_tcb *stcb, 66 struct sctp_association *asoc, 67 struct sctp_tmit_chunk *chk, int lock_held); 68 69 70void 71sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) 72{ 73 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc); 74} 75 76/* Calculate what the rwnd would be */ 77uint32_t 78sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc) 79{ 80 uint32_t calc = 0; 81 82 /* 83 * This is really set wrong with respect to a 1-2-m socket. Since 84 * the sb_cc is the count that everyone as put up. 
When we re-write 85 * sctp_soreceive then we will fix this so that ONLY this 86 * associations data is taken into account. 87 */ 88 if (stcb->sctp_socket == NULL) { 89 return (calc); 90 } 91 92 KASSERT(asoc->cnt_on_reasm_queue > 0 || asoc->size_on_reasm_queue == 0, 93 ("size_on_reasm_queue is %u", asoc->size_on_reasm_queue)); 94 KASSERT(asoc->cnt_on_all_streams > 0 || asoc->size_on_all_streams == 0, 95 ("size_on_all_streams is %u", asoc->size_on_all_streams)); 96 if (stcb->asoc.sb_cc == 0 && 97 asoc->cnt_on_reasm_queue == 0 && 98 asoc->cnt_on_all_streams == 0) { 99 /* Full rwnd granted */ 100 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND); 101 return (calc); 102 } 103 /* get actual space */ 104 calc = (uint32_t)sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv); 105 /* 106 * take out what has NOT been put on socket queue and we yet hold 107 * for putting up. 108 */ 109 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_reasm_queue + 110 asoc->cnt_on_reasm_queue * MSIZE)); 111 calc = sctp_sbspace_sub(calc, (uint32_t)(asoc->size_on_all_streams + 112 asoc->cnt_on_all_streams * MSIZE)); 113 if (calc == 0) { 114 /* out of space */ 115 return (calc); 116 } 117 118 /* what is the overhead of all these rwnd's */ 119 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len); 120 /* 121 * If the window gets too small due to ctrl-stuff, reduce it to 1, 122 * even it is 0. SWS engaged 123 */ 124 if (calc < stcb->asoc.my_rwnd_control_len) { 125 calc = 1; 126 } 127 return (calc); 128} 129 130 131 132/* 133 * Build out our readq entry based on the incoming packet. 134 */ 135struct sctp_queued_to_read * 136sctp_build_readq_entry(struct sctp_tcb *stcb, 137 struct sctp_nets *net, 138 uint32_t tsn, uint32_t ppid, 139 uint32_t context, uint16_t sid, 140 uint32_t mid, uint8_t flags, 141 struct mbuf *dm) 142{ 143 struct sctp_queued_to_read *read_queue_e = NULL; 144 145 sctp_alloc_a_readq(stcb, read_queue_e); 146 if (read_queue_e == NULL) { 147 goto failed_build; 148 } 149 memset(read_queue_e, 0, sizeof(struct sctp_queued_to_read)); 150 read_queue_e->sinfo_stream = sid; 151 read_queue_e->sinfo_flags = (flags << 8); 152 read_queue_e->sinfo_ppid = ppid; 153 read_queue_e->sinfo_context = context; 154 read_queue_e->sinfo_tsn = tsn; 155 read_queue_e->sinfo_cumtsn = tsn; 156 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb); 157 read_queue_e->mid = mid; 158 read_queue_e->top_fsn = read_queue_e->fsn_included = 0xffffffff; 159 TAILQ_INIT(&read_queue_e->reasm); 160 read_queue_e->whoFrom = net; 161 atomic_add_int(&net->ref_count, 1); 162 read_queue_e->data = dm; 163 read_queue_e->stcb = stcb; 164 read_queue_e->port_from = stcb->rport; 165failed_build: 166 return (read_queue_e); 167} 168 169struct mbuf * 170sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo) 171{ 172 struct sctp_extrcvinfo *seinfo; 173 struct sctp_sndrcvinfo *outinfo; 174 struct sctp_rcvinfo *rcvinfo; 175 struct sctp_nxtinfo *nxtinfo; 176 struct cmsghdr *cmh; 177 struct mbuf *ret; 178 int len; 179 int use_extended; 180 int provide_nxt; 181 182 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) && 183 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) && 184 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) { 185 /* user does not want any ancillary data */ 186 return (NULL); 187 } 188 189 len = 0; 190 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) { 191 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); 192 } 193 seinfo = (struct sctp_extrcvinfo *)sinfo; 194 if 
(sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) && 195 (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) { 196 provide_nxt = 1; 197 len += CMSG_SPACE(sizeof(struct sctp_nxtinfo)); 198 } else { 199 provide_nxt = 0; 200 } 201 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { 202 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) { 203 use_extended = 1; 204 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo)); 205 } else { 206 use_extended = 0; 207 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)); 208 } 209 } else { 210 use_extended = 0; 211 } 212 213 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA); 214 if (ret == NULL) { 215 /* No space */ 216 return (ret); 217 } 218 SCTP_BUF_LEN(ret) = 0; 219 220 /* We need a CMSG header followed by the struct */ 221 cmh = mtod(ret, struct cmsghdr *); 222 /* 223 * Make sure that there is no un-initialized padding between the 224 * cmsg header and cmsg data and after the cmsg data. 225 */ 226 memset(cmh, 0, len); 227 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) { 228 cmh->cmsg_level = IPPROTO_SCTP; 229 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo)); 230 cmh->cmsg_type = SCTP_RCVINFO; 231 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh); 232 rcvinfo->rcv_sid = sinfo->sinfo_stream; 233 rcvinfo->rcv_ssn = sinfo->sinfo_ssn; 234 rcvinfo->rcv_flags = sinfo->sinfo_flags; 235 rcvinfo->rcv_ppid = sinfo->sinfo_ppid; 236 rcvinfo->rcv_tsn = sinfo->sinfo_tsn; 237 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn; 238 rcvinfo->rcv_context = sinfo->sinfo_context; 239 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id; 240 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo))); 241 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo)); 242 } 243 if (provide_nxt) { 244 cmh->cmsg_level = IPPROTO_SCTP; 245 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo)); 246 cmh->cmsg_type = SCTP_NXTINFO; 247 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh); 248 nxtinfo->nxt_sid = seinfo->serinfo_next_stream; 249 nxtinfo->nxt_flags = 0; 250 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) { 251 nxtinfo->nxt_flags |= SCTP_UNORDERED; 252 } 253 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) { 254 nxtinfo->nxt_flags |= SCTP_NOTIFICATION; 255 } 256 if (seinfo->serinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) { 257 nxtinfo->nxt_flags |= SCTP_COMPLETE; 258 } 259 nxtinfo->nxt_ppid = seinfo->serinfo_next_ppid; 260 nxtinfo->nxt_length = seinfo->serinfo_next_length; 261 nxtinfo->nxt_assoc_id = seinfo->serinfo_next_aid; 262 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo))); 263 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo)); 264 } 265 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) { 266 cmh->cmsg_level = IPPROTO_SCTP; 267 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh); 268 if (use_extended) { 269 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo)); 270 cmh->cmsg_type = SCTP_EXTRCV; 271 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo)); 272 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo)); 273 } else { 274 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo)); 275 cmh->cmsg_type = SCTP_SNDRCV; 276 *outinfo = *sinfo; 277 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo)); 278 } 279 } 280 return (ret); 281} 282 283 284static void 285sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn) 286{ 287 uint32_t gap, i, cumackp1; 288 int fnd = 0; 289 int in_r = 0, in_nr = 0; 290 291 if 
(SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 292 return; 293 } 294 cumackp1 = asoc->cumulative_tsn + 1; 295 if (SCTP_TSN_GT(cumackp1, tsn)) { 296 /* 297 * this tsn is behind the cum ack and thus we don't need to 298 * worry about it being moved from one to the other. 299 */ 300 return; 301 } 302 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); 303 in_r = SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap); 304 in_nr = SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap); 305 if ((in_r == 0) && (in_nr == 0)) { 306#ifdef INVARIANTS 307 panic("Things are really messed up now"); 308#else 309 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn); 310 sctp_print_mapping_array(asoc); 311#endif 312 } 313 if (in_nr == 0) 314 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 315 if (in_r) 316 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap); 317 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 318 asoc->highest_tsn_inside_nr_map = tsn; 319 } 320 if (tsn == asoc->highest_tsn_inside_map) { 321 /* We must back down to see what the new highest is */ 322 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) { 323 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn); 324 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) { 325 asoc->highest_tsn_inside_map = i; 326 fnd = 1; 327 break; 328 } 329 } 330 if (!fnd) { 331 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1; 332 } 333 } 334} 335 336static int 337sctp_place_control_in_stream(struct sctp_stream_in *strm, 338 struct sctp_association *asoc, 339 struct sctp_queued_to_read *control) 340{ 341 struct sctp_queued_to_read *at; 342 struct sctp_readhead *q; 343 uint8_t flags, unordered; 344 345 flags = (control->sinfo_flags >> 8); 346 unordered = flags & SCTP_DATA_UNORDERED; 347 if (unordered) { 348 q = &strm->uno_inqueue; 349 if (asoc->idata_supported == 0) { 350 if (!TAILQ_EMPTY(q)) { 351 /* 352 * Only one stream can be here in old style 353 * -- abort 354 */ 355 return (-1); 356 } 357 TAILQ_INSERT_TAIL(q, control, next_instrm); 358 control->on_strm_q = SCTP_ON_UNORDERED; 359 return (0); 360 } 361 } else { 362 q = &strm->inqueue; 363 } 364 if ((flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 365 control->end_added = 1; 366 control->first_frag_seen = 1; 367 control->last_frag_seen = 1; 368 } 369 if (TAILQ_EMPTY(q)) { 370 /* Empty queue */ 371 TAILQ_INSERT_HEAD(q, control, next_instrm); 372 if (unordered) { 373 control->on_strm_q = SCTP_ON_UNORDERED; 374 } else { 375 control->on_strm_q = SCTP_ON_ORDERED; 376 } 377 return (0); 378 } else { 379 TAILQ_FOREACH(at, q, next_instrm) { 380 if (SCTP_MID_GT(asoc->idata_supported, at->mid, control->mid)) { 381 /* 382 * one in queue is bigger than the new one, 383 * insert before this one 384 */ 385 TAILQ_INSERT_BEFORE(at, control, next_instrm); 386 if (unordered) { 387 control->on_strm_q = SCTP_ON_UNORDERED; 388 } else { 389 control->on_strm_q = SCTP_ON_ORDERED; 390 } 391 break; 392 } else if (SCTP_MID_EQ(asoc->idata_supported, at->mid, control->mid)) { 393 /* 394 * Gak, He sent me a duplicate msg id 395 * number?? return -1 to abort. 
396 */ 397 return (-1); 398 } else { 399 if (TAILQ_NEXT(at, next_instrm) == NULL) { 400 /* 401 * We are at the end, insert it 402 * after this one 403 */ 404 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 405 sctp_log_strm_del(control, at, 406 SCTP_STR_LOG_FROM_INSERT_TL); 407 } 408 TAILQ_INSERT_AFTER(q, at, control, next_instrm); 409 if (unordered) { 410 control->on_strm_q = SCTP_ON_UNORDERED; 411 } else { 412 control->on_strm_q = SCTP_ON_ORDERED; 413 } 414 break; 415 } 416 } 417 } 418 } 419 return (0); 420} 421 422static void 423sctp_abort_in_reasm(struct sctp_tcb *stcb, 424 struct sctp_queued_to_read *control, 425 struct sctp_tmit_chunk *chk, 426 int *abort_flag, int opspot) 427{ 428 char msg[SCTP_DIAG_INFO_LEN]; 429 struct mbuf *oper; 430 431 if (stcb->asoc.idata_supported) { 432 snprintf(msg, sizeof(msg), 433 "Reass %x,CF:%x,TSN=%8.8x,SID=%4.4x,FSN=%8.8x,MID:%8.8x", 434 opspot, 435 control->fsn_included, 436 chk->rec.data.tsn, 437 chk->rec.data.sid, 438 chk->rec.data.fsn, chk->rec.data.mid); 439 } else { 440 snprintf(msg, sizeof(msg), 441 "Reass %x,CI:%x,TSN=%8.8x,SID=%4.4x,FSN=%4.4x,SSN:%4.4x", 442 opspot, 443 control->fsn_included, 444 chk->rec.data.tsn, 445 chk->rec.data.sid, 446 chk->rec.data.fsn, 447 (uint16_t)chk->rec.data.mid); 448 } 449 oper = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 450 sctp_m_freem(chk->data); 451 chk->data = NULL; 452 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 453 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1; 454 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED); 455 *abort_flag = 1; 456} 457 458static void 459sctp_clean_up_control(struct sctp_tcb *stcb, struct sctp_queued_to_read *control) 460{ 461 /* 462 * The control could not be placed and must be cleaned. 463 */ 464 struct sctp_tmit_chunk *chk, *nchk; 465 466 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) { 467 TAILQ_REMOVE(&control->reasm, chk, sctp_next); 468 if (chk->data) 469 sctp_m_freem(chk->data); 470 chk->data = NULL; 471 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 472 } 473 sctp_free_a_readq(stcb, control); 474} 475 476/* 477 * Queue the chunk either right into the socket buffer if it is the next one 478 * to go OR put it in the correct place in the delivery queue. If we do 479 * append to the so_buf, keep doing so until we are out of order as 480 * long as the control's entered are non-fragmented. 481 */ 482static void 483sctp_queue_data_to_stream(struct sctp_tcb *stcb, 484 struct sctp_association *asoc, 485 struct sctp_queued_to_read *control, int *abort_flag, int *need_reasm) 486{ 487 /* 488 * FIX-ME maybe? What happens when the ssn wraps? If we are getting 489 * all the data in one stream this could happen quite rapidly. One 490 * could use the TSN to keep track of things, but this scheme breaks 491 * down in the other type of stream usage that could occur. Send a 492 * single msg to stream 0, send 4Billion messages to stream 1, now 493 * send a message to stream 0. You have a situation where the TSN 494 * has wrapped but not in the stream. Is this worth worrying about 495 * or should we just change our queue sort at the bottom to be by 496 * TSN. 497 * 498 * Could it also be legal for a peer to send ssn 1 with TSN 2 and 499 * ssn 2 with TSN 1? If the peer is doing some sort of funky TSN/SSN 500 * assignment this could happen... and I don't see how this would be 501 * a violation. So for now I am undecided an will leave the sort by 502 * SSN alone. 
Maybe a hybred approach is the answer 503 * 504 */ 505 struct sctp_queued_to_read *at; 506 int queue_needed; 507 uint32_t nxt_todel; 508 struct mbuf *op_err; 509 struct sctp_stream_in *strm; 510 char msg[SCTP_DIAG_INFO_LEN]; 511 512 strm = &asoc->strmin[control->sinfo_stream]; 513 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 514 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD); 515 } 516 if (SCTP_MID_GT((asoc->idata_supported), strm->last_mid_delivered, control->mid)) { 517 /* The incoming sseq is behind where we last delivered? */ 518 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ: %u delivered: %u from peer, Abort association\n", 519 strm->last_mid_delivered, control->mid); 520 /* 521 * throw it in the stream so it gets cleaned up in 522 * association destruction 523 */ 524 TAILQ_INSERT_HEAD(&strm->inqueue, control, next_instrm); 525 if (asoc->idata_supported) { 526 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", 527 strm->last_mid_delivered, control->sinfo_tsn, 528 control->sinfo_stream, control->mid); 529 } else { 530 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 531 (uint16_t)strm->last_mid_delivered, 532 control->sinfo_tsn, 533 control->sinfo_stream, 534 (uint16_t)control->mid); 535 } 536 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 537 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2; 538 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 539 *abort_flag = 1; 540 return; 541 542 } 543 queue_needed = 1; 544 asoc->size_on_all_streams += control->length; 545 sctp_ucount_incr(asoc->cnt_on_all_streams); 546 nxt_todel = strm->last_mid_delivered + 1; 547 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) { 548#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 549 struct socket *so; 550 551 so = SCTP_INP_SO(stcb->sctp_ep); 552 atomic_add_int(&stcb->asoc.refcnt, 1); 553 SCTP_TCB_UNLOCK(stcb); 554 SCTP_SOCKET_LOCK(so, 1); 555 SCTP_TCB_LOCK(stcb); 556 atomic_subtract_int(&stcb->asoc.refcnt, 1); 557 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) { 558 SCTP_SOCKET_UNLOCK(so, 1); 559 return; 560 } 561#endif 562 /* can be delivered right away? 
*/ 563 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 564 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL); 565 } 566 /* EY it wont be queued if it could be delivered directly */ 567 queue_needed = 0; 568 if (asoc->size_on_all_streams >= control->length) { 569 asoc->size_on_all_streams -= control->length; 570 } else { 571#ifdef INVARIANTS 572 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 573#else 574 asoc->size_on_all_streams = 0; 575#endif 576 } 577 sctp_ucount_decr(asoc->cnt_on_all_streams); 578 strm->last_mid_delivered++; 579 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 580 sctp_add_to_readq(stcb->sctp_ep, stcb, 581 control, 582 &stcb->sctp_socket->so_rcv, 1, 583 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED); 584 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next_instrm, at) { 585 /* all delivered */ 586 nxt_todel = strm->last_mid_delivered + 1; 587 if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid) && 588 (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG)) { 589 if (control->on_strm_q == SCTP_ON_ORDERED) { 590 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 591 if (asoc->size_on_all_streams >= control->length) { 592 asoc->size_on_all_streams -= control->length; 593 } else { 594#ifdef INVARIANTS 595 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 596#else 597 asoc->size_on_all_streams = 0; 598#endif 599 } 600 sctp_ucount_decr(asoc->cnt_on_all_streams); 601#ifdef INVARIANTS 602 } else { 603 panic("Huh control: %p is on_strm_q: %d", 604 control, control->on_strm_q); 605#endif 606 } 607 control->on_strm_q = 0; 608 strm->last_mid_delivered++; 609 /* 610 * We ignore the return of deliver_data here 611 * since we always can hold the chunk on the 612 * d-queue. And we have a finite number that 613 * can be delivered from the strq. 614 */ 615 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 616 sctp_log_strm_del(control, NULL, 617 SCTP_STR_LOG_FROM_IMMED_DEL); 618 } 619 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 620 sctp_add_to_readq(stcb->sctp_ep, stcb, 621 control, 622 &stcb->sctp_socket->so_rcv, 1, 623 SCTP_READ_LOCK_NOT_HELD, 624 SCTP_SO_LOCKED); 625 continue; 626 } else if (SCTP_MID_EQ(asoc->idata_supported, nxt_todel, control->mid)) { 627 *need_reasm = 1; 628 } 629 break; 630 } 631#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 632 SCTP_SOCKET_UNLOCK(so, 1); 633#endif 634 } 635 if (queue_needed) { 636 /* 637 * Ok, we did not deliver this guy, find the correct place 638 * to put it on the queue. 
639 */ 640 if (sctp_place_control_in_stream(strm, asoc, control)) { 641 snprintf(msg, sizeof(msg), 642 "Queue to str MID: %u duplicate", 643 control->mid); 644 sctp_clean_up_control(stcb, control); 645 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 646 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3; 647 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 648 *abort_flag = 1; 649 } 650 } 651} 652 653 654static void 655sctp_setup_tail_pointer(struct sctp_queued_to_read *control) 656{ 657 struct mbuf *m, *prev = NULL; 658 struct sctp_tcb *stcb; 659 660 stcb = control->stcb; 661 control->held_length = 0; 662 control->length = 0; 663 m = control->data; 664 while (m) { 665 if (SCTP_BUF_LEN(m) == 0) { 666 /* Skip mbufs with NO length */ 667 if (prev == NULL) { 668 /* First one */ 669 control->data = sctp_m_free(m); 670 m = control->data; 671 } else { 672 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 673 m = SCTP_BUF_NEXT(prev); 674 } 675 if (m == NULL) { 676 control->tail_mbuf = prev; 677 } 678 continue; 679 } 680 prev = m; 681 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 682 if (control->on_read_q) { 683 /* 684 * On read queue so we must increment the SB stuff, 685 * we assume caller has done any locks of SB. 686 */ 687 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m); 688 } 689 m = SCTP_BUF_NEXT(m); 690 } 691 if (prev) { 692 control->tail_mbuf = prev; 693 } 694} 695 696static void 697sctp_add_to_tail_pointer(struct sctp_queued_to_read *control, struct mbuf *m, uint32_t *added) 698{ 699 struct mbuf *prev = NULL; 700 struct sctp_tcb *stcb; 701 702 stcb = control->stcb; 703 if (stcb == NULL) { 704#ifdef INVARIANTS 705 panic("Control broken"); 706#else 707 return; 708#endif 709 } 710 if (control->tail_mbuf == NULL) { 711 /* TSNH */ 712 control->data = m; 713 sctp_setup_tail_pointer(control); 714 return; 715 } 716 control->tail_mbuf->m_next = m; 717 while (m) { 718 if (SCTP_BUF_LEN(m) == 0) { 719 /* Skip mbufs with NO length */ 720 if (prev == NULL) { 721 /* First one */ 722 control->tail_mbuf->m_next = sctp_m_free(m); 723 m = control->tail_mbuf->m_next; 724 } else { 725 SCTP_BUF_NEXT(prev) = sctp_m_free(m); 726 m = SCTP_BUF_NEXT(prev); 727 } 728 if (m == NULL) { 729 control->tail_mbuf = prev; 730 } 731 continue; 732 } 733 prev = m; 734 if (control->on_read_q) { 735 /* 736 * On read queue so we must increment the SB stuff, 737 * we assume caller has done any locks of SB. 
738 */ 739 sctp_sballoc(stcb, &stcb->sctp_socket->so_rcv, m); 740 } 741 *added += SCTP_BUF_LEN(m); 742 atomic_add_int(&control->length, SCTP_BUF_LEN(m)); 743 m = SCTP_BUF_NEXT(m); 744 } 745 if (prev) { 746 control->tail_mbuf = prev; 747 } 748} 749 750static void 751sctp_build_readq_entry_from_ctl(struct sctp_queued_to_read *nc, struct sctp_queued_to_read *control) 752{ 753 memset(nc, 0, sizeof(struct sctp_queued_to_read)); 754 nc->sinfo_stream = control->sinfo_stream; 755 nc->mid = control->mid; 756 TAILQ_INIT(&nc->reasm); 757 nc->top_fsn = control->top_fsn; 758 nc->mid = control->mid; 759 nc->sinfo_flags = control->sinfo_flags; 760 nc->sinfo_ppid = control->sinfo_ppid; 761 nc->sinfo_context = control->sinfo_context; 762 nc->fsn_included = 0xffffffff; 763 nc->sinfo_tsn = control->sinfo_tsn; 764 nc->sinfo_cumtsn = control->sinfo_cumtsn; 765 nc->sinfo_assoc_id = control->sinfo_assoc_id; 766 nc->whoFrom = control->whoFrom; 767 atomic_add_int(&nc->whoFrom->ref_count, 1); 768 nc->stcb = control->stcb; 769 nc->port_from = control->port_from; 770} 771 772static void 773sctp_reset_a_control(struct sctp_queued_to_read *control, 774 struct sctp_inpcb *inp, uint32_t tsn) 775{ 776 control->fsn_included = tsn; 777 if (control->on_read_q) { 778 /* 779 * We have to purge it from there, hopefully this will work 780 * :-) 781 */ 782 TAILQ_REMOVE(&inp->read_queue, control, next); 783 control->on_read_q = 0; 784 } 785} 786 787static int 788sctp_handle_old_unordered_data(struct sctp_tcb *stcb, 789 struct sctp_association *asoc, 790 struct sctp_stream_in *strm, 791 struct sctp_queued_to_read *control, 792 uint32_t pd_point, 793 int inp_read_lock_held) 794{ 795 /* 796 * Special handling for the old un-ordered data chunk. All the 797 * chunks/TSN's go to mid 0. So we have to do the old style watching 798 * to see if we have it all. If you return one, no other control 799 * entries on the un-ordered queue will be looked at. In theory 800 * there should be no others entries in reality, unless the guy is 801 * sending both unordered NDATA and unordered DATA... 802 */ 803 struct sctp_tmit_chunk *chk, *lchk, *tchk; 804 uint32_t fsn; 805 struct sctp_queued_to_read *nc; 806 int cnt_added; 807 808 if (control->first_frag_seen == 0) { 809 /* Nothing we can do, we have not seen the first piece yet */ 810 return (1); 811 } 812 /* Collapse any we can */ 813 cnt_added = 0; 814restart: 815 fsn = control->fsn_included + 1; 816 /* Now what can we add? */ 817 TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, lchk) { 818 if (chk->rec.data.fsn == fsn) { 819 /* Ok lets add it */ 820 sctp_alloc_a_readq(stcb, nc); 821 if (nc == NULL) { 822 break; 823 } 824 memset(nc, 0, sizeof(struct sctp_queued_to_read)); 825 TAILQ_REMOVE(&control->reasm, chk, sctp_next); 826 sctp_add_chk_to_control(control, strm, stcb, asoc, chk, SCTP_READ_LOCK_NOT_HELD); 827 fsn++; 828 cnt_added++; 829 chk = NULL; 830 if (control->end_added) { 831 /* We are done */ 832 if (!TAILQ_EMPTY(&control->reasm)) { 833 /* 834 * Ok we have to move anything left 835 * on the control queue to a new 836 * control. 
837 */ 838 sctp_build_readq_entry_from_ctl(nc, control); 839 tchk = TAILQ_FIRST(&control->reasm); 840 if (tchk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 841 TAILQ_REMOVE(&control->reasm, tchk, sctp_next); 842 if (asoc->size_on_reasm_queue >= tchk->send_size) { 843 asoc->size_on_reasm_queue -= tchk->send_size; 844 } else { 845#ifdef INVARIANTS 846 panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, tchk->send_size); 847#else 848 asoc->size_on_reasm_queue = 0; 849#endif 850 } 851 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 852 nc->first_frag_seen = 1; 853 nc->fsn_included = tchk->rec.data.fsn; 854 nc->data = tchk->data; 855 nc->sinfo_ppid = tchk->rec.data.ppid; 856 nc->sinfo_tsn = tchk->rec.data.tsn; 857 sctp_mark_non_revokable(asoc, tchk->rec.data.tsn); 858 tchk->data = NULL; 859 sctp_free_a_chunk(stcb, tchk, SCTP_SO_NOT_LOCKED); 860 sctp_setup_tail_pointer(nc); 861 tchk = TAILQ_FIRST(&control->reasm); 862 } 863 /* Spin the rest onto the queue */ 864 while (tchk) { 865 TAILQ_REMOVE(&control->reasm, tchk, sctp_next); 866 TAILQ_INSERT_TAIL(&nc->reasm, tchk, sctp_next); 867 tchk = TAILQ_FIRST(&control->reasm); 868 } 869 /* 870 * Now lets add it to the queue 871 * after removing control 872 */ 873 TAILQ_INSERT_TAIL(&strm->uno_inqueue, nc, next_instrm); 874 nc->on_strm_q = SCTP_ON_UNORDERED; 875 if (control->on_strm_q) { 876 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 877 control->on_strm_q = 0; 878 } 879 } 880 if (control->pdapi_started) { 881 strm->pd_api_started = 0; 882 control->pdapi_started = 0; 883 } 884 if (control->on_strm_q) { 885 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 886 control->on_strm_q = 0; 887 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 888 } 889 if (control->on_read_q == 0) { 890 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 891 &stcb->sctp_socket->so_rcv, control->end_added, 892 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 893 } 894 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 895 if ((nc->first_frag_seen) && !TAILQ_EMPTY(&nc->reasm)) { 896 /* 897 * Switch to the new guy and 898 * continue 899 */ 900 control = nc; 901 goto restart; 902 } else { 903 if (nc->on_strm_q == 0) { 904 sctp_free_a_readq(stcb, nc); 905 } 906 } 907 return (1); 908 } else { 909 sctp_free_a_readq(stcb, nc); 910 } 911 } else { 912 /* Can't add more */ 913 break; 914 } 915 } 916 if ((control->length > pd_point) && (strm->pd_api_started == 0)) { 917 strm->pd_api_started = 1; 918 control->pdapi_started = 1; 919 sctp_add_to_readq(stcb->sctp_ep, stcb, control, 920 &stcb->sctp_socket->so_rcv, control->end_added, 921 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 922 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 923 return (0); 924 } else { 925 return (1); 926 } 927} 928 929static void 930sctp_inject_old_unordered_data(struct sctp_tcb *stcb, 931 struct sctp_association *asoc, 932 struct sctp_queued_to_read *control, 933 struct sctp_tmit_chunk *chk, 934 int *abort_flag) 935{ 936 struct sctp_tmit_chunk *at; 937 int inserted; 938 939 /* 940 * Here we need to place the chunk into the control structure sorted 941 * in the correct order. 942 */ 943 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 944 /* Its the very first one. */ 945 SCTPDBG(SCTP_DEBUG_XXX, 946 "chunk is a first fsn: %u becomes fsn_included\n", 947 chk->rec.data.fsn); 948 if (control->first_frag_seen) { 949 /* 950 * In old un-ordered we can reassembly on one 951 * control multiple messages. 
As long as the next 952 * FIRST is greater then the old first (TSN i.e. FSN 953 * wise) 954 */ 955 struct mbuf *tdata; 956 uint32_t tmp; 957 958 if (SCTP_TSN_GT(chk->rec.data.fsn, control->fsn_included)) { 959 /* 960 * Easy way the start of a new guy beyond 961 * the lowest 962 */ 963 goto place_chunk; 964 } 965 if ((chk->rec.data.fsn == control->fsn_included) || 966 (control->pdapi_started)) { 967 /* 968 * Ok this should not happen, if it does we 969 * started the pd-api on the higher TSN 970 * (since the equals part is a TSN failure 971 * it must be that). 972 * 973 * We are completly hosed in that case since 974 * I have no way to recover. This really 975 * will only happen if we can get more TSN's 976 * higher before the pd-api-point. 977 */ 978 sctp_abort_in_reasm(stcb, control, chk, 979 abort_flag, 980 SCTP_FROM_SCTP_INDATA + SCTP_LOC_4); 981 982 return; 983 } 984 /* 985 * Ok we have two firsts and the one we just got is 986 * smaller than the one we previously placed.. yuck! 987 * We must swap them out. 988 */ 989 /* swap the mbufs */ 990 tdata = control->data; 991 control->data = chk->data; 992 chk->data = tdata; 993 /* Save the lengths */ 994 chk->send_size = control->length; 995 /* Recompute length of control and tail pointer */ 996 sctp_setup_tail_pointer(control); 997 /* Fix the FSN included */ 998 tmp = control->fsn_included; 999 control->fsn_included = chk->rec.data.fsn; 1000 chk->rec.data.fsn = tmp; 1001 /* Fix the TSN included */ 1002 tmp = control->sinfo_tsn; 1003 control->sinfo_tsn = chk->rec.data.tsn; 1004 chk->rec.data.tsn = tmp; 1005 /* Fix the PPID included */ 1006 tmp = control->sinfo_ppid; 1007 control->sinfo_ppid = chk->rec.data.ppid; 1008 chk->rec.data.ppid = tmp; 1009 /* Fix tail pointer */ 1010 goto place_chunk; 1011 } 1012 control->first_frag_seen = 1; 1013 control->fsn_included = chk->rec.data.fsn; 1014 control->top_fsn = chk->rec.data.fsn; 1015 control->sinfo_tsn = chk->rec.data.tsn; 1016 control->sinfo_ppid = chk->rec.data.ppid; 1017 control->data = chk->data; 1018 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1019 chk->data = NULL; 1020 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1021 sctp_setup_tail_pointer(control); 1022 return; 1023 } 1024place_chunk: 1025 inserted = 0; 1026 TAILQ_FOREACH(at, &control->reasm, sctp_next) { 1027 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) { 1028 /* 1029 * This one in queue is bigger than the new one, 1030 * insert the new one before at. 1031 */ 1032 asoc->size_on_reasm_queue += chk->send_size; 1033 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1034 inserted = 1; 1035 TAILQ_INSERT_BEFORE(at, chk, sctp_next); 1036 break; 1037 } else if (at->rec.data.fsn == chk->rec.data.fsn) { 1038 /* 1039 * They sent a duplicate fsn number. This really 1040 * should not happen since the FSN is a TSN and it 1041 * should have been dropped earlier. 
1042 */ 1043 sctp_abort_in_reasm(stcb, control, chk, 1044 abort_flag, 1045 SCTP_FROM_SCTP_INDATA + SCTP_LOC_5); 1046 return; 1047 } 1048 1049 } 1050 if (inserted == 0) { 1051 /* Its at the end */ 1052 asoc->size_on_reasm_queue += chk->send_size; 1053 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1054 control->top_fsn = chk->rec.data.fsn; 1055 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next); 1056 } 1057} 1058 1059static int 1060sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc, 1061 struct sctp_stream_in *strm, int inp_read_lock_held) 1062{ 1063 /* 1064 * Given a stream, strm, see if any of the SSN's on it that are 1065 * fragmented are ready to deliver. If so go ahead and place them on 1066 * the read queue. In so placing if we have hit the end, then we 1067 * need to remove them from the stream's queue. 1068 */ 1069 struct sctp_queued_to_read *control, *nctl = NULL; 1070 uint32_t next_to_del; 1071 uint32_t pd_point; 1072 int ret = 0; 1073 1074 if (stcb->sctp_socket) { 1075 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT, 1076 stcb->sctp_ep->partial_delivery_point); 1077 } else { 1078 pd_point = stcb->sctp_ep->partial_delivery_point; 1079 } 1080 control = TAILQ_FIRST(&strm->uno_inqueue); 1081 1082 if ((control != NULL) && 1083 (asoc->idata_supported == 0)) { 1084 /* Special handling needed for "old" data format */ 1085 if (sctp_handle_old_unordered_data(stcb, asoc, strm, control, pd_point, inp_read_lock_held)) { 1086 goto done_un; 1087 } 1088 } 1089 if (strm->pd_api_started) { 1090 /* Can't add more */ 1091 return (0); 1092 } 1093 while (control) { 1094 SCTPDBG(SCTP_DEBUG_XXX, "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u -uo\n", 1095 control, control->end_added, control->mid, control->top_fsn, control->fsn_included); 1096 nctl = TAILQ_NEXT(control, next_instrm); 1097 if (control->end_added) { 1098 /* We just put the last bit on */ 1099 if (control->on_strm_q) { 1100#ifdef INVARIANTS 1101 if (control->on_strm_q != SCTP_ON_UNORDERED) { 1102 panic("Huh control: %p on_q: %d -- not unordered?", 1103 control, control->on_strm_q); 1104 } 1105#endif 1106 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 1107 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 1108 control->on_strm_q = 0; 1109 } 1110 if (control->on_read_q == 0) { 1111 sctp_add_to_readq(stcb->sctp_ep, stcb, 1112 control, 1113 &stcb->sctp_socket->so_rcv, control->end_added, 1114 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 1115 } 1116 } else { 1117 /* Can we do a PD-API for this un-ordered guy? */ 1118 if ((control->length >= pd_point) && (strm->pd_api_started == 0)) { 1119 strm->pd_api_started = 1; 1120 control->pdapi_started = 1; 1121 sctp_add_to_readq(stcb->sctp_ep, stcb, 1122 control, 1123 &stcb->sctp_socket->so_rcv, control->end_added, 1124 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 1125 1126 break; 1127 } 1128 } 1129 control = nctl; 1130 } 1131done_un: 1132 control = TAILQ_FIRST(&strm->inqueue); 1133 if (strm->pd_api_started) { 1134 /* Can't add more */ 1135 return (0); 1136 } 1137 if (control == NULL) { 1138 return (ret); 1139 } 1140 if (SCTP_MID_EQ(asoc->idata_supported, strm->last_mid_delivered, control->mid)) { 1141 /* 1142 * Ok the guy at the top was being partially delivered 1143 * completed, so we remove it. Note the pd_api flag was 1144 * taken off when the chunk was merged on in 1145 * sctp_queue_data_for_reasm below. 
1146 */ 1147 nctl = TAILQ_NEXT(control, next_instrm); 1148 SCTPDBG(SCTP_DEBUG_XXX, 1149 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (lastdel: %u)- o\n", 1150 control, control->end_added, control->mid, 1151 control->top_fsn, control->fsn_included, 1152 strm->last_mid_delivered); 1153 if (control->end_added) { 1154 if (control->on_strm_q) { 1155#ifdef INVARIANTS 1156 if (control->on_strm_q != SCTP_ON_ORDERED) { 1157 panic("Huh control: %p on_q: %d -- not ordered?", 1158 control, control->on_strm_q); 1159 } 1160#endif 1161 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 1162 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 1163 if (asoc->size_on_all_streams >= control->length) { 1164 asoc->size_on_all_streams -= control->length; 1165 } else { 1166#ifdef INVARIANTS 1167 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 1168#else 1169 asoc->size_on_all_streams = 0; 1170#endif 1171 } 1172 sctp_ucount_decr(asoc->cnt_on_all_streams); 1173 control->on_strm_q = 0; 1174 } 1175 if (strm->pd_api_started && control->pdapi_started) { 1176 control->pdapi_started = 0; 1177 strm->pd_api_started = 0; 1178 } 1179 if (control->on_read_q == 0) { 1180 sctp_add_to_readq(stcb->sctp_ep, stcb, 1181 control, 1182 &stcb->sctp_socket->so_rcv, control->end_added, 1183 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 1184 } 1185 control = nctl; 1186 } 1187 } 1188 if (strm->pd_api_started) { 1189 /* 1190 * Can't add more must have gotten an un-ordered above being 1191 * partially delivered. 1192 */ 1193 return (0); 1194 } 1195deliver_more: 1196 next_to_del = strm->last_mid_delivered + 1; 1197 if (control) { 1198 SCTPDBG(SCTP_DEBUG_XXX, 1199 "Looking at control: %p e(%d) ssn: %u top_fsn: %u inc_fsn: %u (nxtdel: %u)- o\n", 1200 control, control->end_added, control->mid, control->top_fsn, control->fsn_included, 1201 next_to_del); 1202 nctl = TAILQ_NEXT(control, next_instrm); 1203 if (SCTP_MID_EQ(asoc->idata_supported, control->mid, next_to_del) && 1204 (control->first_frag_seen)) { 1205 int done; 1206 1207 /* Ok we can deliver it onto the stream. 
*/ 1208 if (control->end_added) { 1209 /* We are done with it afterwards */ 1210 if (control->on_strm_q) { 1211#ifdef INVARIANTS 1212 if (control->on_strm_q != SCTP_ON_ORDERED) { 1213 panic("Huh control: %p on_q: %d -- not ordered?", 1214 control, control->on_strm_q); 1215 } 1216#endif 1217 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs); 1218 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 1219 if (asoc->size_on_all_streams >= control->length) { 1220 asoc->size_on_all_streams -= control->length; 1221 } else { 1222#ifdef INVARIANTS 1223 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 1224#else 1225 asoc->size_on_all_streams = 0; 1226#endif 1227 } 1228 sctp_ucount_decr(asoc->cnt_on_all_streams); 1229 control->on_strm_q = 0; 1230 } 1231 ret++; 1232 } 1233 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 1234 /* 1235 * A singleton now slipping through - mark 1236 * it non-revokable too 1237 */ 1238 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 1239 } else if (control->end_added == 0) { 1240 /* 1241 * Check if we can defer adding until its 1242 * all there 1243 */ 1244 if ((control->length < pd_point) || (strm->pd_api_started)) { 1245 /* 1246 * Don't need it or cannot add more 1247 * (one being delivered that way) 1248 */ 1249 goto out; 1250 } 1251 } 1252 done = (control->end_added) && (control->last_frag_seen); 1253 if (control->on_read_q == 0) { 1254 if (!done) { 1255 if (asoc->size_on_all_streams >= control->length) { 1256 asoc->size_on_all_streams -= control->length; 1257 } else { 1258#ifdef INVARIANTS 1259 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 1260#else 1261 asoc->size_on_all_streams = 0; 1262#endif 1263 } 1264 strm->pd_api_started = 1; 1265 control->pdapi_started = 1; 1266 } 1267 sctp_add_to_readq(stcb->sctp_ep, stcb, 1268 control, 1269 &stcb->sctp_socket->so_rcv, control->end_added, 1270 inp_read_lock_held, SCTP_SO_NOT_LOCKED); 1271 } 1272 strm->last_mid_delivered = next_to_del; 1273 if (done) { 1274 control = nctl; 1275 goto deliver_more; 1276 } 1277 } 1278 } 1279out: 1280 return (ret); 1281} 1282 1283 1284uint32_t 1285sctp_add_chk_to_control(struct sctp_queued_to_read *control, 1286 struct sctp_stream_in *strm, 1287 struct sctp_tcb *stcb, struct sctp_association *asoc, 1288 struct sctp_tmit_chunk *chk, int hold_rlock) 1289{ 1290 /* 1291 * Given a control and a chunk, merge the data from the chk onto the 1292 * control and free up the chunk resources. 1293 */ 1294 uint32_t added = 0; 1295 int i_locked = 0; 1296 1297 if (control->on_read_q && (hold_rlock == 0)) { 1298 /* 1299 * Its being pd-api'd so we must do some locks. 
1300 */ 1301 SCTP_INP_READ_LOCK(stcb->sctp_ep); 1302 i_locked = 1; 1303 } 1304 if (control->data == NULL) { 1305 control->data = chk->data; 1306 sctp_setup_tail_pointer(control); 1307 } else { 1308 sctp_add_to_tail_pointer(control, chk->data, &added); 1309 } 1310 control->fsn_included = chk->rec.data.fsn; 1311 asoc->size_on_reasm_queue -= chk->send_size; 1312 sctp_ucount_decr(asoc->cnt_on_reasm_queue); 1313 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1314 chk->data = NULL; 1315 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1316 control->first_frag_seen = 1; 1317 control->sinfo_tsn = chk->rec.data.tsn; 1318 control->sinfo_ppid = chk->rec.data.ppid; 1319 } 1320 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1321 /* Its complete */ 1322 if ((control->on_strm_q) && (control->on_read_q)) { 1323 if (control->pdapi_started) { 1324 control->pdapi_started = 0; 1325 strm->pd_api_started = 0; 1326 } 1327 if (control->on_strm_q == SCTP_ON_UNORDERED) { 1328 /* Unordered */ 1329 TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm); 1330 control->on_strm_q = 0; 1331 } else if (control->on_strm_q == SCTP_ON_ORDERED) { 1332 /* Ordered */ 1333 TAILQ_REMOVE(&strm->inqueue, control, next_instrm); 1334 /* 1335 * Don't need to decrement 1336 * size_on_all_streams, since control is on 1337 * the read queue. 1338 */ 1339 sctp_ucount_decr(asoc->cnt_on_all_streams); 1340 control->on_strm_q = 0; 1341#ifdef INVARIANTS 1342 } else if (control->on_strm_q) { 1343 panic("Unknown state on ctrl: %p on_strm_q: %d", control, 1344 control->on_strm_q); 1345#endif 1346 } 1347 } 1348 control->end_added = 1; 1349 control->last_frag_seen = 1; 1350 } 1351 if (i_locked) { 1352 SCTP_INP_READ_UNLOCK(stcb->sctp_ep); 1353 } 1354 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1355 return (added); 1356} 1357 1358/* 1359 * Dump onto the re-assembly queue, in its proper place. After dumping on the 1360 * queue, see if anthing can be delivered. If so pull it off (or as much as 1361 * we can. If we run out of space then we must dump what we can and set the 1362 * appropriate flag to say we queued what we could. 1363 */ 1364static void 1365sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc, 1366 struct sctp_queued_to_read *control, 1367 struct sctp_tmit_chunk *chk, 1368 int created_control, 1369 int *abort_flag, uint32_t tsn) 1370{ 1371 uint32_t next_fsn; 1372 struct sctp_tmit_chunk *at, *nat; 1373 struct sctp_stream_in *strm; 1374 int do_wakeup, unordered; 1375 uint32_t lenadded; 1376 1377 strm = &asoc->strmin[control->sinfo_stream]; 1378 /* 1379 * For old un-ordered data chunks. 1380 */ 1381 if ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) { 1382 unordered = 1; 1383 } else { 1384 unordered = 0; 1385 } 1386 /* Must be added to the stream-in queue */ 1387 if (created_control) { 1388 if (unordered == 0) { 1389 sctp_ucount_incr(asoc->cnt_on_all_streams); 1390 } 1391 if (sctp_place_control_in_stream(strm, asoc, control)) { 1392 /* Duplicate SSN? */ 1393 sctp_abort_in_reasm(stcb, control, chk, 1394 abort_flag, 1395 SCTP_FROM_SCTP_INDATA + SCTP_LOC_6); 1396 sctp_clean_up_control(stcb, control); 1397 return; 1398 } 1399 if ((tsn == (asoc->cumulative_tsn + 1) && (asoc->idata_supported == 0))) { 1400 /* 1401 * Ok we created this control and now lets validate 1402 * that its legal i.e. there is a B bit set, if not 1403 * and we have up to the cum-ack then its invalid. 
1404 */ 1405 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) { 1406 sctp_abort_in_reasm(stcb, control, chk, 1407 abort_flag, 1408 SCTP_FROM_SCTP_INDATA + SCTP_LOC_7); 1409 return; 1410 } 1411 } 1412 } 1413 if ((asoc->idata_supported == 0) && (unordered == 1)) { 1414 sctp_inject_old_unordered_data(stcb, asoc, control, chk, abort_flag); 1415 return; 1416 } 1417 /* 1418 * Ok we must queue the chunk into the reasembly portion: o if its 1419 * the first it goes to the control mbuf. o if its not first but the 1420 * next in sequence it goes to the control, and each succeeding one 1421 * in order also goes. o if its not in order we place it on the list 1422 * in its place. 1423 */ 1424 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) { 1425 /* Its the very first one. */ 1426 SCTPDBG(SCTP_DEBUG_XXX, 1427 "chunk is a first fsn: %u becomes fsn_included\n", 1428 chk->rec.data.fsn); 1429 if (control->first_frag_seen) { 1430 /* 1431 * Error on senders part, they either sent us two 1432 * data chunks with FIRST, or they sent two 1433 * un-ordered chunks that were fragmented at the 1434 * same time in the same stream. 1435 */ 1436 sctp_abort_in_reasm(stcb, control, chk, 1437 abort_flag, 1438 SCTP_FROM_SCTP_INDATA + SCTP_LOC_8); 1439 return; 1440 } 1441 control->first_frag_seen = 1; 1442 control->sinfo_ppid = chk->rec.data.ppid; 1443 control->sinfo_tsn = chk->rec.data.tsn; 1444 control->fsn_included = chk->rec.data.fsn; 1445 control->data = chk->data; 1446 sctp_mark_non_revokable(asoc, chk->rec.data.tsn); 1447 chk->data = NULL; 1448 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED); 1449 sctp_setup_tail_pointer(control); 1450 asoc->size_on_all_streams += control->length; 1451 } else { 1452 /* Place the chunk in our list */ 1453 int inserted = 0; 1454 1455 if (control->last_frag_seen == 0) { 1456 /* Still willing to raise highest FSN seen */ 1457 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { 1458 SCTPDBG(SCTP_DEBUG_XXX, 1459 "We have a new top_fsn: %u\n", 1460 chk->rec.data.fsn); 1461 control->top_fsn = chk->rec.data.fsn; 1462 } 1463 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1464 SCTPDBG(SCTP_DEBUG_XXX, 1465 "The last fsn is now in place fsn: %u\n", 1466 chk->rec.data.fsn); 1467 control->last_frag_seen = 1; 1468 } 1469 if (asoc->idata_supported || control->first_frag_seen) { 1470 /* 1471 * For IDATA we always check since we know 1472 * that the first fragment is 0. For old 1473 * DATA we have to receive the first before 1474 * we know the first FSN (which is the TSN). 1475 */ 1476 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { 1477 /* 1478 * We have already delivered up to 1479 * this so its a dup 1480 */ 1481 sctp_abort_in_reasm(stcb, control, chk, 1482 abort_flag, 1483 SCTP_FROM_SCTP_INDATA + SCTP_LOC_9); 1484 return; 1485 } 1486 } 1487 } else { 1488 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) { 1489 /* Second last? huh? */ 1490 SCTPDBG(SCTP_DEBUG_XXX, 1491 "Duplicate last fsn: %u (top: %u) -- abort\n", 1492 chk->rec.data.fsn, control->top_fsn); 1493 sctp_abort_in_reasm(stcb, control, 1494 chk, abort_flag, 1495 SCTP_FROM_SCTP_INDATA + SCTP_LOC_10); 1496 return; 1497 } 1498 if (asoc->idata_supported || control->first_frag_seen) { 1499 /* 1500 * For IDATA we always check since we know 1501 * that the first fragment is 0. For old 1502 * DATA we have to receive the first before 1503 * we know the first FSN (which is the TSN). 
1504 */ 1505 1506 if (SCTP_TSN_GE(control->fsn_included, chk->rec.data.fsn)) { 1507 /* 1508 * We have already delivered up to 1509 * this so its a dup 1510 */ 1511 SCTPDBG(SCTP_DEBUG_XXX, 1512 "New fsn: %u is already seen in included_fsn: %u -- abort\n", 1513 chk->rec.data.fsn, control->fsn_included); 1514 sctp_abort_in_reasm(stcb, control, chk, 1515 abort_flag, 1516 SCTP_FROM_SCTP_INDATA + SCTP_LOC_11); 1517 return; 1518 } 1519 } 1520 /* 1521 * validate not beyond top FSN if we have seen last 1522 * one 1523 */ 1524 if (SCTP_TSN_GT(chk->rec.data.fsn, control->top_fsn)) { 1525 SCTPDBG(SCTP_DEBUG_XXX, 1526 "New fsn: %u is beyond or at top_fsn: %u -- abort\n", 1527 chk->rec.data.fsn, 1528 control->top_fsn); 1529 sctp_abort_in_reasm(stcb, control, chk, 1530 abort_flag, 1531 SCTP_FROM_SCTP_INDATA + SCTP_LOC_12); 1532 return; 1533 } 1534 } 1535 /* 1536 * If we reach here, we need to place the new chunk in the 1537 * reassembly for this control. 1538 */ 1539 SCTPDBG(SCTP_DEBUG_XXX, 1540 "chunk is a not first fsn: %u needs to be inserted\n", 1541 chk->rec.data.fsn); 1542 TAILQ_FOREACH(at, &control->reasm, sctp_next) { 1543 if (SCTP_TSN_GT(at->rec.data.fsn, chk->rec.data.fsn)) { 1544 /* 1545 * This one in queue is bigger than the new 1546 * one, insert the new one before at. 1547 */ 1548 SCTPDBG(SCTP_DEBUG_XXX, 1549 "Insert it before fsn: %u\n", 1550 at->rec.data.fsn); 1551 asoc->size_on_reasm_queue += chk->send_size; 1552 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1553 TAILQ_INSERT_BEFORE(at, chk, sctp_next); 1554 inserted = 1; 1555 break; 1556 } else if (at->rec.data.fsn == chk->rec.data.fsn) { 1557 /* 1558 * Gak, He sent me a duplicate str seq 1559 * number 1560 */ 1561 /* 1562 * foo bar, I guess I will just free this 1563 * new guy, should we abort too? FIX ME 1564 * MAYBE? Or it COULD be that the SSN's have 1565 * wrapped. Maybe I should compare to TSN 1566 * somehow... sigh for now just blow away 1567 * the chunk! 1568 */ 1569 SCTPDBG(SCTP_DEBUG_XXX, 1570 "Duplicate to fsn: %u -- abort\n", 1571 at->rec.data.fsn); 1572 sctp_abort_in_reasm(stcb, control, 1573 chk, abort_flag, 1574 SCTP_FROM_SCTP_INDATA + SCTP_LOC_13); 1575 return; 1576 } 1577 } 1578 if (inserted == 0) { 1579 /* Goes on the end */ 1580 SCTPDBG(SCTP_DEBUG_XXX, "Inserting at tail of list fsn: %u\n", 1581 chk->rec.data.fsn); 1582 asoc->size_on_reasm_queue += chk->send_size; 1583 sctp_ucount_incr(asoc->cnt_on_reasm_queue); 1584 TAILQ_INSERT_TAIL(&control->reasm, chk, sctp_next); 1585 } 1586 } 1587 /* 1588 * Ok lets see if we can suck any up into the control structure that 1589 * are in seq if it makes sense. 1590 */ 1591 do_wakeup = 0; 1592 /* 1593 * If the first fragment has not been seen there is no sense in 1594 * looking. 1595 */ 1596 if (control->first_frag_seen) { 1597 next_fsn = control->fsn_included + 1; 1598 TAILQ_FOREACH_SAFE(at, &control->reasm, sctp_next, nat) { 1599 if (at->rec.data.fsn == next_fsn) { 1600 /* We can add this one now to the control */ 1601 SCTPDBG(SCTP_DEBUG_XXX, 1602 "Adding more to control: %p at: %p fsn: %u next_fsn: %u included: %u\n", 1603 control, at, 1604 at->rec.data.fsn, 1605 next_fsn, control->fsn_included); 1606 TAILQ_REMOVE(&control->reasm, at, sctp_next); 1607 lenadded = sctp_add_chk_to_control(control, strm, stcb, asoc, at, SCTP_READ_LOCK_NOT_HELD); 1608 if (control->on_read_q) { 1609 do_wakeup = 1; 1610 } else { 1611 /* 1612 * We only add to the 1613 * size-on-all-streams if its not on 1614 * the read q. The read q flag will 1615 * cause a sballoc so its accounted 1616 * for there. 
1617 */ 1618 asoc->size_on_all_streams += lenadded; 1619 } 1620 next_fsn++; 1621 if (control->end_added && control->pdapi_started) { 1622 if (strm->pd_api_started) { 1623 strm->pd_api_started = 0; 1624 control->pdapi_started = 0; 1625 } 1626 if (control->on_read_q == 0) { 1627 sctp_add_to_readq(stcb->sctp_ep, stcb, 1628 control, 1629 &stcb->sctp_socket->so_rcv, control->end_added, 1630 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 1631 } 1632 break; 1633 } 1634 } else { 1635 break; 1636 } 1637 } 1638 } 1639 if (do_wakeup) { 1640 /* Need to wakeup the reader */ 1641 sctp_wakeup_the_read_socket(stcb->sctp_ep, stcb, SCTP_SO_NOT_LOCKED); 1642 } 1643} 1644 1645static struct sctp_queued_to_read * 1646sctp_find_reasm_entry(struct sctp_stream_in *strm, uint32_t mid, int ordered, int idata_supported) 1647{ 1648 struct sctp_queued_to_read *control; 1649 1650 if (ordered) { 1651 TAILQ_FOREACH(control, &strm->inqueue, next_instrm) { 1652 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { 1653 break; 1654 } 1655 } 1656 } else { 1657 if (idata_supported) { 1658 TAILQ_FOREACH(control, &strm->uno_inqueue, next_instrm) { 1659 if (SCTP_MID_EQ(idata_supported, control->mid, mid)) { 1660 break; 1661 } 1662 } 1663 } else { 1664 control = TAILQ_FIRST(&strm->uno_inqueue); 1665 } 1666 } 1667 return (control); 1668} 1669 1670static int 1671sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc, 1672 struct mbuf **m, int offset, int chk_length, 1673 struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag, 1674 int *break_flag, int last_chunk, uint8_t chk_type) 1675{ 1676 struct sctp_tmit_chunk *chk = NULL; /* make gcc happy */ 1677 uint32_t tsn, fsn, gap, mid; 1678 struct mbuf *dmbuf; 1679 int the_len; 1680 int need_reasm_check = 0; 1681 uint16_t sid; 1682 struct mbuf *op_err; 1683 char msg[SCTP_DIAG_INFO_LEN]; 1684 struct sctp_queued_to_read *control, *ncontrol; 1685 uint32_t ppid; 1686 uint8_t chk_flags; 1687 struct sctp_stream_reset_list *liste; 1688 int ordered; 1689 size_t clen; 1690 int created_control = 0; 1691 1692 if (chk_type == SCTP_IDATA) { 1693 struct sctp_idata_chunk *chunk, chunk_buf; 1694 1695 chunk = (struct sctp_idata_chunk *)sctp_m_getptr(*m, offset, 1696 sizeof(struct sctp_idata_chunk), (uint8_t *)&chunk_buf); 1697 chk_flags = chunk->ch.chunk_flags; 1698 clen = sizeof(struct sctp_idata_chunk); 1699 tsn = ntohl(chunk->dp.tsn); 1700 sid = ntohs(chunk->dp.sid); 1701 mid = ntohl(chunk->dp.mid); 1702 if (chk_flags & SCTP_DATA_FIRST_FRAG) { 1703 fsn = 0; 1704 ppid = chunk->dp.ppid_fsn.ppid; 1705 } else { 1706 fsn = ntohl(chunk->dp.ppid_fsn.fsn); 1707 ppid = 0xffffffff; /* Use as an invalid value. */ 1708 } 1709 } else { 1710 struct sctp_data_chunk *chunk, chunk_buf; 1711 1712 chunk = (struct sctp_data_chunk *)sctp_m_getptr(*m, offset, 1713 sizeof(struct sctp_data_chunk), (uint8_t *)&chunk_buf); 1714 chk_flags = chunk->ch.chunk_flags; 1715 clen = sizeof(struct sctp_data_chunk); 1716 tsn = ntohl(chunk->dp.tsn); 1717 sid = ntohs(chunk->dp.sid); 1718 mid = (uint32_t)(ntohs(chunk->dp.ssn)); 1719 fsn = tsn; 1720 ppid = chunk->dp.ppid; 1721 } 1722 if ((size_t)chk_length == clen) { 1723 /* 1724 * Need to send an abort since we had a empty data chunk. 
1725 */ 1726 op_err = sctp_generate_no_user_data_cause(tsn); 1727 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14; 1728 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1729 *abort_flag = 1; 1730 return (0); 1731 } 1732 if ((chk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) { 1733 asoc->send_sack = 1; 1734 } 1735 ordered = ((chk_flags & SCTP_DATA_UNORDERED) == 0); 1736 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 1737 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS); 1738 } 1739 if (stcb == NULL) { 1740 return (0); 1741 } 1742 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, chk_type, tsn); 1743 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) { 1744 /* It is a duplicate */ 1745 SCTP_STAT_INCR(sctps_recvdupdata); 1746 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1747 /* Record a dup for the next outbound sack */ 1748 asoc->dup_tsns[asoc->numduptsns] = tsn; 1749 asoc->numduptsns++; 1750 } 1751 asoc->send_sack = 1; 1752 return (0); 1753 } 1754 /* Calculate the number of TSN's between the base and this TSN */ 1755 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn); 1756 if (gap >= (SCTP_MAPPING_ARRAY << 3)) { 1757 /* Can't hold the bit in the mapping at max array, toss it */ 1758 return (0); 1759 } 1760 if (gap >= (uint32_t)(asoc->mapping_array_size << 3)) { 1761 SCTP_TCB_LOCK_ASSERT(stcb); 1762 if (sctp_expand_mapping_array(asoc, gap)) { 1763 /* Can't expand, drop it */ 1764 return (0); 1765 } 1766 } 1767 if (SCTP_TSN_GT(tsn, *high_tsn)) { 1768 *high_tsn = tsn; 1769 } 1770 /* See if we have received this one already */ 1771 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) || 1772 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) { 1773 SCTP_STAT_INCR(sctps_recvdupdata); 1774 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) { 1775 /* Record a dup for the next outbound sack */ 1776 asoc->dup_tsns[asoc->numduptsns] = tsn; 1777 asoc->numduptsns++; 1778 } 1779 asoc->send_sack = 1; 1780 return (0); 1781 } 1782 /* 1783 * Check to see about the GONE flag, duplicates would cause a sack 1784 * to be sent up above 1785 */ 1786 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) || 1787 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) || 1788 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) { 1789 /* 1790 * wait a minute, this guy is gone, there is no longer a 1791 * receiver. Send peer an ABORT! 1792 */ 1793 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, ""); 1794 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1795 *abort_flag = 1; 1796 return (0); 1797 } 1798 /* 1799 * Now before going further we see if there is room. If NOT then we 1800 * MAY let one through only IF this TSN is the one we are waiting 1801 * for on a partial delivery API. 1802 */ 1803 1804 /* Is the stream valid? 
*/ 1805 if (sid >= asoc->streamincnt) { 1806 struct sctp_error_invalid_stream *cause; 1807 1808 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_error_invalid_stream), 1809 0, M_NOWAIT, 1, MT_DATA); 1810 if (op_err != NULL) { 1811 /* add some space up front so prepend will work well */ 1812 SCTP_BUF_RESV_UF(op_err, sizeof(struct sctp_chunkhdr)); 1813 cause = mtod(op_err, struct sctp_error_invalid_stream *); 1814 /* 1815 * Error causes are just param's and this one has 1816 * two back to back phdr, one with the error type 1817 * and size, the other with the streamid and a rsvd 1818 */ 1819 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_error_invalid_stream); 1820 cause->cause.code = htons(SCTP_CAUSE_INVALID_STREAM); 1821 cause->cause.length = htons(sizeof(struct sctp_error_invalid_stream)); 1822 cause->stream_id = htons(sid); 1823 cause->reserved = htons(0); 1824 sctp_queue_op_err(stcb, op_err); 1825 } 1826 SCTP_STAT_INCR(sctps_badsid); 1827 SCTP_TCB_LOCK_ASSERT(stcb); 1828 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 1829 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1830 asoc->highest_tsn_inside_nr_map = tsn; 1831 } 1832 if (tsn == (asoc->cumulative_tsn + 1)) { 1833 /* Update cum-ack */ 1834 asoc->cumulative_tsn = tsn; 1835 } 1836 return (0); 1837 } 1838 /* 1839 * If its a fragmented message, lets see if we can find the control 1840 * on the reassembly queues. 1841 */ 1842 if ((chk_type == SCTP_IDATA) && 1843 ((chk_flags & SCTP_DATA_FIRST_FRAG) == 0) && 1844 (fsn == 0)) { 1845 /* 1846 * The first *must* be fsn 0, and other (middle/end) pieces 1847 * can *not* be fsn 0. XXX: This can happen in case of a 1848 * wrap around. Ignore is for now. 1849 */ 1850 snprintf(msg, sizeof(msg), "FSN zero for MID=%8.8x, but flags=%2.2x", 1851 mid, chk_flags); 1852 goto err_out; 1853 } 1854 control = sctp_find_reasm_entry(&asoc->strmin[sid], mid, ordered, asoc->idata_supported); 1855 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags:0x%x look for control on queues %p\n", 1856 chk_flags, control); 1857 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 1858 /* See if we can find the re-assembly entity */ 1859 if (control != NULL) { 1860 /* We found something, does it belong? */ 1861 if (ordered && (mid != control->mid)) { 1862 snprintf(msg, sizeof(msg), "Reassembly problem (MID=%8.8x)", mid); 1863 err_out: 1864 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 1865 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15; 1866 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 1867 *abort_flag = 1; 1868 return (0); 1869 } 1870 if (ordered && ((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED)) { 1871 /* 1872 * We can't have a switched order with an 1873 * unordered chunk 1874 */ 1875 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", 1876 tsn); 1877 goto err_out; 1878 } 1879 if (!ordered && (((control->sinfo_flags >> 8) & SCTP_DATA_UNORDERED) == 0)) { 1880 /* 1881 * We can't have a switched unordered with a 1882 * ordered chunk 1883 */ 1884 snprintf(msg, sizeof(msg), "All fragments of a user message must be ordered or unordered (TSN=%8.8x)", 1885 tsn); 1886 goto err_out; 1887 } 1888 } 1889 } else { 1890 /* 1891 * Its a complete segment. Lets validate we don't have a 1892 * re-assembly going on with the same Stream/Seq (for 1893 * ordered) or in the same Stream for unordered. 
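 * For ordered delivery (or any I-DATA message) finding an existing
 * control with the same MID means the peer sent a duplicate, and the
 * association is aborted. For unordered DATA the only fatal case is a
 * TSN that directly extends a message whose last fragment was never
 * seen; otherwise the lookup result is discarded and a fresh control
 * is built further down.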
1894 */ 1895 if (control != NULL) { 1896 if (ordered || asoc->idata_supported) { 1897 SCTPDBG(SCTP_DEBUG_XXX, "chunk_flags: 0x%x dup detected on MID: %u\n", 1898 chk_flags, mid); 1899 snprintf(msg, sizeof(msg), "Duplicate MID=%8.8x detected.", mid); 1900 goto err_out; 1901 } else { 1902 if ((tsn == control->fsn_included + 1) && 1903 (control->end_added == 0)) { 1904 snprintf(msg, sizeof(msg), "Illegal message sequence, missing end for MID: %8.8x", control->fsn_included); 1905 goto err_out; 1906 } else { 1907 control = NULL; 1908 } 1909 } 1910 } 1911 } 1912 /* now do the tests */ 1913 if (((asoc->cnt_on_all_streams + 1914 asoc->cnt_on_reasm_queue + 1915 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) || 1916 (((int)asoc->my_rwnd) <= 0)) { 1917 /* 1918 * When we have NO room in the rwnd we check to make sure 1919 * the reader is doing its job... 1920 */ 1921 if (stcb->sctp_socket->so_rcv.sb_cc) { 1922 /* some to read, wake-up */ 1923#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1924 struct socket *so; 1925 1926 so = SCTP_INP_SO(stcb->sctp_ep); 1927 atomic_add_int(&stcb->asoc.refcnt, 1); 1928 SCTP_TCB_UNLOCK(stcb); 1929 SCTP_SOCKET_LOCK(so, 1); 1930 SCTP_TCB_LOCK(stcb); 1931 atomic_subtract_int(&stcb->asoc.refcnt, 1); 1932 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 1933 /* assoc was freed while we were unlocked */ 1934 SCTP_SOCKET_UNLOCK(so, 1); 1935 return (0); 1936 } 1937#endif 1938 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket); 1939#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 1940 SCTP_SOCKET_UNLOCK(so, 1); 1941#endif 1942 } 1943 /* now is it in the mapping array of what we have accepted? */ 1944 if (chk_type == SCTP_DATA) { 1945 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) && 1946 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 1947 /* Nope not in the valid range dump it */ 1948 dump_packet: 1949 sctp_set_rwnd(stcb, asoc); 1950 if ((asoc->cnt_on_all_streams + 1951 asoc->cnt_on_reasm_queue + 1952 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) { 1953 SCTP_STAT_INCR(sctps_datadropchklmt); 1954 } else { 1955 SCTP_STAT_INCR(sctps_datadroprwnd); 1956 } 1957 *break_flag = 1; 1958 return (0); 1959 } 1960 } else { 1961 if (control == NULL) { 1962 goto dump_packet; 1963 } 1964 if (SCTP_TSN_GT(fsn, control->top_fsn)) { 1965 goto dump_packet; 1966 } 1967 } 1968 } 1969#ifdef SCTP_ASOCLOG_OF_TSNS 1970 SCTP_TCB_LOCK_ASSERT(stcb); 1971 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) { 1972 asoc->tsn_in_at = 0; 1973 asoc->tsn_in_wrapped = 1; 1974 } 1975 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn; 1976 asoc->in_tsnlog[asoc->tsn_in_at].strm = sid; 1977 asoc->in_tsnlog[asoc->tsn_in_at].seq = mid; 1978 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length; 1979 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags; 1980 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb; 1981 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at; 1982 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1; 1983 asoc->tsn_in_at++; 1984#endif 1985 /* 1986 * Before we continue lets validate that we are not being fooled by 1987 * an evil attacker. We can only have Nk chunks based on our TSN 1988 * spread allowed by the mapping array N * 8 bits, so there is no 1989 * way our stream sequence numbers could have wrapped. We of course 1990 * only validate the FIRST fragment so the bit must be set. 
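 * Concretely: for an ordered first fragment, with no stream reset
 * pending, a MID (or SSN) at or behind last_mid_delivered for this
 * stream cannot be legitimate, so the association is aborted with a
 * protocol violation cause quoting both values.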
1991 */ 1992 if ((chk_flags & SCTP_DATA_FIRST_FRAG) && 1993 (TAILQ_EMPTY(&asoc->resetHead)) && 1994 (chk_flags & SCTP_DATA_UNORDERED) == 0 && 1995 SCTP_MID_GE(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered, mid)) { 1996 /* The incoming sseq is behind where we last delivered? */ 1997 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ: %u delivered: %u from peer, Abort!\n", 1998 mid, asoc->strmin[sid].last_mid_delivered); 1999 2000 if (asoc->idata_supported) { 2001 snprintf(msg, sizeof(msg), "Delivered MID=%8.8x, got TSN=%8.8x, SID=%4.4x, MID=%8.8x", 2002 asoc->strmin[sid].last_mid_delivered, 2003 tsn, 2004 sid, 2005 mid); 2006 } else { 2007 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x", 2008 (uint16_t)asoc->strmin[sid].last_mid_delivered, 2009 tsn, 2010 sid, 2011 (uint16_t)mid); 2012 } 2013 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2014 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16; 2015 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 2016 *abort_flag = 1; 2017 return (0); 2018 } 2019 if (chk_type == SCTP_IDATA) { 2020 the_len = (chk_length - sizeof(struct sctp_idata_chunk)); 2021 } else { 2022 the_len = (chk_length - sizeof(struct sctp_data_chunk)); 2023 } 2024 if (last_chunk == 0) { 2025 if (chk_type == SCTP_IDATA) { 2026 dmbuf = SCTP_M_COPYM(*m, 2027 (offset + sizeof(struct sctp_idata_chunk)), 2028 the_len, M_NOWAIT); 2029 } else { 2030 dmbuf = SCTP_M_COPYM(*m, 2031 (offset + sizeof(struct sctp_data_chunk)), 2032 the_len, M_NOWAIT); 2033 } 2034#ifdef SCTP_MBUF_LOGGING 2035 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) { 2036 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY); 2037 } 2038#endif 2039 } else { 2040 /* We can steal the last chunk */ 2041 int l_len; 2042 2043 dmbuf = *m; 2044 /* lop off the top part */ 2045 if (chk_type == SCTP_IDATA) { 2046 m_adj(dmbuf, (offset + sizeof(struct sctp_idata_chunk))); 2047 } else { 2048 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk))); 2049 } 2050 if (SCTP_BUF_NEXT(dmbuf) == NULL) { 2051 l_len = SCTP_BUF_LEN(dmbuf); 2052 } else { 2053 /* 2054 * need to count up the size hopefully does not hit 2055 * this to often :-0 2056 */ 2057 struct mbuf *lat; 2058 2059 l_len = 0; 2060 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) { 2061 l_len += SCTP_BUF_LEN(lat); 2062 } 2063 } 2064 if (l_len > the_len) { 2065 /* Trim the end round bytes off too */ 2066 m_adj(dmbuf, -(l_len - the_len)); 2067 } 2068 } 2069 if (dmbuf == NULL) { 2070 SCTP_STAT_INCR(sctps_nomem); 2071 return (0); 2072 } 2073 /* 2074 * Now no matter what, we need a control, get one if we don't have 2075 * one (we may have gotten it above when we found the message was 2076 * fragmented 2077 */ 2078 if (control == NULL) { 2079 sctp_alloc_a_readq(stcb, control); 2080 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn, 2081 ppid, 2082 sid, 2083 chk_flags, 2084 NULL, fsn, mid); 2085 if (control == NULL) { 2086 SCTP_STAT_INCR(sctps_nomem); 2087 return (0); 2088 } 2089 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2090 struct mbuf *mm; 2091 2092 control->data = dmbuf; 2093 for (mm = control->data; mm; mm = mm->m_next) { 2094 control->length += SCTP_BUF_LEN(mm); 2095 } 2096 control->tail_mbuf = NULL; 2097 control->end_added = 1; 2098 control->last_frag_seen = 1; 2099 control->first_frag_seen = 1; 2100 control->fsn_included = fsn; 2101 control->top_fsn = fsn; 2102 } 2103 created_control = 1; 2104 } 2105 SCTPDBG(SCTP_DEBUG_XXX, 
"chunk_flags: 0x%x ordered: %d MID: %u control: %p\n", 2106 chk_flags, ordered, mid, control); 2107 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG && 2108 TAILQ_EMPTY(&asoc->resetHead) && 2109 ((ordered == 0) || 2110 (SCTP_MID_EQ(asoc->idata_supported, asoc->strmin[sid].last_mid_delivered + 1, mid) && 2111 TAILQ_EMPTY(&asoc->strmin[sid].inqueue)))) { 2112 /* Candidate for express delivery */ 2113 /* 2114 * Its not fragmented, No PD-API is up, Nothing in the 2115 * delivery queue, Its un-ordered OR ordered and the next to 2116 * deliver AND nothing else is stuck on the stream queue, 2117 * And there is room for it in the socket buffer. Lets just 2118 * stuff it up the buffer.... 2119 */ 2120 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2121 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2122 asoc->highest_tsn_inside_nr_map = tsn; 2123 } 2124 SCTPDBG(SCTP_DEBUG_XXX, "Injecting control: %p to be read (MID: %u)\n", 2125 control, mid); 2126 2127 sctp_add_to_readq(stcb->sctp_ep, stcb, 2128 control, &stcb->sctp_socket->so_rcv, 2129 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2130 2131 if ((chk_flags & SCTP_DATA_UNORDERED) == 0) { 2132 /* for ordered, bump what we delivered */ 2133 asoc->strmin[sid].last_mid_delivered++; 2134 } 2135 SCTP_STAT_INCR(sctps_recvexpress); 2136 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2137 sctp_log_strm_del_alt(stcb, tsn, mid, sid, 2138 SCTP_STR_LOG_FROM_EXPRS_DEL); 2139 } 2140 control = NULL; 2141 goto finish_express_del; 2142 } 2143 2144 /* Now will we need a chunk too? */ 2145 if ((chk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) { 2146 sctp_alloc_a_chunk(stcb, chk); 2147 if (chk == NULL) { 2148 /* No memory so we drop the chunk */ 2149 SCTP_STAT_INCR(sctps_nomem); 2150 if (last_chunk == 0) { 2151 /* we copied it, free the copy */ 2152 sctp_m_freem(dmbuf); 2153 } 2154 return (0); 2155 } 2156 chk->rec.data.tsn = tsn; 2157 chk->no_fr_allowed = 0; 2158 chk->rec.data.fsn = fsn; 2159 chk->rec.data.mid = mid; 2160 chk->rec.data.sid = sid; 2161 chk->rec.data.ppid = ppid; 2162 chk->rec.data.context = stcb->asoc.context; 2163 chk->rec.data.doing_fast_retransmit = 0; 2164 chk->rec.data.rcv_flags = chk_flags; 2165 chk->asoc = asoc; 2166 chk->send_size = the_len; 2167 chk->whoTo = net; 2168 SCTPDBG(SCTP_DEBUG_XXX, "Building ck: %p for control: %p to be read (MID: %u)\n", 2169 chk, 2170 control, mid); 2171 atomic_add_int(&net->ref_count, 1); 2172 chk->data = dmbuf; 2173 } 2174 /* Set the appropriate TSN mark */ 2175 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) { 2176 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap); 2177 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) { 2178 asoc->highest_tsn_inside_nr_map = tsn; 2179 } 2180 } else { 2181 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap); 2182 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) { 2183 asoc->highest_tsn_inside_map = tsn; 2184 } 2185 } 2186 /* Now is it complete (i.e. not fragmented)? */ 2187 if ((chk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 2188 /* 2189 * Special check for when streams are resetting. We could be 2190 * more smart about this and check the actual stream to see 2191 * if it is not being reset.. that way we would not create a 2192 * HOLB when amongst streams being reset and those not being 2193 * reset. 2194 * 2195 */ 2196 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2197 SCTP_TSN_GT(tsn, liste->tsn)) { 2198 /* 2199 * yep its past where we need to reset... go ahead 2200 * and queue it. 
2201 */ 2202 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) { 2203 /* first one on */ 2204 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2205 } else { 2206 struct sctp_queued_to_read *lcontrol, *nlcontrol; 2207 unsigned char inserted = 0; 2208 2209 TAILQ_FOREACH_SAFE(lcontrol, &asoc->pending_reply_queue, next, nlcontrol) { 2210 if (SCTP_TSN_GT(control->sinfo_tsn, lcontrol->sinfo_tsn)) { 2211 2212 continue; 2213 } else { 2214 /* found it */ 2215 TAILQ_INSERT_BEFORE(lcontrol, control, next); 2216 inserted = 1; 2217 break; 2218 } 2219 } 2220 if (inserted == 0) { 2221 /* 2222 * must be put at end, use prevP 2223 * (all setup from loop) to setup 2224 * nextP. 2225 */ 2226 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next); 2227 } 2228 } 2229 goto finish_express_del; 2230 } 2231 if (chk_flags & SCTP_DATA_UNORDERED) { 2232 /* queue directly into socket buffer */ 2233 SCTPDBG(SCTP_DEBUG_XXX, "Unordered data to be read control: %p MID: %u\n", 2234 control, mid); 2235 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 2236 sctp_add_to_readq(stcb->sctp_ep, stcb, 2237 control, 2238 &stcb->sctp_socket->so_rcv, 1, 2239 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED); 2240 2241 } else { 2242 SCTPDBG(SCTP_DEBUG_XXX, "Queue control: %p for reordering MID: %u\n", control, 2243 mid); 2244 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2245 if (*abort_flag) { 2246 if (last_chunk) { 2247 *m = NULL; 2248 } 2249 return (0); 2250 } 2251 } 2252 goto finish_express_del; 2253 } 2254 /* If we reach here its a reassembly */ 2255 need_reasm_check = 1; 2256 SCTPDBG(SCTP_DEBUG_XXX, 2257 "Queue data to stream for reasm control: %p MID: %u\n", 2258 control, mid); 2259 sctp_queue_data_for_reasm(stcb, asoc, control, chk, created_control, abort_flag, tsn); 2260 if (*abort_flag) { 2261 /* 2262 * the assoc is now gone and chk was put onto the reasm 2263 * queue, which has all been freed. 2264 */ 2265 if (last_chunk) { 2266 *m = NULL; 2267 } 2268 return (0); 2269 } 2270finish_express_del: 2271 /* Here we tidy up things */ 2272 if (tsn == (asoc->cumulative_tsn + 1)) { 2273 /* Update cum-ack */ 2274 asoc->cumulative_tsn = tsn; 2275 } 2276 if (last_chunk) { 2277 *m = NULL; 2278 } 2279 if (ordered) { 2280 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks); 2281 } else { 2282 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks); 2283 } 2284 SCTP_STAT_INCR(sctps_recvdata); 2285 /* Set it present please */ 2286 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) { 2287 sctp_log_strm_del_alt(stcb, tsn, mid, sid, SCTP_STR_LOG_FROM_MARK_TSN); 2288 } 2289 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2290 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn, 2291 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE); 2292 } 2293 if (need_reasm_check) { 2294 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[sid], SCTP_READ_LOCK_NOT_HELD); 2295 need_reasm_check = 0; 2296 } 2297 /* check the special flag for stream resets */ 2298 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) && 2299 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) { 2300 /* 2301 * we have finished working through the backlogged TSN's now 2302 * time to reset streams. 1: call reset function. 2: free 2303 * pending_reply space 3: distribute any chunks in 2304 * pending_reply_queue. 
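 * If no further reset is pending afterwards, every control parked on
 * pending_reply_queue is handed to its stream; otherwise only those
 * with TSNs at or below the next pending reset TSN are released.
 * Each delivery is followed by a reassembly check for that stream.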
2305 */ 2306 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams); 2307 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp); 2308 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED); 2309 SCTP_FREE(liste, SCTP_M_STRESET); 2310 /* sa_ignore FREED_MEMORY */ 2311 liste = TAILQ_FIRST(&asoc->resetHead); 2312 if (TAILQ_EMPTY(&asoc->resetHead)) { 2313 /* All can be removed */ 2314 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2315 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2316 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2317 if (*abort_flag) { 2318 return (0); 2319 } 2320 if (need_reasm_check) { 2321 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD); 2322 need_reasm_check = 0; 2323 } 2324 } 2325 } else { 2326 TAILQ_FOREACH_SAFE(control, &asoc->pending_reply_queue, next, ncontrol) { 2327 if (SCTP_TSN_GT(control->sinfo_tsn, liste->tsn)) { 2328 break; 2329 } 2330 /* 2331 * if control->sinfo_tsn is <= liste->tsn we 2332 * can process it which is the NOT of 2333 * control->sinfo_tsn > liste->tsn 2334 */ 2335 TAILQ_REMOVE(&asoc->pending_reply_queue, control, next); 2336 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag, &need_reasm_check); 2337 if (*abort_flag) { 2338 return (0); 2339 } 2340 if (need_reasm_check) { 2341 (void)sctp_deliver_reasm_check(stcb, asoc, &asoc->strmin[control->sinfo_stream], SCTP_READ_LOCK_NOT_HELD); 2342 need_reasm_check = 0; 2343 } 2344 } 2345 } 2346 } 2347 return (1); 2348} 2349 2350static const int8_t sctp_map_lookup_tab[256] = { 2351 0, 1, 0, 2, 0, 1, 0, 3, 2352 0, 1, 0, 2, 0, 1, 0, 4, 2353 0, 1, 0, 2, 0, 1, 0, 3, 2354 0, 1, 0, 2, 0, 1, 0, 5, 2355 0, 1, 0, 2, 0, 1, 0, 3, 2356 0, 1, 0, 2, 0, 1, 0, 4, 2357 0, 1, 0, 2, 0, 1, 0, 3, 2358 0, 1, 0, 2, 0, 1, 0, 6, 2359 0, 1, 0, 2, 0, 1, 0, 3, 2360 0, 1, 0, 2, 0, 1, 0, 4, 2361 0, 1, 0, 2, 0, 1, 0, 3, 2362 0, 1, 0, 2, 0, 1, 0, 5, 2363 0, 1, 0, 2, 0, 1, 0, 3, 2364 0, 1, 0, 2, 0, 1, 0, 4, 2365 0, 1, 0, 2, 0, 1, 0, 3, 2366 0, 1, 0, 2, 0, 1, 0, 7, 2367 0, 1, 0, 2, 0, 1, 0, 3, 2368 0, 1, 0, 2, 0, 1, 0, 4, 2369 0, 1, 0, 2, 0, 1, 0, 3, 2370 0, 1, 0, 2, 0, 1, 0, 5, 2371 0, 1, 0, 2, 0, 1, 0, 3, 2372 0, 1, 0, 2, 0, 1, 0, 4, 2373 0, 1, 0, 2, 0, 1, 0, 3, 2374 0, 1, 0, 2, 0, 1, 0, 6, 2375 0, 1, 0, 2, 0, 1, 0, 3, 2376 0, 1, 0, 2, 0, 1, 0, 4, 2377 0, 1, 0, 2, 0, 1, 0, 3, 2378 0, 1, 0, 2, 0, 1, 0, 5, 2379 0, 1, 0, 2, 0, 1, 0, 3, 2380 0, 1, 0, 2, 0, 1, 0, 4, 2381 0, 1, 0, 2, 0, 1, 0, 3, 2382 0, 1, 0, 2, 0, 1, 0, 8 2383}; 2384 2385 2386void 2387sctp_slide_mapping_arrays(struct sctp_tcb *stcb) 2388{ 2389 /* 2390 * Now we also need to check the mapping array in a couple of ways. 2391 * 1) Did we move the cum-ack point? 2392 * 2393 * When you first glance at this you might think that all entries 2394 * that make up the position of the cum-ack would be in the 2395 * nr-mapping array only.. i.e. things up to the cum-ack are always 2396 * deliverable. Thats true with one exception, when its a fragmented 2397 * message we may not deliver the data until some threshold (or all 2398 * of it) is in place. So we must OR the nr_mapping_array and 2399 * mapping_array to get a true picture of the cum-ack. 
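 * The scan below therefore works on the byte-wise OR of the two
 * arrays: a 0xff byte advances the candidate cum-ack by 8 TSNs, and
 * the first byte containing a zero bit adds sctp_map_lookup_tab[val],
 * the number of consecutive low-order one bits in that byte (for
 * example, val = 0x1f contributes 5).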
2400 */ 2401 struct sctp_association *asoc; 2402 int at; 2403 uint8_t val; 2404 int slide_from, slide_end, lgap, distance; 2405 uint32_t old_cumack, old_base, old_highest, highest_tsn; 2406 2407 asoc = &stcb->asoc; 2408 2409 old_cumack = asoc->cumulative_tsn; 2410 old_base = asoc->mapping_array_base_tsn; 2411 old_highest = asoc->highest_tsn_inside_map; 2412 /* 2413 * We could probably improve this a small bit by calculating the 2414 * offset of the current cum-ack as the starting point. 2415 */ 2416 at = 0; 2417 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) { 2418 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from]; 2419 if (val == 0xff) { 2420 at += 8; 2421 } else { 2422 /* there is a 0 bit */ 2423 at += sctp_map_lookup_tab[val]; 2424 break; 2425 } 2426 } 2427 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1); 2428 2429 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) && 2430 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) { 2431#ifdef INVARIANTS 2432 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map", 2433 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2434#else 2435 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n", 2436 asoc->cumulative_tsn, asoc->highest_tsn_inside_map); 2437 sctp_print_mapping_array(asoc); 2438 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2439 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT); 2440 } 2441 asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2442 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn; 2443#endif 2444 } 2445 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2446 highest_tsn = asoc->highest_tsn_inside_nr_map; 2447 } else { 2448 highest_tsn = asoc->highest_tsn_inside_map; 2449 } 2450 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) { 2451 /* The complete array was completed by a single FR */ 2452 /* highest becomes the cum-ack */ 2453 int clr; 2454#ifdef INVARIANTS 2455 unsigned int i; 2456#endif 2457 2458 /* clear the array */ 2459 clr = ((at + 7) >> 3); 2460 if (clr > asoc->mapping_array_size) { 2461 clr = asoc->mapping_array_size; 2462 } 2463 memset(asoc->mapping_array, 0, clr); 2464 memset(asoc->nr_mapping_array, 0, clr); 2465#ifdef INVARIANTS 2466 for (i = 0; i < asoc->mapping_array_size; i++) { 2467 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) { 2468 SCTP_PRINTF("Error Mapping array's not clean at clear\n"); 2469 sctp_print_mapping_array(asoc); 2470 } 2471 } 2472#endif 2473 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1; 2474 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn; 2475 } else if (at >= 8) { 2476 /* we can slide the mapping array down */ 2477 /* slide_from holds where we hit the first NON 0xff byte */ 2478 2479 /* 2480 * now calculate the ceiling of the move using our highest 2481 * TSN value 2482 */ 2483 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn); 2484 slide_end = (lgap >> 3); 2485 if (slide_end < slide_from) { 2486 sctp_print_mapping_array(asoc); 2487#ifdef INVARIANTS 2488 panic("impossible slide"); 2489#else 2490 SCTP_PRINTF("impossible slide lgap: %x slide_end: %x slide_from: %x? 
at: %d\n", 2491 lgap, slide_end, slide_from, at); 2492 return; 2493#endif 2494 } 2495 if (slide_end > asoc->mapping_array_size) { 2496#ifdef INVARIANTS 2497 panic("would overrun buffer"); 2498#else 2499 SCTP_PRINTF("Gak, would have overrun map end: %d slide_end: %d\n", 2500 asoc->mapping_array_size, slide_end); 2501 slide_end = asoc->mapping_array_size; 2502#endif 2503 } 2504 distance = (slide_end - slide_from) + 1; 2505 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2506 sctp_log_map(old_base, old_cumack, old_highest, 2507 SCTP_MAP_PREPARE_SLIDE); 2508 sctp_log_map((uint32_t)slide_from, (uint32_t)slide_end, 2509 (uint32_t)lgap, SCTP_MAP_SLIDE_FROM); 2510 } 2511 if (distance + slide_from > asoc->mapping_array_size || 2512 distance < 0) { 2513 /* 2514 * Here we do NOT slide forward the array so that 2515 * hopefully when more data comes in to fill it up 2516 * we will be able to slide it forward. Really I 2517 * don't think this should happen :-0 2518 */ 2519 2520 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2521 sctp_log_map((uint32_t)distance, (uint32_t)slide_from, 2522 (uint32_t)asoc->mapping_array_size, 2523 SCTP_MAP_SLIDE_NONE); 2524 } 2525 } else { 2526 int ii; 2527 2528 for (ii = 0; ii < distance; ii++) { 2529 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii]; 2530 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii]; 2531 2532 } 2533 for (ii = distance; ii < asoc->mapping_array_size; ii++) { 2534 asoc->mapping_array[ii] = 0; 2535 asoc->nr_mapping_array[ii] = 0; 2536 } 2537 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) { 2538 asoc->highest_tsn_inside_map += (slide_from << 3); 2539 } 2540 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) { 2541 asoc->highest_tsn_inside_nr_map += (slide_from << 3); 2542 } 2543 asoc->mapping_array_base_tsn += (slide_from << 3); 2544 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) { 2545 sctp_log_map(asoc->mapping_array_base_tsn, 2546 asoc->cumulative_tsn, asoc->highest_tsn_inside_map, 2547 SCTP_MAP_SLIDE_RESULT); 2548 } 2549 } 2550 } 2551} 2552 2553void 2554sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap) 2555{ 2556 struct sctp_association *asoc; 2557 uint32_t highest_tsn; 2558 int is_a_gap; 2559 2560 sctp_slide_mapping_arrays(stcb); 2561 asoc = &stcb->asoc; 2562 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2563 highest_tsn = asoc->highest_tsn_inside_nr_map; 2564 } else { 2565 highest_tsn = asoc->highest_tsn_inside_map; 2566 } 2567 /* Is there a gap now? */ 2568 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2569 2570 /* 2571 * Now we need to see if we need to queue a sack or just start the 2572 * timer (if allowed). 2573 */ 2574 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2575 /* 2576 * Ok special case, in SHUTDOWN-SENT case. here we maker 2577 * sure SACK timer is off and instead send a SHUTDOWN and a 2578 * SACK 2579 */ 2580 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2581 sctp_timer_stop(SCTP_TIMER_TYPE_RECV, 2582 stcb->sctp_ep, stcb, NULL, 2583 SCTP_FROM_SCTP_INDATA + SCTP_LOC_17); 2584 } 2585 sctp_send_shutdown(stcb, 2586 ((stcb->asoc.alternate) ? 
stcb->asoc.alternate : stcb->asoc.primary_destination)); 2587 if (is_a_gap) { 2588 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2589 } 2590 } else { 2591 /* 2592 * CMT DAC algorithm: increase number of packets received 2593 * since last ack 2594 */ 2595 stcb->asoc.cmt_dac_pkts_rcvd++; 2596 2597 if ((stcb->asoc.send_sack == 1) || /* We need to send a 2598 * SACK */ 2599 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no 2600 * longer is one */ 2601 (stcb->asoc.numduptsns) || /* we have dup's */ 2602 (is_a_gap) || /* is still a gap */ 2603 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */ 2604 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */ 2605 ) { 2606 2607 if ((stcb->asoc.sctp_cmt_on_off > 0) && 2608 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) && 2609 (stcb->asoc.send_sack == 0) && 2610 (stcb->asoc.numduptsns == 0) && 2611 (stcb->asoc.delayed_ack) && 2612 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) { 2613 2614 /* 2615 * CMT DAC algorithm: With CMT, delay acks 2616 * even in the face of 2617 * 2618 * reordering. Therefore, if acks that do 2619 * not have to be sent because of the above 2620 * reasons, will be delayed. That is, acks 2621 * that would have been sent due to gap 2622 * reports will be delayed with DAC. Start 2623 * the delayed ack timer. 2624 */ 2625 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2626 stcb->sctp_ep, stcb, NULL); 2627 } else { 2628 /* 2629 * Ok we must build a SACK since the timer 2630 * is pending, we got our first packet OR 2631 * there are gaps or duplicates. 2632 */ 2633 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer); 2634 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED); 2635 } 2636 } else { 2637 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 2638 sctp_timer_start(SCTP_TIMER_TYPE_RECV, 2639 stcb->sctp_ep, stcb, NULL); 2640 } 2641 } 2642 } 2643} 2644 2645int 2646sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length, 2647 struct sctp_inpcb *inp, struct sctp_tcb *stcb, 2648 struct sctp_nets *net, uint32_t *high_tsn) 2649{ 2650 struct sctp_chunkhdr *ch, chunk_buf; 2651 struct sctp_association *asoc; 2652 int num_chunks = 0; /* number of control chunks processed */ 2653 int stop_proc = 0; 2654 int break_flag, last_chunk; 2655 int abort_flag = 0, was_a_gap; 2656 struct mbuf *m; 2657 uint32_t highest_tsn; 2658 uint16_t chk_length; 2659 2660 /* set the rwnd */ 2661 sctp_set_rwnd(stcb, &stcb->asoc); 2662 2663 m = *mm; 2664 SCTP_TCB_LOCK_ASSERT(stcb); 2665 asoc = &stcb->asoc; 2666 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) { 2667 highest_tsn = asoc->highest_tsn_inside_nr_map; 2668 } else { 2669 highest_tsn = asoc->highest_tsn_inside_map; 2670 } 2671 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn); 2672 /* 2673 * setup where we got the last DATA packet from for any SACK that 2674 * may need to go out. Don't bump the net. This is done ONLY when a 2675 * chunk is assigned. 2676 */ 2677 asoc->last_data_chunk_from = net; 2678 2679 /*- 2680 * Now before we proceed we must figure out if this is a wasted 2681 * cluster... i.e. it is a small packet sent in and yet the driver 2682 * underneath allocated a full cluster for it. If so we must copy it 2683 * to a smaller mbuf and free up the cluster mbuf. This will help 2684 * with cluster starvation. Note for __Panda__ we don't do this 2685 * since it has clusters all the way down to 64 bytes. 
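 * Only a single, unchained mbuf shorter than MLEN is copied into a
 * plain mbuf below; if sctp_get_mbuf_for_msg() fails, the original
 * cluster is kept and processing continues with it.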
2686 */ 2687 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) { 2688 /* we only handle mbufs that are singletons.. not chains */ 2689 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA); 2690 if (m) { 2691 /* ok lets see if we can copy the data up */ 2692 caddr_t *from, *to; 2693 2694 /* get the pointers and copy */ 2695 to = mtod(m, caddr_t *); 2696 from = mtod((*mm), caddr_t *); 2697 memcpy(to, from, SCTP_BUF_LEN((*mm))); 2698 /* copy the length and free up the old */ 2699 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm)); 2700 sctp_m_freem(*mm); 2701 /* success, back copy */ 2702 *mm = m; 2703 } else { 2704 /* We are in trouble in the mbuf world .. yikes */ 2705 m = *mm; 2706 } 2707 } 2708 /* get pointer to the first chunk header */ 2709 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2710 sizeof(struct sctp_chunkhdr), 2711 (uint8_t *)&chunk_buf); 2712 if (ch == NULL) { 2713 return (1); 2714 } 2715 /* 2716 * process all DATA chunks... 2717 */ 2718 *high_tsn = asoc->cumulative_tsn; 2719 break_flag = 0; 2720 asoc->data_pkts_seen++; 2721 while (stop_proc == 0) { 2722 /* validate chunk length */ 2723 chk_length = ntohs(ch->chunk_length); 2724 if (length - *offset < chk_length) { 2725 /* all done, mutilated chunk */ 2726 stop_proc = 1; 2727 continue; 2728 } 2729 if ((asoc->idata_supported == 1) && 2730 (ch->chunk_type == SCTP_DATA)) { 2731 struct mbuf *op_err; 2732 char msg[SCTP_DIAG_INFO_LEN]; 2733 2734 snprintf(msg, sizeof(msg), "%s", "DATA chunk received when I-DATA was negotiated"); 2735 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2736 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18; 2737 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2738 return (2); 2739 } 2740 if ((asoc->idata_supported == 0) && 2741 (ch->chunk_type == SCTP_IDATA)) { 2742 struct mbuf *op_err; 2743 char msg[SCTP_DIAG_INFO_LEN]; 2744 2745 snprintf(msg, sizeof(msg), "%s", "I-DATA chunk received when DATA was negotiated"); 2746 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2747 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19; 2748 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2749 return (2); 2750 } 2751 if ((ch->chunk_type == SCTP_DATA) || 2752 (ch->chunk_type == SCTP_IDATA)) { 2753 uint16_t clen; 2754 2755 if (ch->chunk_type == SCTP_DATA) { 2756 clen = sizeof(struct sctp_data_chunk); 2757 } else { 2758 clen = sizeof(struct sctp_idata_chunk); 2759 } 2760 if (chk_length < clen) { 2761 /* 2762 * Need to send an abort since we had an 2763 * invalid data chunk. 2764 */ 2765 struct mbuf *op_err; 2766 char msg[SCTP_DIAG_INFO_LEN]; 2767 2768 snprintf(msg, sizeof(msg), "%s chunk of length %u", 2769 ch->chunk_type == SCTP_DATA ? 
"DATA" : "I-DATA", 2770 chk_length); 2771 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2772 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20; 2773 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2774 return (2); 2775 } 2776#ifdef SCTP_AUDITING_ENABLED 2777 sctp_audit_log(0xB1, 0); 2778#endif 2779 if (SCTP_SIZE32(chk_length) == (length - *offset)) { 2780 last_chunk = 1; 2781 } else { 2782 last_chunk = 0; 2783 } 2784 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, 2785 chk_length, net, high_tsn, &abort_flag, &break_flag, 2786 last_chunk, ch->chunk_type)) { 2787 num_chunks++; 2788 } 2789 if (abort_flag) 2790 return (2); 2791 2792 if (break_flag) { 2793 /* 2794 * Set because of out of rwnd space and no 2795 * drop rep space left. 2796 */ 2797 stop_proc = 1; 2798 continue; 2799 } 2800 } else { 2801 /* not a data chunk in the data region */ 2802 switch (ch->chunk_type) { 2803 case SCTP_INITIATION: 2804 case SCTP_INITIATION_ACK: 2805 case SCTP_SELECTIVE_ACK: 2806 case SCTP_NR_SELECTIVE_ACK: 2807 case SCTP_HEARTBEAT_REQUEST: 2808 case SCTP_HEARTBEAT_ACK: 2809 case SCTP_ABORT_ASSOCIATION: 2810 case SCTP_SHUTDOWN: 2811 case SCTP_SHUTDOWN_ACK: 2812 case SCTP_OPERATION_ERROR: 2813 case SCTP_COOKIE_ECHO: 2814 case SCTP_COOKIE_ACK: 2815 case SCTP_ECN_ECHO: 2816 case SCTP_ECN_CWR: 2817 case SCTP_SHUTDOWN_COMPLETE: 2818 case SCTP_AUTHENTICATION: 2819 case SCTP_ASCONF_ACK: 2820 case SCTP_PACKET_DROPPED: 2821 case SCTP_STREAM_RESET: 2822 case SCTP_FORWARD_CUM_TSN: 2823 case SCTP_ASCONF: 2824 { 2825 /* 2826 * Now, what do we do with KNOWN 2827 * chunks that are NOT in the right 2828 * place? 2829 * 2830 * For now, I do nothing but ignore 2831 * them. We may later want to add 2832 * sysctl stuff to switch out and do 2833 * either an ABORT() or possibly 2834 * process them. 2835 */ 2836 struct mbuf *op_err; 2837 char msg[SCTP_DIAG_INFO_LEN]; 2838 2839 snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x", 2840 ch->chunk_type); 2841 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2842 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2843 return (2); 2844 } 2845 default: 2846 /* 2847 * Unknown chunk type: use bit rules after 2848 * checking length 2849 */ 2850 if (chk_length < sizeof(struct sctp_chunkhdr)) { 2851 /* 2852 * Need to send an abort since we 2853 * had a invalid chunk. 
2854 */ 2855 struct mbuf *op_err; 2856 char msg[SCTP_DIAG_INFO_LEN]; 2857 2858 snprintf(msg, sizeof(msg), "Chunk of length %u", 2859 chk_length); 2860 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 2861 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_20; 2862 sctp_abort_an_association(inp, stcb, op_err, SCTP_SO_NOT_LOCKED); 2863 return (2); 2864 } 2865 if (ch->chunk_type & 0x40) { 2866 /* Add a error report to the queue */ 2867 struct mbuf *op_err; 2868 struct sctp_gen_error_cause *cause; 2869 2870 op_err = sctp_get_mbuf_for_msg(sizeof(struct sctp_gen_error_cause), 2871 0, M_NOWAIT, 1, MT_DATA); 2872 if (op_err != NULL) { 2873 cause = mtod(op_err, struct sctp_gen_error_cause *); 2874 cause->code = htons(SCTP_CAUSE_UNRECOG_CHUNK); 2875 cause->length = htons((uint16_t)(chk_length + sizeof(struct sctp_gen_error_cause))); 2876 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_gen_error_cause); 2877 SCTP_BUF_NEXT(op_err) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT); 2878 if (SCTP_BUF_NEXT(op_err) != NULL) { 2879 sctp_queue_op_err(stcb, op_err); 2880 } else { 2881 sctp_m_freem(op_err); 2882 } 2883 } 2884 } 2885 if ((ch->chunk_type & 0x80) == 0) { 2886 /* discard the rest of this packet */ 2887 stop_proc = 1; 2888 } /* else skip this bad chunk and 2889 * continue... */ 2890 break; 2891 } /* switch of chunk type */ 2892 } 2893 *offset += SCTP_SIZE32(chk_length); 2894 if ((*offset >= length) || stop_proc) { 2895 /* no more data left in the mbuf chain */ 2896 stop_proc = 1; 2897 continue; 2898 } 2899 ch = (struct sctp_chunkhdr *)sctp_m_getptr(m, *offset, 2900 sizeof(struct sctp_chunkhdr), 2901 (uint8_t *)&chunk_buf); 2902 if (ch == NULL) { 2903 *offset = length; 2904 stop_proc = 1; 2905 continue; 2906 } 2907 } 2908 if (break_flag) { 2909 /* 2910 * we need to report rwnd overrun drops. 2911 */ 2912 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0); 2913 } 2914 if (num_chunks) { 2915 /* 2916 * Did we get data, if so update the time for auto-close and 2917 * give peer credit for being alive. 2918 */ 2919 SCTP_STAT_INCR(sctps_recvpktwithdata); 2920 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 2921 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 2922 stcb->asoc.overall_error_count, 2923 0, 2924 SCTP_FROM_SCTP_INDATA, 2925 __LINE__); 2926 } 2927 stcb->asoc.overall_error_count = 0; 2928 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd); 2929 } 2930 /* now service all of the reassm queue if needed */ 2931 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) { 2932 /* Assure that we ack right away */ 2933 stcb->asoc.send_sack = 1; 2934 } 2935 /* Start a sack timer or QUEUE a SACK for sending */ 2936 sctp_sack_check(stcb, was_a_gap); 2937 return (0); 2938} 2939 2940static int 2941sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn, 2942 uint16_t frag_strt, uint16_t frag_end, int nr_sacking, 2943 int *num_frs, 2944 uint32_t *biggest_newly_acked_tsn, 2945 uint32_t *this_sack_lowest_newack, 2946 int *rto_ok) 2947{ 2948 struct sctp_tmit_chunk *tp1; 2949 unsigned int theTSN; 2950 int j, wake_him = 0, circled = 0; 2951 2952 /* Recover the tp1 we last saw */ 2953 tp1 = *p_tp1; 2954 if (tp1 == NULL) { 2955 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 2956 } 2957 for (j = frag_strt; j <= frag_end; j++) { 2958 theTSN = j + last_tsn; 2959 while (tp1) { 2960 if (tp1->rec.data.doing_fast_retransmit) 2961 (*num_frs) += 1; 2962 2963 /*- 2964 * CMT: CUCv2 algorithm. 
For each TSN being 2965 * processed from the sent queue, track the 2966 * next expected pseudo-cumack, or 2967 * rtx_pseudo_cumack, if required. Separate 2968 * cumack trackers for first transmissions, 2969 * and retransmissions. 2970 */ 2971 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2972 (tp1->whoTo->find_pseudo_cumack == 1) && 2973 (tp1->snd_count == 1)) { 2974 tp1->whoTo->pseudo_cumack = tp1->rec.data.tsn; 2975 tp1->whoTo->find_pseudo_cumack = 0; 2976 } 2977 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 2978 (tp1->whoTo->find_rtx_pseudo_cumack == 1) && 2979 (tp1->snd_count > 1)) { 2980 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.tsn; 2981 tp1->whoTo->find_rtx_pseudo_cumack = 0; 2982 } 2983 if (tp1->rec.data.tsn == theTSN) { 2984 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 2985 /*- 2986 * must be held until 2987 * cum-ack passes 2988 */ 2989 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 2990 /*- 2991 * If it is less than RESEND, it is 2992 * now no-longer in flight. 2993 * Higher values may already be set 2994 * via previous Gap Ack Blocks... 2995 * i.e. ACKED or RESEND. 2996 */ 2997 if (SCTP_TSN_GT(tp1->rec.data.tsn, 2998 *biggest_newly_acked_tsn)) { 2999 *biggest_newly_acked_tsn = tp1->rec.data.tsn; 3000 } 3001 /*- 3002 * CMT: SFR algo (and HTNA) - set 3003 * saw_newack to 1 for dest being 3004 * newly acked. update 3005 * this_sack_highest_newack if 3006 * appropriate. 3007 */ 3008 if (tp1->rec.data.chunk_was_revoked == 0) 3009 tp1->whoTo->saw_newack = 1; 3010 3011 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3012 tp1->whoTo->this_sack_highest_newack)) { 3013 tp1->whoTo->this_sack_highest_newack = 3014 tp1->rec.data.tsn; 3015 } 3016 /*- 3017 * CMT DAC algo: also update 3018 * this_sack_lowest_newack 3019 */ 3020 if (*this_sack_lowest_newack == 0) { 3021 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3022 sctp_log_sack(*this_sack_lowest_newack, 3023 last_tsn, 3024 tp1->rec.data.tsn, 3025 0, 3026 0, 3027 SCTP_LOG_TSN_ACKED); 3028 } 3029 *this_sack_lowest_newack = tp1->rec.data.tsn; 3030 } 3031 /*- 3032 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp 3033 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set 3034 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be 3035 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack. 3036 * Separate pseudo_cumack trackers for first transmissions and 3037 * retransmissions. 
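 * In the code below, when the acked TSN matches the destination's
 * tracked pseudo_cumack or rtx_pseudo_cumack and the chunk was not
 * revoked, new_pseudo_cumack is set so the cwnd for that destination
 * can be updated, and find_(rtx_)pseudo_cumack is set again so the
 * next pass re-latches onto the following outstanding TSN.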
3038 */ 3039 if (tp1->rec.data.tsn == tp1->whoTo->pseudo_cumack) { 3040 if (tp1->rec.data.chunk_was_revoked == 0) { 3041 tp1->whoTo->new_pseudo_cumack = 1; 3042 } 3043 tp1->whoTo->find_pseudo_cumack = 1; 3044 } 3045 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 3046 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 3047 } 3048 if (tp1->rec.data.tsn == tp1->whoTo->rtx_pseudo_cumack) { 3049 if (tp1->rec.data.chunk_was_revoked == 0) { 3050 tp1->whoTo->new_pseudo_cumack = 1; 3051 } 3052 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3053 } 3054 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3055 sctp_log_sack(*biggest_newly_acked_tsn, 3056 last_tsn, 3057 tp1->rec.data.tsn, 3058 frag_strt, 3059 frag_end, 3060 SCTP_LOG_TSN_ACKED); 3061 } 3062 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3063 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP, 3064 tp1->whoTo->flight_size, 3065 tp1->book_size, 3066 (uint32_t)(uintptr_t)tp1->whoTo, 3067 tp1->rec.data.tsn); 3068 } 3069 sctp_flight_size_decrease(tp1); 3070 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3071 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3072 tp1); 3073 } 3074 sctp_total_flight_decrease(stcb, tp1); 3075 3076 tp1->whoTo->net_ack += tp1->send_size; 3077 if (tp1->snd_count < 2) { 3078 /*- 3079 * True non-retransmited chunk 3080 */ 3081 tp1->whoTo->net_ack2 += tp1->send_size; 3082 3083 /*- 3084 * update RTO too ? 3085 */ 3086 if (tp1->do_rtt) { 3087 if (*rto_ok) { 3088 tp1->whoTo->RTO = 3089 sctp_calculate_rto(stcb, 3090 &stcb->asoc, 3091 tp1->whoTo, 3092 &tp1->sent_rcv_time, 3093 SCTP_RTT_FROM_DATA); 3094 *rto_ok = 0; 3095 } 3096 if (tp1->whoTo->rto_needed == 0) { 3097 tp1->whoTo->rto_needed = 1; 3098 } 3099 tp1->do_rtt = 0; 3100 } 3101 } 3102 3103 } 3104 if (tp1->sent <= SCTP_DATAGRAM_RESEND) { 3105 if (SCTP_TSN_GT(tp1->rec.data.tsn, 3106 stcb->asoc.this_sack_highest_gap)) { 3107 stcb->asoc.this_sack_highest_gap = 3108 tp1->rec.data.tsn; 3109 } 3110 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3111 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt); 3112#ifdef SCTP_AUDITING_ENABLED 3113 sctp_audit_log(0xB2, 3114 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff)); 3115#endif 3116 } 3117 } 3118 /*- 3119 * All chunks NOT UNSENT fall through here and are marked 3120 * (leave PR-SCTP ones that are to skip alone though) 3121 */ 3122 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) && 3123 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3124 tp1->sent = SCTP_DATAGRAM_MARKED; 3125 } 3126 if (tp1->rec.data.chunk_was_revoked) { 3127 /* deflate the cwnd */ 3128 tp1->whoTo->cwnd -= tp1->book_size; 3129 tp1->rec.data.chunk_was_revoked = 0; 3130 } 3131 /* NR Sack code here */ 3132 if (nr_sacking && 3133 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) { 3134 if (stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 3135 stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues--; 3136#ifdef INVARIANTS 3137 } else { 3138 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 3139#endif 3140 } 3141 if ((stcb->asoc.strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 3142 (stcb->asoc.strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 3143 TAILQ_EMPTY(&stcb->asoc.strmout[tp1->rec.data.sid].outqueue)) { 3144 stcb->asoc.trigger_reset = 1; 3145 } 3146 tp1->sent = SCTP_DATAGRAM_NR_ACKED; 3147 if (tp1->data) { 3148 /* 3149 * sa_ignore 3150 * NO_NULL_CHK 3151 */ 3152 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1); 3153 
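 /*
  * NR-SACK: the peer has reported this TSN as non-renegable, so the
  * local copy of the data can be released here instead of waiting
  * for the cumulative ack to pass it.
  */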
sctp_m_freem(tp1->data); 3154 tp1->data = NULL; 3155 } 3156 wake_him++; 3157 } 3158 } 3159 break; 3160 } /* if (tp1->tsn == theTSN) */ 3161 if (SCTP_TSN_GT(tp1->rec.data.tsn, theTSN)) { 3162 break; 3163 } 3164 tp1 = TAILQ_NEXT(tp1, sctp_next); 3165 if ((tp1 == NULL) && (circled == 0)) { 3166 circled++; 3167 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3168 } 3169 } /* end while (tp1) */ 3170 if (tp1 == NULL) { 3171 circled = 0; 3172 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue); 3173 } 3174 /* In case the fragments were not in order we must reset */ 3175 } /* end for (j = fragStart */ 3176 *p_tp1 = tp1; 3177 return (wake_him); /* Return value only used for nr-sack */ 3178} 3179 3180 3181static int 3182sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc, 3183 uint32_t last_tsn, uint32_t *biggest_tsn_acked, 3184 uint32_t *biggest_newly_acked_tsn, uint32_t *this_sack_lowest_newack, 3185 int num_seg, int num_nr_seg, int *rto_ok) 3186{ 3187 struct sctp_gap_ack_block *frag, block; 3188 struct sctp_tmit_chunk *tp1; 3189 int i; 3190 int num_frs = 0; 3191 int chunk_freed; 3192 int non_revocable; 3193 uint16_t frag_strt, frag_end, prev_frag_end; 3194 3195 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3196 prev_frag_end = 0; 3197 chunk_freed = 0; 3198 3199 for (i = 0; i < (num_seg + num_nr_seg); i++) { 3200 if (i == num_seg) { 3201 prev_frag_end = 0; 3202 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3203 } 3204 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset, 3205 sizeof(struct sctp_gap_ack_block), (uint8_t *)&block); 3206 *offset += sizeof(block); 3207 if (frag == NULL) { 3208 return (chunk_freed); 3209 } 3210 frag_strt = ntohs(frag->start); 3211 frag_end = ntohs(frag->end); 3212 3213 if (frag_strt > frag_end) { 3214 /* This gap report is malformed, skip it. */ 3215 continue; 3216 } 3217 if (frag_strt <= prev_frag_end) { 3218 /* This gap report is not in order, so restart. */ 3219 tp1 = TAILQ_FIRST(&asoc->sent_queue); 3220 } 3221 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) { 3222 *biggest_tsn_acked = last_tsn + frag_end; 3223 } 3224 if (i < num_seg) { 3225 non_revocable = 0; 3226 } else { 3227 non_revocable = 1; 3228 } 3229 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end, 3230 non_revocable, &num_frs, biggest_newly_acked_tsn, 3231 this_sack_lowest_newack, rto_ok)) { 3232 chunk_freed = 1; 3233 } 3234 prev_frag_end = frag_end; 3235 } 3236 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3237 if (num_frs) 3238 sctp_log_fr(*biggest_tsn_acked, 3239 *biggest_newly_acked_tsn, 3240 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS); 3241 } 3242 return (chunk_freed); 3243} 3244 3245static void 3246sctp_check_for_revoked(struct sctp_tcb *stcb, 3247 struct sctp_association *asoc, uint32_t cumack, 3248 uint32_t biggest_tsn_acked) 3249{ 3250 struct sctp_tmit_chunk *tp1; 3251 3252 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3253 if (SCTP_TSN_GT(tp1->rec.data.tsn, cumack)) { 3254 /* 3255 * ok this guy is either ACK or MARKED. If it is 3256 * ACKED it has been previously acked but not this 3257 * time i.e. revoked. If it is MARKED it was ACK'ed 3258 * again. 3259 */ 3260 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked)) { 3261 break; 3262 } 3263 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 3264 /* it has been revoked */ 3265 tp1->sent = SCTP_DATAGRAM_SENT; 3266 tp1->rec.data.chunk_was_revoked = 1; 3267 /* 3268 * We must add this stuff back in to assure 3269 * timers and such get started. 
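 * Putting the chunk back to SENT re-inflates flight_size and the
 * total flight; cwnd is credited by book_size below to compensate,
 * so the revoked data is again treated as in flight.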
3270 */ 3271 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3272 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 3273 tp1->whoTo->flight_size, 3274 tp1->book_size, 3275 (uint32_t)(uintptr_t)tp1->whoTo, 3276 tp1->rec.data.tsn); 3277 } 3278 sctp_flight_size_increase(tp1); 3279 sctp_total_flight_increase(stcb, tp1); 3280 /* 3281 * We inflate the cwnd to compensate for our 3282 * artificial inflation of the flight_size. 3283 */ 3284 tp1->whoTo->cwnd += tp1->book_size; 3285 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 3286 sctp_log_sack(asoc->last_acked_seq, 3287 cumack, 3288 tp1->rec.data.tsn, 3289 0, 3290 0, 3291 SCTP_LOG_TSN_REVOKED); 3292 } 3293 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) { 3294 /* it has been re-acked in this SACK */ 3295 tp1->sent = SCTP_DATAGRAM_ACKED; 3296 } 3297 } 3298 if (tp1->sent == SCTP_DATAGRAM_UNSENT) 3299 break; 3300 } 3301} 3302 3303 3304static void 3305sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc, 3306 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved) 3307{ 3308 struct sctp_tmit_chunk *tp1; 3309 int strike_flag = 0; 3310 struct timeval now; 3311 int tot_retrans = 0; 3312 uint32_t sending_seq; 3313 struct sctp_nets *net; 3314 int num_dests_sacked = 0; 3315 3316 /* 3317 * select the sending_seq, this is either the next thing ready to be 3318 * sent but not transmitted, OR, the next seq we assign. 3319 */ 3320 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue); 3321 if (tp1 == NULL) { 3322 sending_seq = asoc->sending_seq; 3323 } else { 3324 sending_seq = tp1->rec.data.tsn; 3325 } 3326 3327 /* CMT DAC algo: finding out if SACK is a mixed SACK */ 3328 if ((asoc->sctp_cmt_on_off > 0) && 3329 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3330 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3331 if (net->saw_newack) 3332 num_dests_sacked++; 3333 } 3334 } 3335 if (stcb->asoc.prsctp_supported) { 3336 (void)SCTP_GETTIME_TIMEVAL(&now); 3337 } 3338 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 3339 strike_flag = 0; 3340 if (tp1->no_fr_allowed) { 3341 /* this one had a timeout or something */ 3342 continue; 3343 } 3344 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3345 if (tp1->sent < SCTP_DATAGRAM_RESEND) 3346 sctp_log_fr(biggest_tsn_newly_acked, 3347 tp1->rec.data.tsn, 3348 tp1->sent, 3349 SCTP_FR_LOG_CHECK_STRIKE); 3350 } 3351 if (SCTP_TSN_GT(tp1->rec.data.tsn, biggest_tsn_acked) || 3352 tp1->sent == SCTP_DATAGRAM_UNSENT) { 3353 /* done */ 3354 break; 3355 } 3356 if (stcb->asoc.prsctp_supported) { 3357 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) { 3358 /* Is it expired? 
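 * For timed-reliability (PR-SCTP TTL) chunks that are not yet acked,
 * rec.data.timetodrop holds the absolute deadline; once the current
 * time passes it, the chunk is abandoned via
 * sctp_release_pr_sctp_chunk() instead of being struck for fast
 * retransmit.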
*/ 3359 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3360 /* Yes so drop it */ 3361 if (tp1->data != NULL) { 3362 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3363 SCTP_SO_NOT_LOCKED); 3364 } 3365 continue; 3366 } 3367 } 3368 3369 } 3370 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->this_sack_highest_gap) && 3371 !(accum_moved && asoc->fast_retran_loss_recovery)) { 3372 /* we are beyond the tsn in the sack */ 3373 break; 3374 } 3375 if (tp1->sent >= SCTP_DATAGRAM_RESEND) { 3376 /* either a RESEND, ACKED, or MARKED */ 3377 /* skip */ 3378 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) { 3379 /* Continue striking FWD-TSN chunks */ 3380 tp1->rec.data.fwd_tsn_cnt++; 3381 } 3382 continue; 3383 } 3384 /* 3385 * CMT: SFR algo (covers part of DAC and HTNA as well) 3386 */ 3387 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) { 3388 /* 3389 * No new acks were received for data sent to this 3390 * dest. Therefore, according to the SFR algo for 3391 * CMT, no data sent to this dest can be marked for 3392 * FR using this SACK. 3393 */ 3394 continue; 3395 } else if (tp1->whoTo && 3396 SCTP_TSN_GT(tp1->rec.data.tsn, 3397 tp1->whoTo->this_sack_highest_newack) && 3398 !(accum_moved && asoc->fast_retran_loss_recovery)) { 3399 /* 3400 * CMT: New acks were received for data sent to 3401 * this dest. But no new acks were seen for data 3402 * sent after tp1. Therefore, according to the SFR 3403 * algo for CMT, tp1 cannot be marked for FR using 3404 * this SACK. This step covers part of the DAC algo 3405 * and the HTNA algo as well. 3406 */ 3407 continue; 3408 } 3409 /* 3410 * Here we check to see if we have already done a FR 3411 * and if so we see if the biggest TSN we saw in the sack is 3412 * smaller than the recovery point. If so we don't strike 3413 * the tsn... otherwise we CAN strike the TSN. 3414 */ 3415 /* 3416 * @@@ JRI: Check for CMT if (accum_moved && 3417 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off == 3418 * 0)) { 3419 */ 3420 if (accum_moved && asoc->fast_retran_loss_recovery) { 3421 /* 3422 * Strike the TSN if in fast-recovery and cum-ack 3423 * moved. 3424 */ 3425 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3426 sctp_log_fr(biggest_tsn_newly_acked, 3427 tp1->rec.data.tsn, 3428 tp1->sent, 3429 SCTP_FR_LOG_STRIKE_CHUNK); 3430 } 3431 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3432 tp1->sent++; 3433 } 3434 if ((asoc->sctp_cmt_on_off > 0) && 3435 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3436 /* 3437 * CMT DAC algorithm: If SACK flag is set to 3438 * 0, then lowest_newack test will not pass 3439 * because it would have been set to the 3440 * cumack earlier. If not already to be 3441 * rtx'd, If not a mixed sack and if tp1 is 3442 * not between two sacked TSNs, then mark by 3443 * one more. NOTE that we are marking by one 3444 * additional time since the SACK DAC flag 3445 * indicates that two packets have been 3446 * received after this missing TSN. 3447 */ 3448 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3449 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3450 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3451 sctp_log_fr(16 + num_dests_sacked, 3452 tp1->rec.data.tsn, 3453 tp1->sent, 3454 SCTP_FR_LOG_STRIKE_CHUNK); 3455 } 3456 tp1->sent++; 3457 } 3458 } 3459 } else if ((tp1->rec.data.doing_fast_retransmit) && 3460 (asoc->sctp_cmt_on_off == 0)) { 3461 /* 3462 * For those that have done a FR we must take 3463 * special consideration if we strike. 
I.e the 3464 * biggest_newly_acked must be higher than the 3465 * sending_seq at the time we did the FR. 3466 */ 3467 if ( 3468#ifdef SCTP_FR_TO_ALTERNATE 3469 /* 3470 * If FR's go to new networks, then we must only do 3471 * this for singly homed asoc's. However if the FR's 3472 * go to the same network (Armando's work) then its 3473 * ok to FR multiple times. 3474 */ 3475 (asoc->numnets < 2) 3476#else 3477 (1) 3478#endif 3479 ) { 3480 3481 if (SCTP_TSN_GE(biggest_tsn_newly_acked, 3482 tp1->rec.data.fast_retran_tsn)) { 3483 /* 3484 * Strike the TSN, since this ack is 3485 * beyond where things were when we 3486 * did a FR. 3487 */ 3488 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3489 sctp_log_fr(biggest_tsn_newly_acked, 3490 tp1->rec.data.tsn, 3491 tp1->sent, 3492 SCTP_FR_LOG_STRIKE_CHUNK); 3493 } 3494 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3495 tp1->sent++; 3496 } 3497 strike_flag = 1; 3498 if ((asoc->sctp_cmt_on_off > 0) && 3499 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3500 /* 3501 * CMT DAC algorithm: If 3502 * SACK flag is set to 0, 3503 * then lowest_newack test 3504 * will not pass because it 3505 * would have been set to 3506 * the cumack earlier. If 3507 * not already to be rtx'd, 3508 * If not a mixed sack and 3509 * if tp1 is not between two 3510 * sacked TSNs, then mark by 3511 * one more. NOTE that we 3512 * are marking by one 3513 * additional time since the 3514 * SACK DAC flag indicates 3515 * that two packets have 3516 * been received after this 3517 * missing TSN. 3518 */ 3519 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && 3520 (num_dests_sacked == 1) && 3521 SCTP_TSN_GT(this_sack_lowest_newack, 3522 tp1->rec.data.tsn)) { 3523 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3524 sctp_log_fr(32 + num_dests_sacked, 3525 tp1->rec.data.tsn, 3526 tp1->sent, 3527 SCTP_FR_LOG_STRIKE_CHUNK); 3528 } 3529 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3530 tp1->sent++; 3531 } 3532 } 3533 } 3534 } 3535 } 3536 /* 3537 * JRI: TODO: remove code for HTNA algo. CMT's SFR 3538 * algo covers HTNA. 3539 */ 3540 } else if (SCTP_TSN_GT(tp1->rec.data.tsn, 3541 biggest_tsn_newly_acked)) { 3542 /* 3543 * We don't strike these: This is the HTNA 3544 * algorithm i.e. we don't strike If our TSN is 3545 * larger than the Highest TSN Newly Acked. 3546 */ 3547 ; 3548 } else { 3549 /* Strike the TSN */ 3550 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3551 sctp_log_fr(biggest_tsn_newly_acked, 3552 tp1->rec.data.tsn, 3553 tp1->sent, 3554 SCTP_FR_LOG_STRIKE_CHUNK); 3555 } 3556 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 3557 tp1->sent++; 3558 } 3559 if ((asoc->sctp_cmt_on_off > 0) && 3560 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) { 3561 /* 3562 * CMT DAC algorithm: If SACK flag is set to 3563 * 0, then lowest_newack test will not pass 3564 * because it would have been set to the 3565 * cumack earlier. If not already to be 3566 * rtx'd, If not a mixed sack and if tp1 is 3567 * not between two sacked TSNs, then mark by 3568 * one more. NOTE that we are marking by one 3569 * additional time since the SACK DAC flag 3570 * indicates that two packets have been 3571 * received after this missing TSN. 
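 * In other words, the extra strike is only applied when this SACK is
 * not a mixed SACK (exactly one destination saw new acks) and
 * this_sack_lowest_newack lies above tp1's TSN, i.e. the DAC flag
 * implies two packets were received after the missing TSN.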
3572 */ 3573 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) && 3574 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.tsn)) { 3575 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3576 sctp_log_fr(48 + num_dests_sacked, 3577 tp1->rec.data.tsn, 3578 tp1->sent, 3579 SCTP_FR_LOG_STRIKE_CHUNK); 3580 } 3581 tp1->sent++; 3582 } 3583 } 3584 } 3585 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 3586 struct sctp_nets *alt; 3587 3588 /* fix counts and things */ 3589 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3590 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND, 3591 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0), 3592 tp1->book_size, 3593 (uint32_t)(uintptr_t)tp1->whoTo, 3594 tp1->rec.data.tsn); 3595 } 3596 if (tp1->whoTo) { 3597 tp1->whoTo->net_ack++; 3598 sctp_flight_size_decrease(tp1); 3599 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3600 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3601 tp1); 3602 } 3603 } 3604 3605 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 3606 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND, 3607 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3608 } 3609 /* add back to the rwnd */ 3610 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)); 3611 3612 /* remove from the total flight */ 3613 sctp_total_flight_decrease(stcb, tp1); 3614 3615 if ((stcb->asoc.prsctp_supported) && 3616 (PR_SCTP_RTX_ENABLED(tp1->flags))) { 3617 /* 3618 * Has it been retransmitted tv_sec times? - 3619 * we store the retran count there. 3620 */ 3621 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) { 3622 /* Yes, so drop it */ 3623 if (tp1->data != NULL) { 3624 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1, 3625 SCTP_SO_NOT_LOCKED); 3626 } 3627 /* Make sure to flag we had a FR */ 3628 if (tp1->whoTo != NULL) { 3629 tp1->whoTo->net_ack++; 3630 } 3631 continue; 3632 } 3633 } 3634 /* 3635 * SCTP_PRINTF("OK, we are now ready to FR this 3636 * guy\n"); 3637 */ 3638 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 3639 sctp_log_fr(tp1->rec.data.tsn, tp1->snd_count, 3640 0, SCTP_FR_MARKED); 3641 } 3642 if (strike_flag) { 3643 /* This is a subsequent FR */ 3644 SCTP_STAT_INCR(sctps_sendmultfastretrans); 3645 } 3646 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 3647 if (asoc->sctp_cmt_on_off > 0) { 3648 /* 3649 * CMT: Using RTX_SSTHRESH policy for CMT. 3650 * If CMT is being used, then pick dest with 3651 * largest ssthresh for any retransmission. 3652 */ 3653 tp1->no_fr_allowed = 1; 3654 alt = tp1->whoTo; 3655 /* sa_ignore NO_NULL_CHK */ 3656 if (asoc->sctp_cmt_pf > 0) { 3657 /* 3658 * JRS 5/18/07 - If CMT PF is on, 3659 * use the PF version of 3660 * find_alt_net() 3661 */ 3662 alt = sctp_find_alternate_net(stcb, alt, 2); 3663 } else { 3664 /* 3665 * JRS 5/18/07 - If only CMT is on, 3666 * use the CMT version of 3667 * find_alt_net() 3668 */ 3669 /* sa_ignore NO_NULL_CHK */ 3670 alt = sctp_find_alternate_net(stcb, alt, 1); 3671 } 3672 if (alt == NULL) { 3673 alt = tp1->whoTo; 3674 } 3675 /* 3676 * CUCv2: If a different dest is picked for 3677 * the retransmission, then new 3678 * (rtx-)pseudo_cumack needs to be tracked 3679 * for orig dest. Let CUCv2 track new (rtx-) 3680 * pseudo-cumack always. 
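 * Setting the two find_* flags asks the next SACK pass to pick a
 * fresh (rtx-)pseudo-cumack TSN for the original destination, so
 * per-path cwnd updates can continue even though this chunk may
 * now be outstanding on a different path.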
3681 */ 3682 if (tp1->whoTo) { 3683 tp1->whoTo->find_pseudo_cumack = 1; 3684 tp1->whoTo->find_rtx_pseudo_cumack = 1; 3685 } 3686 3687 } else { /* CMT is OFF */ 3688 3689#ifdef SCTP_FR_TO_ALTERNATE 3690 /* Can we find an alternate? */ 3691 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0); 3692#else 3693 /* 3694 * default behavior is to NOT retransmit 3695 * FR's to an alternate. Armando Caro's 3696 * paper details why. 3697 */ 3698 alt = tp1->whoTo; 3699#endif 3700 } 3701 3702 tp1->rec.data.doing_fast_retransmit = 1; 3703 tot_retrans++; 3704 /* mark the sending seq for possible subsequent FR's */ 3705 /* 3706 * SCTP_PRINTF("Marking TSN for FR new value %x\n", 3707 * (uint32_t)tpi->rec.data.tsn); 3708 */ 3709 if (TAILQ_EMPTY(&asoc->send_queue)) { 3710 /* 3711 * If the queue of send is empty then its 3712 * the next sequence number that will be 3713 * assigned so we subtract one from this to 3714 * get the one we last sent. 3715 */ 3716 tp1->rec.data.fast_retran_tsn = sending_seq; 3717 } else { 3718 /* 3719 * If there are chunks on the send queue 3720 * (unsent data that has made it from the 3721 * stream queues but not out the door, we 3722 * take the first one (which will have the 3723 * lowest TSN) and subtract one to get the 3724 * one we last sent. 3725 */ 3726 struct sctp_tmit_chunk *ttt; 3727 3728 ttt = TAILQ_FIRST(&asoc->send_queue); 3729 tp1->rec.data.fast_retran_tsn = 3730 ttt->rec.data.tsn; 3731 } 3732 3733 if (tp1->do_rtt) { 3734 /* 3735 * this guy had a RTO calculation pending on 3736 * it, cancel it 3737 */ 3738 if ((tp1->whoTo != NULL) && 3739 (tp1->whoTo->rto_needed == 0)) { 3740 tp1->whoTo->rto_needed = 1; 3741 } 3742 tp1->do_rtt = 0; 3743 } 3744 if (alt != tp1->whoTo) { 3745 /* yes, there is an alternate. */ 3746 sctp_free_remote_addr(tp1->whoTo); 3747 /* sa_ignore FREED_MEMORY */ 3748 tp1->whoTo = alt; 3749 atomic_add_int(&alt->ref_count, 1); 3750 } 3751 } 3752 } 3753} 3754 3755struct sctp_tmit_chunk * 3756sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb, 3757 struct sctp_association *asoc) 3758{ 3759 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL; 3760 struct timeval now; 3761 int now_filled = 0; 3762 3763 if (asoc->prsctp_supported == 0) { 3764 return (NULL); 3765 } 3766 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 3767 if (tp1->sent != SCTP_FORWARD_TSN_SKIP && 3768 tp1->sent != SCTP_DATAGRAM_RESEND && 3769 tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 3770 /* no chance to advance, out of here */ 3771 break; 3772 } 3773 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 3774 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3775 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3776 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 3777 asoc->advanced_peer_ack_point, 3778 tp1->rec.data.tsn, 0, 0); 3779 } 3780 } 3781 if (!PR_SCTP_ENABLED(tp1->flags)) { 3782 /* 3783 * We can't fwd-tsn past any that are reliable aka 3784 * retransmitted until the asoc fails. 3785 */ 3786 break; 3787 } 3788 if (!now_filled) { 3789 (void)SCTP_GETTIME_TIMEVAL(&now); 3790 now_filled = 1; 3791 } 3792 /* 3793 * now we got a chunk which is marked for another 3794 * retransmission to a PR-stream but has run out its chances 3795 * already maybe OR has been marked to skip now. Can we skip 3796 * it if its a resend? 3797 */ 3798 if (tp1->sent == SCTP_DATAGRAM_RESEND && 3799 (PR_SCTP_TTL_ENABLED(tp1->flags))) { 3800 /* 3801 * Now is this one marked for resend and its time is 3802 * now up? 
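 * For the TTL policy, rec.data.timetodrop holds the absolute time
 * at which the message may be abandoned, so the timevalcmp() below
 * is sufficient. (The RTX policy reuses the same field and keeps
 * its retransmission limit in tv_sec, as noted in
 * sctp_strike_gap_ack_chunks().)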
3803 */ 3804 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) { 3805 /* Yes so drop it */ 3806 if (tp1->data) { 3807 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 3808 1, SCTP_SO_NOT_LOCKED); 3809 } 3810 } else { 3811 /* 3812 * No, we are done when hit one for resend 3813 * whos time as not expired. 3814 */ 3815 break; 3816 } 3817 } 3818 /* 3819 * Ok now if this chunk is marked to drop it we can clean up 3820 * the chunk, advance our peer ack point and we can check 3821 * the next chunk. 3822 */ 3823 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) || 3824 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) { 3825 /* advance PeerAckPoint goes forward */ 3826 if (SCTP_TSN_GT(tp1->rec.data.tsn, asoc->advanced_peer_ack_point)) { 3827 asoc->advanced_peer_ack_point = tp1->rec.data.tsn; 3828 a_adv = tp1; 3829 } else if (tp1->rec.data.tsn == asoc->advanced_peer_ack_point) { 3830 /* No update but we do save the chk */ 3831 a_adv = tp1; 3832 } 3833 } else { 3834 /* 3835 * If it is still in RESEND we can advance no 3836 * further 3837 */ 3838 break; 3839 } 3840 } 3841 return (a_adv); 3842} 3843 3844static int 3845sctp_fs_audit(struct sctp_association *asoc) 3846{ 3847 struct sctp_tmit_chunk *chk; 3848 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0; 3849 int ret; 3850#ifndef INVARIANTS 3851 int entry_flight, entry_cnt; 3852#endif 3853 3854 ret = 0; 3855#ifndef INVARIANTS 3856 entry_flight = asoc->total_flight; 3857 entry_cnt = asoc->total_flight_count; 3858#endif 3859 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt) 3860 return (0); 3861 3862 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 3863 if (chk->sent < SCTP_DATAGRAM_RESEND) { 3864 SCTP_PRINTF("Chk TSN: %u size: %d inflight cnt: %d\n", 3865 chk->rec.data.tsn, 3866 chk->send_size, 3867 chk->snd_count); 3868 inflight++; 3869 } else if (chk->sent == SCTP_DATAGRAM_RESEND) { 3870 resend++; 3871 } else if (chk->sent < SCTP_DATAGRAM_ACKED) { 3872 inbetween++; 3873 } else if (chk->sent > SCTP_DATAGRAM_ACKED) { 3874 above++; 3875 } else { 3876 acked++; 3877 } 3878 } 3879 3880 if ((inflight > 0) || (inbetween > 0)) { 3881#ifdef INVARIANTS 3882 panic("Flight size-express incorrect? \n"); 3883#else 3884 SCTP_PRINTF("asoc->total_flight: %d cnt: %d\n", 3885 entry_flight, entry_cnt); 3886 3887 SCTP_PRINTF("Flight size-express incorrect F: %d I: %d R: %d Ab: %d ACK: %d\n", 3888 inflight, inbetween, resend, above, acked); 3889 ret = 1; 3890#endif 3891 } 3892 return (ret); 3893} 3894 3895 3896static void 3897sctp_window_probe_recovery(struct sctp_tcb *stcb, 3898 struct sctp_association *asoc, 3899 struct sctp_tmit_chunk *tp1) 3900{ 3901 tp1->window_probe = 0; 3902 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) { 3903 /* TSN's skipped we do NOT move back. */ 3904 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD, 3905 tp1->whoTo ? 
tp1->whoTo->flight_size : 0, 3906 tp1->book_size, 3907 (uint32_t)(uintptr_t)tp1->whoTo, 3908 tp1->rec.data.tsn); 3909 return; 3910 } 3911 /* First setup this by shrinking flight */ 3912 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 3913 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 3914 tp1); 3915 } 3916 sctp_flight_size_decrease(tp1); 3917 sctp_total_flight_decrease(stcb, tp1); 3918 /* Now mark for resend */ 3919 tp1->sent = SCTP_DATAGRAM_RESEND; 3920 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 3921 3922 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 3923 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP, 3924 tp1->whoTo->flight_size, 3925 tp1->book_size, 3926 (uint32_t)(uintptr_t)tp1->whoTo, 3927 tp1->rec.data.tsn); 3928 } 3929} 3930 3931void 3932sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack, 3933 uint32_t rwnd, int *abort_now, int ecne_seen) 3934{ 3935 struct sctp_nets *net; 3936 struct sctp_association *asoc; 3937 struct sctp_tmit_chunk *tp1, *tp2; 3938 uint32_t old_rwnd; 3939 int win_probe_recovery = 0; 3940 int win_probe_recovered = 0; 3941 int j, done_once = 0; 3942 int rto_ok = 1; 3943 uint32_t send_s; 3944 3945 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 3946 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack, 3947 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 3948 } 3949 SCTP_TCB_LOCK_ASSERT(stcb); 3950#ifdef SCTP_ASOCLOG_OF_TSNS 3951 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack; 3952 stcb->asoc.cumack_log_at++; 3953 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 3954 stcb->asoc.cumack_log_at = 0; 3955 } 3956#endif 3957 asoc = &stcb->asoc; 3958 old_rwnd = asoc->peers_rwnd; 3959 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) { 3960 /* old ack */ 3961 return; 3962 } else if (asoc->last_acked_seq == cumack) { 3963 /* Window update sack */ 3964 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 3965 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 3966 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 3967 /* SWS sender side engages */ 3968 asoc->peers_rwnd = 0; 3969 } 3970 if (asoc->peers_rwnd > old_rwnd) { 3971 goto again; 3972 } 3973 return; 3974 } 3975 3976 /* First setup for CC stuff */ 3977 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 3978 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) { 3979 /* Drag along the window_tsn for cwr's */ 3980 net->cwr_window_tsn = cumack; 3981 } 3982 net->prev_cwnd = net->cwnd; 3983 net->net_ack = 0; 3984 net->net_ack2 = 0; 3985 3986 /* 3987 * CMT: Reset CUC and Fast recovery algo variables before 3988 * SACK processing 3989 */ 3990 net->new_pseudo_cumack = 0; 3991 net->will_exit_fast_recovery = 0; 3992 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 3993 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 3994 } 3995 } 3996 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 3997 tp1 = TAILQ_LAST(&asoc->sent_queue, 3998 sctpchunk_listhead); 3999 send_s = tp1->rec.data.tsn + 1; 4000 } else { 4001 send_s = asoc->sending_seq; 4002 } 4003 if (SCTP_TSN_GE(cumack, send_s)) { 4004 struct mbuf *op_err; 4005 char msg[SCTP_DIAG_INFO_LEN]; 4006 4007 *abort_now = 1; 4008 /* XXX */ 4009 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 4010 cumack, send_s); 4011 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4012 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21; 4013 
sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4014 return; 4015 } 4016 asoc->this_sack_highest_gap = cumack; 4017 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4018 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4019 stcb->asoc.overall_error_count, 4020 0, 4021 SCTP_FROM_SCTP_INDATA, 4022 __LINE__); 4023 } 4024 stcb->asoc.overall_error_count = 0; 4025 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) { 4026 /* process the new consecutive TSN first */ 4027 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4028 if (SCTP_TSN_GE(cumack, tp1->rec.data.tsn)) { 4029 if (tp1->sent == SCTP_DATAGRAM_UNSENT) { 4030 SCTP_PRINTF("Warning, an unsent is now acked?\n"); 4031 } 4032 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4033 /* 4034 * If it is less than ACKED, it is 4035 * now no-longer in flight. Higher 4036 * values may occur during marking 4037 */ 4038 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4039 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4040 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4041 tp1->whoTo->flight_size, 4042 tp1->book_size, 4043 (uint32_t)(uintptr_t)tp1->whoTo, 4044 tp1->rec.data.tsn); 4045 } 4046 sctp_flight_size_decrease(tp1); 4047 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4048 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4049 tp1); 4050 } 4051 /* sa_ignore NO_NULL_CHK */ 4052 sctp_total_flight_decrease(stcb, tp1); 4053 } 4054 tp1->whoTo->net_ack += tp1->send_size; 4055 if (tp1->snd_count < 2) { 4056 /* 4057 * True non-retransmited 4058 * chunk 4059 */ 4060 tp1->whoTo->net_ack2 += 4061 tp1->send_size; 4062 4063 /* update RTO too? */ 4064 if (tp1->do_rtt) { 4065 if (rto_ok) { 4066 tp1->whoTo->RTO = 4067 /* 4068 * sa_ignore 4069 * NO_NULL_CHK 4070 */ 4071 sctp_calculate_rto(stcb, 4072 asoc, tp1->whoTo, 4073 &tp1->sent_rcv_time, 4074 SCTP_RTT_FROM_DATA); 4075 rto_ok = 0; 4076 } 4077 if (tp1->whoTo->rto_needed == 0) { 4078 tp1->whoTo->rto_needed = 1; 4079 } 4080 tp1->do_rtt = 0; 4081 } 4082 } 4083 /* 4084 * CMT: CUCv2 algorithm. From the 4085 * cumack'd TSNs, for each TSN being 4086 * acked for the first time, set the 4087 * following variables for the 4088 * corresp destination. 4089 * new_pseudo_cumack will trigger a 4090 * cwnd update. 4091 * find_(rtx_)pseudo_cumack will 4092 * trigger search for the next 4093 * expected (rtx-)pseudo-cumack. 
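 * In this cumack-only path every chunk at or below the new
 * cumulative TSN that was not already gap-acked is being acked for
 * the first time, so the flags are simply set for the destination
 * of each such chunk.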
4094 */ 4095 tp1->whoTo->new_pseudo_cumack = 1; 4096 tp1->whoTo->find_pseudo_cumack = 1; 4097 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4098 4099 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4100 /* sa_ignore NO_NULL_CHK */ 4101 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4102 } 4103 } 4104 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4105 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4106 } 4107 if (tp1->rec.data.chunk_was_revoked) { 4108 /* deflate the cwnd */ 4109 tp1->whoTo->cwnd -= tp1->book_size; 4110 tp1->rec.data.chunk_was_revoked = 0; 4111 } 4112 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4113 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4114 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4115#ifdef INVARIANTS 4116 } else { 4117 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4118#endif 4119 } 4120 } 4121 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4122 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4123 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4124 asoc->trigger_reset = 1; 4125 } 4126 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4127 if (tp1->data) { 4128 /* sa_ignore NO_NULL_CHK */ 4129 sctp_free_bufspace(stcb, asoc, tp1, 1); 4130 sctp_m_freem(tp1->data); 4131 tp1->data = NULL; 4132 } 4133 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4134 sctp_log_sack(asoc->last_acked_seq, 4135 cumack, 4136 tp1->rec.data.tsn, 4137 0, 4138 0, 4139 SCTP_LOG_FREE_SENT); 4140 } 4141 asoc->sent_queue_cnt--; 4142 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4143 } else { 4144 break; 4145 } 4146 } 4147 4148 } 4149 /* sa_ignore NO_NULL_CHK */ 4150 if (stcb->sctp_socket) { 4151#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4152 struct socket *so; 4153 4154#endif 4155 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4156 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4157 /* sa_ignore NO_NULL_CHK */ 4158 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK); 4159 } 4160#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4161 so = SCTP_INP_SO(stcb->sctp_ep); 4162 atomic_add_int(&stcb->asoc.refcnt, 1); 4163 SCTP_TCB_UNLOCK(stcb); 4164 SCTP_SOCKET_LOCK(so, 1); 4165 SCTP_TCB_LOCK(stcb); 4166 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4167 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4168 /* assoc was freed while we were unlocked */ 4169 SCTP_SOCKET_UNLOCK(so, 1); 4170 return; 4171 } 4172#endif 4173 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4174#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4175 SCTP_SOCKET_UNLOCK(so, 1); 4176#endif 4177 } else { 4178 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4179 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK); 4180 } 4181 } 4182 4183 /* JRS - Use the congestion control given in the CC module */ 4184 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) { 4185 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4186 if (net->net_ack2 > 0) { 4187 /* 4188 * Karn's rule applies to clearing error 4189 * count, this is optional. 
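 * net_ack2 only counts bytes whose chunks were acked on their first
 * transmission, so the ack is unambiguous evidence that the path
 * delivered fresh data; per Karn's rule we do not clear the error
 * count based on acks that may belong to a retransmission.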
4190 */ 4191 net->error_count = 0; 4192 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4193 /* addr came good */ 4194 net->dest_state |= SCTP_ADDR_REACHABLE; 4195 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4196 0, (void *)net, SCTP_SO_NOT_LOCKED); 4197 } 4198 if (net == stcb->asoc.primary_destination) { 4199 if (stcb->asoc.alternate) { 4200 /* 4201 * release the alternate, 4202 * primary is good 4203 */ 4204 sctp_free_remote_addr(stcb->asoc.alternate); 4205 stcb->asoc.alternate = NULL; 4206 } 4207 } 4208 if (net->dest_state & SCTP_ADDR_PF) { 4209 net->dest_state &= ~SCTP_ADDR_PF; 4210 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4211 stcb->sctp_ep, stcb, net, 4212 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22); 4213 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4214 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4215 /* Done with this net */ 4216 net->net_ack = 0; 4217 } 4218 /* restore any doubled timers */ 4219 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4220 if (net->RTO < stcb->asoc.minrto) { 4221 net->RTO = stcb->asoc.minrto; 4222 } 4223 if (net->RTO > stcb->asoc.maxrto) { 4224 net->RTO = stcb->asoc.maxrto; 4225 } 4226 } 4227 } 4228 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0); 4229 } 4230 asoc->last_acked_seq = cumack; 4231 4232 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4233 /* nothing left in-flight */ 4234 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4235 net->flight_size = 0; 4236 net->partial_bytes_acked = 0; 4237 } 4238 asoc->total_flight = 0; 4239 asoc->total_flight_count = 0; 4240 } 4241 4242 /* RWND update */ 4243 asoc->peers_rwnd = sctp_sbspace_sub(rwnd, 4244 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 4245 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4246 /* SWS sender side engages */ 4247 asoc->peers_rwnd = 0; 4248 } 4249 if (asoc->peers_rwnd > old_rwnd) { 4250 win_probe_recovery = 1; 4251 } 4252 /* Now assure a timer where data is queued at */ 4253again: 4254 j = 0; 4255 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4256 if (win_probe_recovery && (net->window_probe)) { 4257 win_probe_recovered = 1; 4258 /* 4259 * Find first chunk that was used with window probe 4260 * and clear the sent 4261 */ 4262 /* sa_ignore FREED_MEMORY */ 4263 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4264 if (tp1->window_probe) { 4265 /* move back to data send queue */ 4266 sctp_window_probe_recovery(stcb, asoc, tp1); 4267 break; 4268 } 4269 } 4270 } 4271 if (net->flight_size) { 4272 j++; 4273 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4274 if (net->window_probe) { 4275 net->window_probe = 0; 4276 } 4277 } else { 4278 if (net->window_probe) { 4279 /* 4280 * In window probes we must assure a timer 4281 * is still running there 4282 */ 4283 net->window_probe = 0; 4284 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4285 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, net); 4286 } 4287 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 4288 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4289 stcb, net, 4290 SCTP_FROM_SCTP_INDATA + SCTP_LOC_23); 4291 } 4292 } 4293 } 4294 if ((j == 0) && 4295 (!TAILQ_EMPTY(&asoc->sent_queue)) && 4296 (asoc->sent_queue_retran_cnt == 0) && 4297 (win_probe_recovered == 0) && 4298 (done_once == 0)) { 4299 /* 4300 * huh, this should not happen unless all packets are 4301 * PR-SCTP and marked to skip of course. 
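 * If sctp_fs_audit() reports an inconsistency, the per-net and
 * association flight sizes are zeroed and rebuilt from the sent
 * queue below, and we loop once more (done_once) so that a timer
 * ends up running wherever data is really outstanding.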
4302 */ 4303 if (sctp_fs_audit(asoc)) { 4304 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4305 net->flight_size = 0; 4306 } 4307 asoc->total_flight = 0; 4308 asoc->total_flight_count = 0; 4309 asoc->sent_queue_retran_cnt = 0; 4310 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4311 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4312 sctp_flight_size_increase(tp1); 4313 sctp_total_flight_increase(stcb, tp1); 4314 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4315 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 4316 } 4317 } 4318 } 4319 done_once = 1; 4320 goto again; 4321 } 4322 /**********************************/ 4323 /* Now what about shutdown issues */ 4324 /**********************************/ 4325 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 4326 /* nothing left on sendqueue.. consider done */ 4327 /* clean up */ 4328 if ((asoc->stream_queue_cnt == 1) && 4329 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4330 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 4331 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 4332 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 4333 } 4334 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 4335 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 4336 (asoc->stream_queue_cnt == 1) && 4337 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 4338 struct mbuf *op_err; 4339 4340 *abort_now = 1; 4341 /* XXX */ 4342 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 4343 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 4344 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4345 return; 4346 } 4347 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 4348 (asoc->stream_queue_cnt == 0)) { 4349 struct sctp_nets *netp; 4350 4351 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 4352 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 4353 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4354 } 4355 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 4356 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4357 sctp_stop_timers_for_shutdown(stcb); 4358 if (asoc->alternate) { 4359 netp = asoc->alternate; 4360 } else { 4361 netp = asoc->primary_destination; 4362 } 4363 sctp_send_shutdown(stcb, netp); 4364 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 4365 stcb->sctp_ep, stcb, netp); 4366 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 4367 stcb->sctp_ep, stcb, netp); 4368 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 4369 (asoc->stream_queue_cnt == 0)) { 4370 struct sctp_nets *netp; 4371 4372 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 4373 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 4374 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 4375 sctp_stop_timers_for_shutdown(stcb); 4376 if (asoc->alternate) { 4377 netp = asoc->alternate; 4378 } else { 4379 netp = asoc->primary_destination; 4380 } 4381 sctp_send_shutdown_ack(stcb, netp); 4382 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 4383 stcb->sctp_ep, stcb, netp); 4384 } 4385 } 4386 /*********************************************/ 4387 /* Here we perform PR-SCTP procedures */ 4388 /* (section 4.2) */ 4389 /*********************************************/ 4390 /* C1. 
update advancedPeerAckPoint */ 4391 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) { 4392 asoc->advanced_peer_ack_point = cumack; 4393 } 4394 /* PR-Sctp issues need to be addressed too */ 4395 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 4396 struct sctp_tmit_chunk *lchk; 4397 uint32_t old_adv_peer_ack_point; 4398 4399 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 4400 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 4401 /* C3. See if we need to send a Fwd-TSN */ 4402 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) { 4403 /* 4404 * ISSUE with ECN, see FWD-TSN processing. 4405 */ 4406 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 4407 send_forward_tsn(stcb, asoc); 4408 } else if (lchk) { 4409 /* try to FR fwd-tsn's that get lost too */ 4410 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 4411 send_forward_tsn(stcb, asoc); 4412 } 4413 } 4414 } 4415 if (lchk) { 4416 /* Assure a timer is up */ 4417 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 4418 stcb->sctp_ep, stcb, lchk->whoTo); 4419 } 4420 } 4421 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 4422 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 4423 rwnd, 4424 stcb->asoc.peers_rwnd, 4425 stcb->asoc.total_flight, 4426 stcb->asoc.total_output_queue_size); 4427 } 4428} 4429 4430void 4431sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup, 4432 struct sctp_tcb *stcb, 4433 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup, 4434 int *abort_now, uint8_t flags, 4435 uint32_t cum_ack, uint32_t rwnd, int ecne_seen) 4436{ 4437 struct sctp_association *asoc; 4438 struct sctp_tmit_chunk *tp1, *tp2; 4439 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack; 4440 uint16_t wake_him = 0; 4441 uint32_t send_s = 0; 4442 long j; 4443 int accum_moved = 0; 4444 int will_exit_fast_recovery = 0; 4445 uint32_t a_rwnd, old_rwnd; 4446 int win_probe_recovery = 0; 4447 int win_probe_recovered = 0; 4448 struct sctp_nets *net = NULL; 4449 int done_once; 4450 int rto_ok = 1; 4451 uint8_t reneged_all = 0; 4452 uint8_t cmt_dac_flag; 4453 4454 /* 4455 * we take any chance we can to service our queues since we cannot 4456 * get awoken when the socket is read from :< 4457 */ 4458 /* 4459 * Now perform the actual SACK handling: 1) Verify that it is not an 4460 * old sack, if so discard. 2) If there is nothing left in the send 4461 * queue (cum-ack is equal to last acked) then you have a duplicate 4462 * too, update any rwnd change and verify no timers are running. 4463 * then return. 3) Process any new consequtive data i.e. cum-ack 4464 * moved process these first and note that it moved. 4) Process any 4465 * sack blocks. 5) Drop any acked from the queue. 6) Check for any 4466 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left, 4467 * sync up flightsizes and things, stop all timers and also check 4468 * for shutdown_pending state. If so then go ahead and send off the 4469 * shutdown. If in shutdown recv, send off the shutdown-ack and 4470 * start that timer, Ret. 9) Strike any non-acked things and do FR 4471 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp 4472 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK 4473 * if in shutdown_recv state. 
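 * Note that cumulative-ack-only updates (e.g. the SHUTDOWN handling
 * in sctp_update_acked() below) normally go through
 * sctp_express_handle_sack() instead; this function is the slow
 * path (see the sctps_slowpath_sack counter) and implements the
 * full list above.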
4474 */ 4475 SCTP_TCB_LOCK_ASSERT(stcb); 4476 /* CMT DAC algo */ 4477 this_sack_lowest_newack = 0; 4478 SCTP_STAT_INCR(sctps_slowpath_sack); 4479 last_tsn = cum_ack; 4480 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC; 4481#ifdef SCTP_ASOCLOG_OF_TSNS 4482 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack; 4483 stcb->asoc.cumack_log_at++; 4484 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) { 4485 stcb->asoc.cumack_log_at = 0; 4486 } 4487#endif 4488 a_rwnd = rwnd; 4489 4490 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) { 4491 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack, 4492 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd); 4493 } 4494 4495 old_rwnd = stcb->asoc.peers_rwnd; 4496 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 4497 sctp_misc_ints(SCTP_THRESHOLD_CLEAR, 4498 stcb->asoc.overall_error_count, 4499 0, 4500 SCTP_FROM_SCTP_INDATA, 4501 __LINE__); 4502 } 4503 stcb->asoc.overall_error_count = 0; 4504 asoc = &stcb->asoc; 4505 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4506 sctp_log_sack(asoc->last_acked_seq, 4507 cum_ack, 4508 0, 4509 num_seg, 4510 num_dup, 4511 SCTP_LOG_NEW_SACK); 4512 } 4513 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) { 4514 uint16_t i; 4515 uint32_t *dupdata, dblock; 4516 4517 for (i = 0; i < num_dup; i++) { 4518 dupdata = (uint32_t *)sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t), 4519 sizeof(uint32_t), (uint8_t *)&dblock); 4520 if (dupdata == NULL) { 4521 break; 4522 } 4523 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED); 4524 } 4525 } 4526 /* reality check */ 4527 if (!TAILQ_EMPTY(&asoc->sent_queue)) { 4528 tp1 = TAILQ_LAST(&asoc->sent_queue, 4529 sctpchunk_listhead); 4530 send_s = tp1->rec.data.tsn + 1; 4531 } else { 4532 tp1 = NULL; 4533 send_s = asoc->sending_seq; 4534 } 4535 if (SCTP_TSN_GE(cum_ack, send_s)) { 4536 struct mbuf *op_err; 4537 char msg[SCTP_DIAG_INFO_LEN]; 4538 4539 /* 4540 * no way, we have not even sent this TSN out yet. Peer is 4541 * hopelessly messed up with us. 
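 * send_s is the first TSN that has not been put on the wire yet, so
 * a cumulative ack reaching it acknowledges data we never sent; the
 * association is torn down below with a protocol violation cause.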
4542 */ 4543 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n", 4544 cum_ack, send_s); 4545 if (tp1) { 4546 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1: %p\n", 4547 tp1->rec.data.tsn, (void *)tp1); 4548 } 4549hopeless_peer: 4550 *abort_now = 1; 4551 /* XXX */ 4552 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x", 4553 cum_ack, send_s); 4554 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg); 4555 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25; 4556 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 4557 return; 4558 } 4559 /**********************/ 4560 /* 1) check the range */ 4561 /**********************/ 4562 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) { 4563 /* acking something behind */ 4564 return; 4565 } 4566 4567 /* update the Rwnd of the peer */ 4568 if (TAILQ_EMPTY(&asoc->sent_queue) && 4569 TAILQ_EMPTY(&asoc->send_queue) && 4570 (asoc->stream_queue_cnt == 0)) { 4571 /* nothing left on send/sent and strmq */ 4572 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 4573 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 4574 asoc->peers_rwnd, 0, 0, a_rwnd); 4575 } 4576 asoc->peers_rwnd = a_rwnd; 4577 if (asoc->sent_queue_retran_cnt) { 4578 asoc->sent_queue_retran_cnt = 0; 4579 } 4580 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 4581 /* SWS sender side engages */ 4582 asoc->peers_rwnd = 0; 4583 } 4584 /* stop any timers */ 4585 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4586 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4587 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26); 4588 net->partial_bytes_acked = 0; 4589 net->flight_size = 0; 4590 } 4591 asoc->total_flight = 0; 4592 asoc->total_flight_count = 0; 4593 return; 4594 } 4595 /* 4596 * We init netAckSz and netAckSz2 to 0. These are used to track 2 4597 * things. The total byte count acked is tracked in netAckSz AND 4598 * netAck2 is used to track the total bytes acked that are un- 4599 * amibguious and were never retransmitted. We track these on a per 4600 * destination address basis. 4601 */ 4602 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4603 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) { 4604 /* Drag along the window_tsn for cwr's */ 4605 net->cwr_window_tsn = cum_ack; 4606 } 4607 net->prev_cwnd = net->cwnd; 4608 net->net_ack = 0; 4609 net->net_ack2 = 0; 4610 4611 /* 4612 * CMT: Reset CUC and Fast recovery algo variables before 4613 * SACK processing 4614 */ 4615 net->new_pseudo_cumack = 0; 4616 net->will_exit_fast_recovery = 0; 4617 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) { 4618 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net); 4619 } 4620 4621 /* 4622 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has 4623 * to be greater than the cumack. Also reset saw_newack to 0 4624 * for all dests. 4625 */ 4626 net->saw_newack = 0; 4627 net->this_sack_highest_newack = last_tsn; 4628 } 4629 /* process the new consecutive TSN first */ 4630 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4631 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.tsn)) { 4632 if (tp1->sent != SCTP_DATAGRAM_UNSENT) { 4633 accum_moved = 1; 4634 if (tp1->sent < SCTP_DATAGRAM_ACKED) { 4635 /* 4636 * If it is less than ACKED, it is 4637 * now no-longer in flight. 
Higher 4638 * values may occur during marking 4639 */ 4640 if ((tp1->whoTo->dest_state & 4641 SCTP_ADDR_UNCONFIRMED) && 4642 (tp1->snd_count < 2)) { 4643 /* 4644 * If there was no retran 4645 * and the address is 4646 * un-confirmed and we sent 4647 * there and are now 4648 * sacked.. its confirmed, 4649 * mark it so. 4650 */ 4651 tp1->whoTo->dest_state &= 4652 ~SCTP_ADDR_UNCONFIRMED; 4653 } 4654 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 4655 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4656 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA, 4657 tp1->whoTo->flight_size, 4658 tp1->book_size, 4659 (uint32_t)(uintptr_t)tp1->whoTo, 4660 tp1->rec.data.tsn); 4661 } 4662 sctp_flight_size_decrease(tp1); 4663 sctp_total_flight_decrease(stcb, tp1); 4664 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) { 4665 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo, 4666 tp1); 4667 } 4668 } 4669 tp1->whoTo->net_ack += tp1->send_size; 4670 4671 /* CMT SFR and DAC algos */ 4672 this_sack_lowest_newack = tp1->rec.data.tsn; 4673 tp1->whoTo->saw_newack = 1; 4674 4675 if (tp1->snd_count < 2) { 4676 /* 4677 * True non-retransmited 4678 * chunk 4679 */ 4680 tp1->whoTo->net_ack2 += 4681 tp1->send_size; 4682 4683 /* update RTO too? */ 4684 if (tp1->do_rtt) { 4685 if (rto_ok) { 4686 tp1->whoTo->RTO = 4687 sctp_calculate_rto(stcb, 4688 asoc, tp1->whoTo, 4689 &tp1->sent_rcv_time, 4690 SCTP_RTT_FROM_DATA); 4691 rto_ok = 0; 4692 } 4693 if (tp1->whoTo->rto_needed == 0) { 4694 tp1->whoTo->rto_needed = 1; 4695 } 4696 tp1->do_rtt = 0; 4697 } 4698 } 4699 /* 4700 * CMT: CUCv2 algorithm. From the 4701 * cumack'd TSNs, for each TSN being 4702 * acked for the first time, set the 4703 * following variables for the 4704 * corresp destination. 4705 * new_pseudo_cumack will trigger a 4706 * cwnd update. 4707 * find_(rtx_)pseudo_cumack will 4708 * trigger search for the next 4709 * expected (rtx-)pseudo-cumack. 4710 */ 4711 tp1->whoTo->new_pseudo_cumack = 1; 4712 tp1->whoTo->find_pseudo_cumack = 1; 4713 tp1->whoTo->find_rtx_pseudo_cumack = 1; 4714 4715 4716 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4717 sctp_log_sack(asoc->last_acked_seq, 4718 cum_ack, 4719 tp1->rec.data.tsn, 4720 0, 4721 0, 4722 SCTP_LOG_TSN_ACKED); 4723 } 4724 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 4725 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.tsn, SCTP_CWND_LOG_FROM_SACK); 4726 } 4727 } 4728 if (tp1->sent == SCTP_DATAGRAM_RESEND) { 4729 sctp_ucount_decr(asoc->sent_queue_retran_cnt); 4730#ifdef SCTP_AUDITING_ENABLED 4731 sctp_audit_log(0xB3, 4732 (asoc->sent_queue_retran_cnt & 0x000000ff)); 4733#endif 4734 } 4735 if (tp1->rec.data.chunk_was_revoked) { 4736 /* deflate the cwnd */ 4737 tp1->whoTo->cwnd -= tp1->book_size; 4738 tp1->rec.data.chunk_was_revoked = 0; 4739 } 4740 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4741 tp1->sent = SCTP_DATAGRAM_ACKED; 4742 } 4743 } 4744 } else { 4745 break; 4746 } 4747 } 4748 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn; 4749 /* always set this up to cum-ack */ 4750 asoc->this_sack_highest_gap = last_tsn; 4751 4752 if ((num_seg > 0) || (num_nr_seg > 0)) { 4753 4754 /* 4755 * thisSackHighestGap will increase while handling NEW 4756 * segments this_sack_highest_newack will increase while 4757 * handling NEWLY ACKED chunks. this_sack_lowest_newack is 4758 * used for CMT DAC algo. saw_newack will also change. 
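 * These per-destination markers are consumed later by
 * sctp_strike_gap_ack_chunks(): saw_newack and
 * this_sack_highest_newack implement the SFR rule that only chunks
 * on paths which actually saw new acks may be struck, while
 * this_sack_lowest_newack enables the extra DAC strike.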
4759 */ 4760 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked, 4761 &biggest_tsn_newly_acked, &this_sack_lowest_newack, 4762 num_seg, num_nr_seg, &rto_ok)) { 4763 wake_him++; 4764 } 4765 /* 4766 * validate the biggest_tsn_acked in the gap acks if strict 4767 * adherence is wanted. 4768 */ 4769 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) { 4770 /* 4771 * peer is either confused or we are under attack. 4772 * We must abort. 4773 */ 4774 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n", 4775 biggest_tsn_acked, send_s); 4776 goto hopeless_peer; 4777 } 4778 } 4779 /*******************************************/ 4780 /* cancel ALL T3-send timer if accum moved */ 4781 /*******************************************/ 4782 if (asoc->sctp_cmt_on_off > 0) { 4783 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4784 if (net->new_pseudo_cumack) 4785 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4786 stcb, net, 4787 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27); 4788 4789 } 4790 } else { 4791 if (accum_moved) { 4792 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4793 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 4794 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28); 4795 } 4796 } 4797 } 4798 /********************************************/ 4799 /* drop the acked chunks from the sentqueue */ 4800 /********************************************/ 4801 asoc->last_acked_seq = cum_ack; 4802 4803 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) { 4804 if (SCTP_TSN_GT(tp1->rec.data.tsn, cum_ack)) { 4805 break; 4806 } 4807 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) { 4808 if (asoc->strmout[tp1->rec.data.sid].chunks_on_queues > 0) { 4809 asoc->strmout[tp1->rec.data.sid].chunks_on_queues--; 4810#ifdef INVARIANTS 4811 } else { 4812 panic("No chunks on the queues for sid %u.", tp1->rec.data.sid); 4813#endif 4814 } 4815 } 4816 if ((asoc->strmout[tp1->rec.data.sid].chunks_on_queues == 0) && 4817 (asoc->strmout[tp1->rec.data.sid].state == SCTP_STREAM_RESET_PENDING) && 4818 TAILQ_EMPTY(&asoc->strmout[tp1->rec.data.sid].outqueue)) { 4819 asoc->trigger_reset = 1; 4820 } 4821 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next); 4822 if (PR_SCTP_ENABLED(tp1->flags)) { 4823 if (asoc->pr_sctp_cnt != 0) 4824 asoc->pr_sctp_cnt--; 4825 } 4826 asoc->sent_queue_cnt--; 4827 if (tp1->data) { 4828 /* sa_ignore NO_NULL_CHK */ 4829 sctp_free_bufspace(stcb, asoc, tp1, 1); 4830 sctp_m_freem(tp1->data); 4831 tp1->data = NULL; 4832 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) { 4833 asoc->sent_queue_cnt_removeable--; 4834 } 4835 } 4836 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) { 4837 sctp_log_sack(asoc->last_acked_seq, 4838 cum_ack, 4839 tp1->rec.data.tsn, 4840 0, 4841 0, 4842 SCTP_LOG_FREE_SENT); 4843 } 4844 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED); 4845 wake_him++; 4846 } 4847 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) { 4848#ifdef INVARIANTS 4849 panic("Warning flight size is positive and should be 0"); 4850#else 4851 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n", 4852 asoc->total_flight); 4853#endif 4854 asoc->total_flight = 0; 4855 } 4856 4857 /* sa_ignore NO_NULL_CHK */ 4858 if ((wake_him) && (stcb->sctp_socket)) { 4859#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4860 struct socket *so; 4861 4862#endif 4863 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd); 4864 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4865 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK); 4866 
} 4867#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4868 so = SCTP_INP_SO(stcb->sctp_ep); 4869 atomic_add_int(&stcb->asoc.refcnt, 1); 4870 SCTP_TCB_UNLOCK(stcb); 4871 SCTP_SOCKET_LOCK(so, 1); 4872 SCTP_TCB_LOCK(stcb); 4873 atomic_subtract_int(&stcb->asoc.refcnt, 1); 4874 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) { 4875 /* assoc was freed while we were unlocked */ 4876 SCTP_SOCKET_UNLOCK(so, 1); 4877 return; 4878 } 4879#endif 4880 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket); 4881#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING) 4882 SCTP_SOCKET_UNLOCK(so, 1); 4883#endif 4884 } else { 4885 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) { 4886 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK); 4887 } 4888 } 4889 4890 if (asoc->fast_retran_loss_recovery && accum_moved) { 4891 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) { 4892 /* Setup so we will exit RFC2582 fast recovery */ 4893 will_exit_fast_recovery = 1; 4894 } 4895 } 4896 /* 4897 * Check for revoked fragments: 4898 * 4899 * if Previous sack - Had no frags then we can't have any revoked if 4900 * Previous sack - Had frag's then - If we now have frags aka 4901 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked 4902 * some of them. else - The peer revoked all ACKED fragments, since 4903 * we had some before and now we have NONE. 4904 */ 4905 4906 if (num_seg) { 4907 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked); 4908 asoc->saw_sack_with_frags = 1; 4909 } else if (asoc->saw_sack_with_frags) { 4910 int cnt_revoked = 0; 4911 4912 /* Peer revoked all dg's marked or acked */ 4913 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 4914 if (tp1->sent == SCTP_DATAGRAM_ACKED) { 4915 tp1->sent = SCTP_DATAGRAM_SENT; 4916 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 4917 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE, 4918 tp1->whoTo->flight_size, 4919 tp1->book_size, 4920 (uint32_t)(uintptr_t)tp1->whoTo, 4921 tp1->rec.data.tsn); 4922 } 4923 sctp_flight_size_increase(tp1); 4924 sctp_total_flight_increase(stcb, tp1); 4925 tp1->rec.data.chunk_was_revoked = 1; 4926 /* 4927 * To ensure that this increase in 4928 * flightsize, which is artificial, does not 4929 * throttle the sender, we also increase the 4930 * cwnd artificially. 4931 */ 4932 tp1->whoTo->cwnd += tp1->book_size; 4933 cnt_revoked++; 4934 } 4935 } 4936 if (cnt_revoked) { 4937 reneged_all = 1; 4938 } 4939 asoc->saw_sack_with_frags = 0; 4940 } 4941 if (num_nr_seg > 0) 4942 asoc->saw_sack_with_nr_frags = 1; 4943 else 4944 asoc->saw_sack_with_nr_frags = 0; 4945 4946 /* JRS - Use the congestion control given in the CC module */ 4947 if (ecne_seen == 0) { 4948 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4949 if (net->net_ack2 > 0) { 4950 /* 4951 * Karn's rule applies to clearing error 4952 * count, this is optional. 
4953 */ 4954 net->error_count = 0; 4955 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) { 4956 /* addr came good */ 4957 net->dest_state |= SCTP_ADDR_REACHABLE; 4958 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 4959 0, (void *)net, SCTP_SO_NOT_LOCKED); 4960 } 4961 4962 if (net == stcb->asoc.primary_destination) { 4963 if (stcb->asoc.alternate) { 4964 /* 4965 * release the alternate, 4966 * primary is good 4967 */ 4968 sctp_free_remote_addr(stcb->asoc.alternate); 4969 stcb->asoc.alternate = NULL; 4970 } 4971 } 4972 4973 if (net->dest_state & SCTP_ADDR_PF) { 4974 net->dest_state &= ~SCTP_ADDR_PF; 4975 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, 4976 stcb->sctp_ep, stcb, net, 4977 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29); 4978 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net); 4979 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net); 4980 /* Done with this net */ 4981 net->net_ack = 0; 4982 } 4983 /* restore any doubled timers */ 4984 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv; 4985 if (net->RTO < stcb->asoc.minrto) { 4986 net->RTO = stcb->asoc.minrto; 4987 } 4988 if (net->RTO > stcb->asoc.maxrto) { 4989 net->RTO = stcb->asoc.maxrto; 4990 } 4991 } 4992 } 4993 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery); 4994 } 4995 4996 if (TAILQ_EMPTY(&asoc->sent_queue)) { 4997 /* nothing left in-flight */ 4998 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 4999 /* stop all timers */ 5000 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5001 stcb, net, 5002 SCTP_FROM_SCTP_INDATA + SCTP_LOC_30); 5003 net->flight_size = 0; 5004 net->partial_bytes_acked = 0; 5005 } 5006 asoc->total_flight = 0; 5007 asoc->total_flight_count = 0; 5008 } 5009 5010 /**********************************/ 5011 /* Now what about shutdown issues */ 5012 /**********************************/ 5013 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) { 5014 /* nothing left on sendqueue.. 
consider done */ 5015 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5016 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5017 asoc->peers_rwnd, 0, 0, a_rwnd); 5018 } 5019 asoc->peers_rwnd = a_rwnd; 5020 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5021 /* SWS sender side engages */ 5022 asoc->peers_rwnd = 0; 5023 } 5024 /* clean up */ 5025 if ((asoc->stream_queue_cnt == 1) && 5026 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 5027 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) && 5028 ((*asoc->ss_functions.sctp_ss_is_user_msgs_incomplete) (stcb, asoc))) { 5029 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT; 5030 } 5031 if (((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) || 5032 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) && 5033 (asoc->stream_queue_cnt == 1) && 5034 (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT)) { 5035 struct mbuf *op_err; 5036 5037 *abort_now = 1; 5038 /* XXX */ 5039 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, ""); 5040 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24; 5041 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED); 5042 return; 5043 } 5044 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) && 5045 (asoc->stream_queue_cnt == 0)) { 5046 struct sctp_nets *netp; 5047 5048 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 5049 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 5050 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5051 } 5052 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 5053 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 5054 sctp_stop_timers_for_shutdown(stcb); 5055 if (asoc->alternate) { 5056 netp = asoc->alternate; 5057 } else { 5058 netp = asoc->primary_destination; 5059 } 5060 sctp_send_shutdown(stcb, netp); 5061 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 5062 stcb->sctp_ep, stcb, netp); 5063 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 5064 stcb->sctp_ep, stcb, netp); 5065 return; 5066 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) && 5067 (asoc->stream_queue_cnt == 0)) { 5068 struct sctp_nets *netp; 5069 5070 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 5071 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT); 5072 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 5073 sctp_stop_timers_for_shutdown(stcb); 5074 if (asoc->alternate) { 5075 netp = asoc->alternate; 5076 } else { 5077 netp = asoc->primary_destination; 5078 } 5079 sctp_send_shutdown_ack(stcb, netp); 5080 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, 5081 stcb->sctp_ep, stcb, netp); 5082 return; 5083 } 5084 } 5085 /* 5086 * Now here we are going to recycle net_ack for a different use... 5087 * HEADS UP. 5088 */ 5089 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5090 net->net_ack = 0; 5091 } 5092 5093 /* 5094 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking 5095 * to be done. Setting this_sack_lowest_newack to the cum_ack will 5096 * automatically ensure that. 5097 */ 5098 if ((asoc->sctp_cmt_on_off > 0) && 5099 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) && 5100 (cmt_dac_flag == 0)) { 5101 this_sack_lowest_newack = cum_ack; 5102 } 5103 if ((num_seg > 0) || (num_nr_seg > 0)) { 5104 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked, 5105 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved); 5106 } 5107 /* JRS - Use the congestion control given in the CC module */ 5108 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc); 5109 5110 /* Now are we exiting loss recovery ? 
*/ 5111 if (will_exit_fast_recovery) { 5112 /* Ok, we must exit fast recovery */ 5113 asoc->fast_retran_loss_recovery = 0; 5114 } 5115 if ((asoc->sat_t3_loss_recovery) && 5116 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) { 5117 /* end satellite t3 loss recovery */ 5118 asoc->sat_t3_loss_recovery = 0; 5119 } 5120 /* 5121 * CMT Fast recovery 5122 */ 5123 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5124 if (net->will_exit_fast_recovery) { 5125 /* Ok, we must exit fast recovery */ 5126 net->fast_retran_loss_recovery = 0; 5127 } 5128 } 5129 5130 /* Adjust and set the new rwnd value */ 5131 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) { 5132 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK, 5133 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd); 5134 } 5135 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd, 5136 (uint32_t)(asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)))); 5137 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) { 5138 /* SWS sender side engages */ 5139 asoc->peers_rwnd = 0; 5140 } 5141 if (asoc->peers_rwnd > old_rwnd) { 5142 win_probe_recovery = 1; 5143 } 5144 5145 /* 5146 * Now we must setup so we have a timer up for anyone with 5147 * outstanding data. 5148 */ 5149 done_once = 0; 5150again: 5151 j = 0; 5152 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5153 if (win_probe_recovery && (net->window_probe)) { 5154 win_probe_recovered = 1; 5155 /*- 5156 * Find first chunk that was used with 5157 * window probe and clear the event. Put 5158 * it back into the send queue as if has 5159 * not been sent. 5160 */ 5161 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5162 if (tp1->window_probe) { 5163 sctp_window_probe_recovery(stcb, asoc, tp1); 5164 break; 5165 } 5166 } 5167 } 5168 if (net->flight_size) { 5169 j++; 5170 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5171 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5172 stcb->sctp_ep, stcb, net); 5173 } 5174 if (net->window_probe) { 5175 net->window_probe = 0; 5176 } 5177 } else { 5178 if (net->window_probe) { 5179 /* 5180 * In window probes we must assure a timer 5181 * is still running there 5182 */ 5183 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5184 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5185 stcb->sctp_ep, stcb, net); 5186 5187 } 5188 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) { 5189 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 5190 stcb, net, 5191 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 5192 } 5193 } 5194 } 5195 if ((j == 0) && 5196 (!TAILQ_EMPTY(&asoc->sent_queue)) && 5197 (asoc->sent_queue_retran_cnt == 0) && 5198 (win_probe_recovered == 0) && 5199 (done_once == 0)) { 5200 /* 5201 * huh, this should not happen unless all packets are 5202 * PR-SCTP and marked to skip of course. 
5203 */ 5204 if (sctp_fs_audit(asoc)) { 5205 TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 5206 net->flight_size = 0; 5207 } 5208 asoc->total_flight = 0; 5209 asoc->total_flight_count = 0; 5210 asoc->sent_queue_retran_cnt = 0; 5211 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) { 5212 if (tp1->sent < SCTP_DATAGRAM_RESEND) { 5213 sctp_flight_size_increase(tp1); 5214 sctp_total_flight_increase(stcb, tp1); 5215 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) { 5216 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 5217 } 5218 } 5219 } 5220 done_once = 1; 5221 goto again; 5222 } 5223 /*********************************************/ 5224 /* Here we perform PR-SCTP procedures */ 5225 /* (section 4.2) */ 5226 /*********************************************/ 5227 /* C1. update advancedPeerAckPoint */ 5228 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) { 5229 asoc->advanced_peer_ack_point = cum_ack; 5230 } 5231 /* C2. try to further move advancedPeerAckPoint ahead */ 5232 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) { 5233 struct sctp_tmit_chunk *lchk; 5234 uint32_t old_adv_peer_ack_point; 5235 5236 old_adv_peer_ack_point = asoc->advanced_peer_ack_point; 5237 lchk = sctp_try_advance_peer_ack_point(stcb, asoc); 5238 /* C3. See if we need to send a Fwd-TSN */ 5239 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) { 5240 /* 5241 * ISSUE with ECN, see FWD-TSN processing. 5242 */ 5243 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) { 5244 sctp_misc_ints(SCTP_FWD_TSN_CHECK, 5245 0xee, cum_ack, asoc->advanced_peer_ack_point, 5246 old_adv_peer_ack_point); 5247 } 5248 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) { 5249 send_forward_tsn(stcb, asoc); 5250 } else if (lchk) { 5251 /* try to FR fwd-tsn's that get lost too */ 5252 if (lchk->rec.data.fwd_tsn_cnt >= 3) { 5253 send_forward_tsn(stcb, asoc); 5254 } 5255 } 5256 } 5257 if (lchk) { 5258 /* Assure a timer is up */ 5259 sctp_timer_start(SCTP_TIMER_TYPE_SEND, 5260 stcb->sctp_ep, stcb, lchk->whoTo); 5261 } 5262 } 5263 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) { 5264 sctp_misc_ints(SCTP_SACK_RWND_UPDATE, 5265 a_rwnd, 5266 stcb->asoc.peers_rwnd, 5267 stcb->asoc.total_flight, 5268 stcb->asoc.total_output_queue_size); 5269 } 5270} 5271 5272void 5273sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag) 5274{ 5275 /* Copy cum-ack */ 5276 uint32_t cum_ack, a_rwnd; 5277 5278 cum_ack = ntohl(cp->cumulative_tsn_ack); 5279 /* Arrange so a_rwnd does NOT change */ 5280 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight; 5281 5282 /* Now call the express sack handling */ 5283 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0); 5284} 5285 5286static void 5287sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb, 5288 struct sctp_stream_in *strmin) 5289{ 5290 struct sctp_queued_to_read *control, *ncontrol; 5291 struct sctp_association *asoc; 5292 uint32_t mid; 5293 int need_reasm_check = 0; 5294 5295 asoc = &stcb->asoc; 5296 mid = strmin->last_mid_delivered; 5297 /* 5298 * First deliver anything prior to and including the stream no that 5299 * came in. 
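 * last_mid_delivered has typically just been moved forward as part
 * of FORWARD-TSN handling, so any complete message queued at or
 * below it can be handed to the socket now; a partially reassembled
 * one only adjusts last_mid_delivered and defers to the reassembly
 * check further down.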
5300 */ 5301 TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) { 5302 if (SCTP_MID_GE(asoc->idata_supported, mid, control->mid)) { 5303 /* this is deliverable now */ 5304 if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) { 5305 if (control->on_strm_q) { 5306 if (control->on_strm_q == SCTP_ON_ORDERED) { 5307 TAILQ_REMOVE(&strmin->inqueue, control, next_instrm); 5308 } else if (control->on_strm_q == SCTP_ON_UNORDERED) { 5309 TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm); 5310#ifdef INVARIANTS 5311 } else { 5312 panic("strmin: %p ctl: %p unknown %d", 5313 strmin, control, control->on_strm_q); 5314#endif 5315 } 5316 control->on_strm_q = 0; 5317 } 5318 /* subtract pending on streams */ 5319 if (asoc->size_on_all_streams >= control->length) { 5320 asoc->size_on_all_streams -= control->length; 5321 } else { 5322#ifdef INVARIANTS 5323 panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length); 5324#else 5325 asoc->size_on_all_streams = 0; 5326#endif 5327 } 5328 sctp_ucount_decr(asoc->cnt_on_all_streams); 5329 /* deliver it to at least the delivery-q */ 5330 if (stcb->sctp_socket) { 5331 sctp_mark_non_revokable(asoc, control->sinfo_tsn); 5332 sctp_add_to_readq(stcb->sctp_ep, stcb, 5333 control, 5334 &stcb->sctp_socket->so_rcv, 5335 1, SCTP_READ_LOCK_HELD, 5336 SCTP_SO_NOT_LOCKED); 5337 } 5338 } else { 5339 /* Its a fragmented message */ 5340 if (control->first_frag_seen) { 5341 /* 5342 * Make it so this is next to 5343 * deliver, we restore later 5344 */ 5345 strmin->last_mid_delivered = control->mid - 1; 5346 need_reasm_check = 1; 5347 break; 5348 } 5349 } 5350 } else { 5351 /* no more delivery now. */ 5352 break; 5353 } 5354 } 5355 if (need_reasm_check) { 5356 int ret; 5357 5358 ret = sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD); 5359 if (SCTP_MID_GT(asoc->idata_supported, mid, strmin->last_mid_delivered)) { 5360 /* Restore the next to deliver unless we are ahead */ 5361 strmin->last_mid_delivered = mid; 5362 } 5363 if (ret == 0) { 5364 /* Left the front Partial one on */ 5365 return; 5366 } 5367 need_reasm_check = 0; 5368 } 5369 /* 5370 * now we must deliver things in queue the normal way if any are 5371 * now ready. 
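 * That is, resume strict in-order delivery: keep handing out the
 * message with id last_mid_delivered + 1 while it is complete, and
 * stop at the first gap or at the first partially reassembled
 * message.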
	mid = strmin->last_mid_delivered + 1;
	TAILQ_FOREACH_SAFE(control, &strmin->inqueue, next_instrm, ncontrol) {
		if (SCTP_MID_EQ(asoc->idata_supported, mid, control->mid)) {
			if (((control->sinfo_flags >> 8) & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG) {
				/* this is deliverable now */
				if (control->on_strm_q) {
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strmin->inqueue, control, next_instrm);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strmin->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else {
						panic("strmin: %p ctl: %p unknown %d",
						    strmin, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
				}
				/* subtract pending on streams */
				if (asoc->size_on_all_streams >= control->length) {
					asoc->size_on_all_streams -= control->length;
				} else {
#ifdef INVARIANTS
					panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
					asoc->size_on_all_streams = 0;
#endif
				}
				sctp_ucount_decr(asoc->cnt_on_all_streams);
				/* deliver it to at least the delivery-q */
				strmin->last_mid_delivered = control->mid;
				if (stcb->sctp_socket) {
					sctp_mark_non_revokable(asoc, control->sinfo_tsn);
					sctp_add_to_readq(stcb->sctp_ep, stcb,
					    control,
					    &stcb->sctp_socket->so_rcv, 1,
					    SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
				}
				mid = strmin->last_mid_delivered + 1;
			} else {
				/* It's a fragmented message */
				if (control->first_frag_seen) {
					/*
					 * Make it so this is next to
					 * deliver.
					 */
					strmin->last_mid_delivered = control->mid - 1;
					need_reasm_check = 1;
					break;
				}
			}
		} else {
			break;
		}
	}
	if (need_reasm_check) {
		(void)sctp_deliver_reasm_check(stcb, &stcb->asoc, strmin, SCTP_READ_LOCK_HELD);
	}
}


static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    uint16_t stream, uint32_t mid, int ordered, uint32_t cumtsn)
{
	struct sctp_queued_to_read *control;
	struct sctp_stream_in *strm;
	struct sctp_tmit_chunk *chk, *nchk;
	int cnt_removed = 0;

	/*
	 * For now, large messages held on the stream reassembly queue that
	 * are complete will be tossed too.  We could in theory do more work
	 * and stop after dumping one message, i.e. when we see the start of
	 * a new message at the head of the queue, and call the delivery
	 * function to see if it can be delivered.  But for now we just dump
	 * everything on the queue.
	 */
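	/*
	 * For example, with plain DATA chunks (no I-DATA) an unordered
	 * message is only purged up to the new cumulative TSN: the loop
	 * below stops at the first queued fragment whose TSN is above
	 * cumtsn, so fragments the FORWARD-TSN does not cover stay on the
	 * reassembly list.
	 */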
	strm = &asoc->strmin[stream];
	control = sctp_find_reasm_entry(strm, mid, ordered, asoc->idata_supported);
	if (control == NULL) {
		/* Not found */
		return;
	}
	if (!asoc->idata_supported && !ordered && SCTP_TSN_GT(control->fsn_included, cumtsn)) {
		return;
	}
	TAILQ_FOREACH_SAFE(chk, &control->reasm, sctp_next, nchk) {
		/* Purge hanging chunks */
		if (!asoc->idata_supported && (ordered == 0)) {
			if (SCTP_TSN_GT(chk->rec.data.tsn, cumtsn)) {
				break;
			}
		}
		cnt_removed++;
		TAILQ_REMOVE(&control->reasm, chk, sctp_next);
		if (asoc->size_on_reasm_queue >= chk->send_size) {
			asoc->size_on_reasm_queue -= chk->send_size;
		} else {
#ifdef INVARIANTS
			panic("size_on_reasm_queue = %u smaller than chunk length %u", asoc->size_on_reasm_queue, chk->send_size);
#else
			asoc->size_on_reasm_queue = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
		if (chk->data) {
			sctp_m_freem(chk->data);
			chk->data = NULL;
		}
		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
	}
	if (!TAILQ_EMPTY(&control->reasm)) {
		/* This has to be old data, unordered */
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_reset_a_control(control, stcb->sctp_ep, cumtsn);
		chk = TAILQ_FIRST(&control->reasm);
		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
			TAILQ_REMOVE(&control->reasm, chk, sctp_next);
			sctp_add_chk_to_control(control, strm, stcb, asoc,
			    chk, SCTP_READ_LOCK_HELD);
		}
		sctp_deliver_reasm_check(stcb, asoc, strm, SCTP_READ_LOCK_HELD);
		return;
	}
	if (control->on_strm_q == SCTP_ON_ORDERED) {
		TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
		if (asoc->size_on_all_streams >= control->length) {
			asoc->size_on_all_streams -= control->length;
		} else {
#ifdef INVARIANTS
			panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
			asoc->size_on_all_streams = 0;
#endif
		}
		sctp_ucount_decr(asoc->cnt_on_all_streams);
		control->on_strm_q = 0;
	} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
		TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
		control->on_strm_q = 0;
#ifdef INVARIANTS
	} else if (control->on_strm_q) {
		panic("strm: %p ctl: %p unknown %d",
		    strm, control, control->on_strm_q);
#endif
	}
	control->on_strm_q = 0;
	if (control->on_read_q == 0) {
		sctp_free_remote_addr(control->whoFrom);
		if (control->data) {
			sctp_m_freem(control->data);
			control->data = NULL;
		}
		sctp_free_a_readq(stcb, control);
	}
}

void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
	/* The pr-sctp fwd tsn */
	/*
	 * Here we will perform all the data receiver side steps for
	 * processing FwdTSN, as required by the PR-SCTP draft:
	 *
	 * Assume we get FwdTSN(x):
	 *
	 * 1) update local cumTSN to x
	 * 2) try to further advance cumTSN to x + others we have
	 * 3) examine and update re-ordering queue on pr-in-streams
	 * 4) clean up re-assembly queue
	 * 5) Send a sack to report where we are.
	 */
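	/*
	 * Example: if our cumulative TSN is 100 and FwdTSN(103) arrives,
	 * TSNs 101-103 are treated as received even though they never will
	 * be: the mapping array is marked (or reset when the jump is larger
	 * than the array), any matching reassembly state is flushed, and
	 * the per-stream delivery points are advanced so later messages are
	 * not held back by the abandoned ones.
	 */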
	struct sctp_association *asoc;
	uint32_t new_cum_tsn, gap;
	unsigned int i, fwd_sz, m_size;
	uint32_t str_seq;
	struct sctp_stream_in *strm;
	struct sctp_queued_to_read *control, *sv;

	asoc = &stcb->asoc;
	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
		SCTPDBG(SCTP_DEBUG_INDATA1,
		    "Bad size too small/big fwd-tsn\n");
		return;
	}
	m_size = (stcb->asoc.mapping_array_size << 3);
	/*************************************************************/
	/* 1. Here we update local cumTSN and shift the bitmap array */
	/*************************************************************/
	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
		/* Already got there ... */
		return;
	}
	/*
	 * Now we know the new TSN is more advanced, let's find the actual
	 * gap.
	 */
	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
	asoc->cumulative_tsn = new_cum_tsn;
	if (gap >= m_size) {
		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
			struct mbuf *op_err;
			char msg[SCTP_DIAG_INFO_LEN];

			/*
			 * Out of range (of single-byte chunks in the rwnd I
			 * give out).  This must be an attacker.
			 */
			*abort_flag = 1;
			snprintf(msg, sizeof(msg),
			    "New cum ack %8.8x too high, highest TSN %8.8x",
			    new_cum_tsn, asoc->highest_tsn_inside_map);
			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
			return;
		}
		SCTP_STAT_INCR(sctps_fwdtsn_map_over);

		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
		asoc->highest_tsn_inside_map = new_cum_tsn;

		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
		asoc->highest_tsn_inside_nr_map = new_cum_tsn;

		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
		}
	} else {
		SCTP_TCB_LOCK_ASSERT(stcb);
		for (i = 0; i <= gap; i++) {
			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
				}
			}
		}
	}
	/*************************************************************/
	/* 2. Clear up re-assembly queue                             */
	/*************************************************************/

	/* This is now done as part of clearing up the stream/seq */
	if (asoc->idata_supported == 0) {
		uint16_t sid;

		/* Flush all the un-ordered data based on cum-tsn */
		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		for (sid = 0; sid < asoc->streamincnt; sid++) {
			sctp_flush_reassm_for_str_seq(stcb, asoc, sid, 0, 0, new_cum_tsn);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*******************************************************/
	/* 3. Update the PR-stream re-ordering queues and fix  */
	/* delivery issues as needed.                          */
	/*******************************************************/
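	/*
	 * Each entry that follows the FORWARD-TSN header names one skipped
	 * message.  Without I-DATA an entry is a struct sctp_strseq (16-bit
	 * sid plus 16-bit ssn, 4 bytes); with I-DATA it is a struct
	 * sctp_strseq_mid (sid, flags and a 32-bit mid, 8 bytes).  So, for
	 * example, a 16-byte chunk without I-DATA carries (16 - 8) / 4 = 2
	 * entries, which is what the num_str computation below works out.
	 */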
	fwd_sz -= sizeof(*fwd);
	if (m && fwd_sz) {
		/* New method. */
		unsigned int num_str;
		uint32_t mid, cur_mid;
		uint16_t sid;
		uint16_t ordered, flags;
		struct sctp_strseq *stseq, strseqbuf;
		struct sctp_strseq_mid *stseq_m, strseqbuf_m;

		offset += sizeof(*fwd);

		SCTP_INP_READ_LOCK(stcb->sctp_ep);
		if (asoc->idata_supported) {
			num_str = fwd_sz / sizeof(struct sctp_strseq_mid);
		} else {
			num_str = fwd_sz / sizeof(struct sctp_strseq);
		}
		for (i = 0; i < num_str; i++) {
			if (asoc->idata_supported) {
				stseq_m = (struct sctp_strseq_mid *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq_mid),
				    (uint8_t *)&strseqbuf_m);
				offset += sizeof(struct sctp_strseq_mid);
				if (stseq_m == NULL) {
					break;
				}
				sid = ntohs(stseq_m->sid);
				mid = ntohl(stseq_m->mid);
				flags = ntohs(stseq_m->flags);
				if (flags & PR_SCTP_UNORDERED_FLAG) {
					ordered = 0;
				} else {
					ordered = 1;
				}
			} else {
				stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
				    sizeof(struct sctp_strseq),
				    (uint8_t *)&strseqbuf);
				offset += sizeof(struct sctp_strseq);
				if (stseq == NULL) {
					break;
				}
				sid = ntohs(stseq->sid);
				mid = (uint32_t)ntohs(stseq->ssn);
				ordered = 1;
			}
			/* Convert */

			/* now process */

			/*
			 * Ok, we now look for the stream/seq on the read
			 * queue where it's not all delivered.  If we find
			 * it we transmute the read entry into a
			 * PDI_ABORTED.
			 */
			if (sid >= asoc->streamincnt) {
				/* screwed up streams, stop! */
				break;
			}
			if ((asoc->str_of_pdapi == sid) &&
			    (asoc->ssn_of_pdapi == mid)) {
				/*
				 * If this is the one we were partially
				 * delivering now then we no longer are.
				 * Note this will change with the
				 * reassembly re-write.
				 */
				asoc->fragmented_delivery_inprogress = 0;
			}
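			/*
			 * Flush any reassembly state for the skipped MIDs
			 * and, if part of this message was already handed
			 * to the application via the partial delivery API,
			 * mark that read-queue entry aborted below so the
			 * user gets a SCTP_PARTIAL_DELIVERY_ABORTED
			 * notification instead of waiting for data that
			 * will never arrive.
			 */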
			strm = &asoc->strmin[sid];
			for (cur_mid = strm->last_mid_delivered; SCTP_MID_GE(asoc->idata_supported, mid, cur_mid); cur_mid++) {
				sctp_flush_reassm_for_str_seq(stcb, asoc, sid, cur_mid, ordered, new_cum_tsn);
			}
			TAILQ_FOREACH(control, &stcb->sctp_ep->read_queue, next) {
				if ((control->sinfo_stream == sid) &&
				    (SCTP_MID_EQ(asoc->idata_supported, control->mid, mid))) {
					str_seq = (sid << 16) | (0x0000ffff & mid);
					control->pdapi_aborted = 1;
					sv = stcb->asoc.control_pdapi;
					control->end_added = 1;
					if (control->on_strm_q == SCTP_ON_ORDERED) {
						TAILQ_REMOVE(&strm->inqueue, control, next_instrm);
						if (asoc->size_on_all_streams >= control->length) {
							asoc->size_on_all_streams -= control->length;
						} else {
#ifdef INVARIANTS
							panic("size_on_all_streams = %u smaller than control length %u", asoc->size_on_all_streams, control->length);
#else
							asoc->size_on_all_streams = 0;
#endif
						}
						sctp_ucount_decr(asoc->cnt_on_all_streams);
					} else if (control->on_strm_q == SCTP_ON_UNORDERED) {
						TAILQ_REMOVE(&strm->uno_inqueue, control, next_instrm);
#ifdef INVARIANTS
					} else if (control->on_strm_q) {
						panic("strm: %p ctl: %p unknown %d",
						    strm, control, control->on_strm_q);
#endif
					}
					control->on_strm_q = 0;
					stcb->asoc.control_pdapi = control;
					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
					    stcb,
					    SCTP_PARTIAL_DELIVERY_ABORTED,
					    (void *)&str_seq,
					    SCTP_SO_NOT_LOCKED);
					stcb->asoc.control_pdapi = sv;
					break;
				} else if ((control->sinfo_stream == sid) &&
				    SCTP_MID_GT(asoc->idata_supported, control->mid, mid)) {
					/* We are past our victim SSN */
					break;
				}
			}
			if (SCTP_MID_GT(asoc->idata_supported, mid, strm->last_mid_delivered)) {
				/* Update the sequence number */
				strm->last_mid_delivered = mid;
			}
			/* now kick the stream the new way */
			/* sa_ignore NO_NULL_CHK */
			sctp_kick_prsctp_reorder_queue(stcb, strm);
		}
		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
	}
	/*
	 * Now slide things forward.
	 */
	sctp_slide_mapping_arrays(stcb);
}