sctp_timer.c revision 216825
1/*- 2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions are met: 6 * 7 * a) Redistributions of source code must retain the above copyright notice, 8 * this list of conditions and the following disclaimer. 9 * 10 * b) Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in 12 * the documentation and/or other materials provided with the distribution. 13 * 14 * c) Neither the name of Cisco Systems, Inc. nor the names of its 15 * contributors may be used to endorse or promote products derived 16 * from this software without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28 * THE POSSIBILITY OF SUCH DAMAGE. 
 */

/* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.c 216825 2010-12-30 21:32:35Z tuexen $");

#define _IP_VHL
#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#ifdef INET6
#endif
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>
#include <netinet/udp.h>


/*
 * Early Fast Retransmit timer expiry for destination 'net': walk the sent
 * queue (newest to oldest) and mark chunks that have been outstanding for
 * at least the early-FR window for retransmission, then invoke the CC
 * module's early-FR reaction or push out already-marked chunks.
 */
void
sctp_early_fr_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_tmit_chunk *chk, *pchk;
	struct timeval now, min_wait, tv;
	unsigned int cur_rtt, cnt = 0, cnt_resend = 0;

	/* an early FR is occurring. */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	if (net->lastsa == 0) {
		/* Hmm no rtt estimate yet? Fall back to a quarter of the initial RTO. */
		cur_rtt = stcb->asoc.initial_rto >> 2;
	} else {
		/* smoothed RTT estimate from lastsa/lastsv (scaled fixed point) */
		cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
	}
	if (cur_rtt < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
		/* enforce the sysctl lower bound (milliseconds) */
		cur_rtt = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
	}
	/* convert milliseconds to a struct timeval */
	cur_rtt *= 1000;
	tv.tv_sec = cur_rtt / 1000000;
	tv.tv_usec = cur_rtt % 1000000;
	/* min_wait = now - cur_rtt: chunks sent before this are candidates */
	min_wait = now;
	timevalsub(&min_wait, &tv);
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough seconds on the clock
		 * to account for the RTO. We just let the lower seconds be
		 * the bounds and don't worry about it. This may mean we
		 * will mark a lot more than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
	TAILQ_FOREACH_REVERSE_SAFE(chk, &stcb->asoc.sent_queue, sctpchunk_listhead, sctp_next, pchk) {
		if (chk->whoTo != net) {
			continue;
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND)
			cnt_resend++;
		else if ((chk->sent > SCTP_DATAGRAM_UNSENT) &&
		    (chk->sent < SCTP_DATAGRAM_RESEND)) {
			/* pending, may need retran */
			if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
				/*
				 * we have reached a chunk that was sent
				 * some seconds past our min.. forget it we
				 * will find no more to send.
				 *
				 * NOTE(review): this uses 'continue', not
				 * 'break', so the (reverse) scan actually
				 * keeps going — confirm against upstream
				 * whether that is intentional.
				 */
				continue;
			} else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
				/*
				 * same second as the boundary: we must look
				 * at the micro seconds to know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok it was sent after our boundary
					 * time.
					 */
					continue;
				}
			}
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_EARLYFR_LOGGING_ENABLE) {
				sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
				    4, SCTP_FR_MARKED_EARLY);
			}
			SCTP_STAT_INCR(sctps_earlyfrmrkretrans);
			chk->sent = SCTP_DATAGRAM_RESEND;
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			/* double book size since we are doing an early FR */
			chk->book_size_scale++;
			cnt += chk->send_size;
			if ((cnt + net->flight_size) > net->cwnd) {
				/* Mark all we could possibly resend */
				break;
			}
		}
	}
	if (cnt) {
		/*
		 * JRS - Use the congestion control given in the congestion
		 * control module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer(inp, stcb, net);
	} else if (cnt_resend) {
		/* nothing newly marked, but resends are pending: push them out */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
	}
	/* Restart it?
 */
	if (net->flight_size < net->cwnd) {
		/* cwnd still has room: re-arm the early-FR timer */
		SCTP_STAT_INCR(sctps_earlyfrstrtmr);
		sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
	}
}

/*
 * Recompute the association's retransmission bookkeeping from scratch:
 * recount sent_queue_cnt and sent_queue_retran_cnt by walking the sent,
 * control and ASCONF send queues.  Used to re-establish consistency when
 * the cached counters are suspect.
 */
void
sctp_audit_retranmission_queue(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n",
	    asoc->sent_queue_retran_cnt,
	    asoc->sent_queue_cnt);
	asoc->sent_queue_retran_cnt = 0;
	asoc->sent_queue_cnt = 0;
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
		asoc->sent_queue_cnt++;
	}
	/* control and asconf chunks count toward retran_cnt but not queue cnt */
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
	}
	TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
	}
	SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n",
	    asoc->sent_queue_retran_cnt,
	    asoc->sent_queue_cnt);
}

/*
 * Account one timeout-style error against 'net' (if non-NULL) and against
 * the association, marking the path unreachable when its failure threshold
 * is exceeded.  Returns 1 (and aborts the association) when the overall
 * error count passes 'threshold', 0 otherwise.
 */
int
sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint16_t threshold)
{
	if (net) {
		net->error_count++;
		SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n",
		    net, net->error_count,
		    net->failure_threshold);
		if (net->error_count > net->failure_threshold) {
			/* We had a threshold failure */
			if (net->dest_state & SCTP_ADDR_REACHABLE) {
				net->dest_state &= ~SCTP_ADDR_REACHABLE;
				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
				net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
				if (net == stcb->asoc.primary_destination) {
					/* remember it was primary so it can be restored */
					net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
				}
				/*
				 * JRS 5/14/07 - If a destination is
				 * unreachable, the PF bit is turned off.
				 * This allows an unambiguous use of the PF
				 * bit for destinations that are reachable
				 * but potentially failed. If the
				 * destination is set to the unreachable
				 * state, also set the destination to the PF
				 * state.
				 */
				/*
				 * Add debug message here if destination is
				 * not in PF state.
				 */
				/* Stop any running T3 timers here? */
				if ((stcb->asoc.sctp_cmt_on_off > 0) &&
				    (stcb->asoc.sctp_cmt_pf > 0)) {
					/* CMT-PF active: clear the PF bit now that the path is unreachable */
					net->dest_state &= ~SCTP_ADDR_PF;
					SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
					    net);
				}
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
				    stcb,
				    SCTP_FAILED_THRESHOLD,
				    (void *)net, SCTP_SO_NOT_LOCKED);
			}
		}
		/*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
		/*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
	}
	if (stcb == NULL)
		return (0);

	if (net) {
		/* unconfirmed paths do not count toward the association total */
		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_INCR,
				    stcb->asoc.overall_error_count,
				    (stcb->asoc.overall_error_count + 1),
				    SCTP_FROM_SCTP_TIMER,
				    __LINE__);
			}
			stcb->asoc.overall_error_count++;
		}
	} else {
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_INCR,
			    stcb->asoc.overall_error_count,
			    (stcb->asoc.overall_error_count + 1),
			    SCTP_FROM_SCTP_TIMER,
			    __LINE__);
		}
		stcb->asoc.overall_error_count++;
	}
	SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n",
	    &stcb->asoc, stcb->asoc.overall_error_count,
	    (uint32_t) threshold,
	    ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
	/*
	 * We specifically do not do >= to give the assoc one more chance
	 * before we fail it.
	 */
	if (stcb->asoc.overall_error_count > threshold) {
		/* Abort notification sends a ULP notify */
		struct mbuf *oper;

		/* build a PROTOCOL_VIOLATION cause carrying the abort location code */
		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    sizeof(uint32_t);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
		}
		/* an allocation failure leaves oper NULL; abort proceeds without the cause */
		inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1;
		sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	return (0);
}

/*
 * sctp_find_alternate_net() returns a non-NULL pointer as long
 * the argument net is non-NULL.
 */
struct sctp_nets *
sctp_find_alternate_net(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int mode)
{
	/* Find and return an alternate network if possible */
	struct sctp_nets *alt, *mnet, *min_errors_net = NULL, *max_cwnd_net = NULL;
	int once;

	/* JRS 5/14/07 - Initialize min_errors to an impossible value. */
	int min_errors = -1;
	uint32_t max_cwnd = 0;

	if (stcb->asoc.numnets == 1) {
		/* No others but net */
		return (TAILQ_FIRST(&stcb->asoc.nets));
	}
	/*
	 * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate
	 * net algorithm. This algorithm chooses the active destination (not
	 * in PF state) with the largest cwnd value. If all destinations are
	 * in PF state, unreachable, or unconfirmed, choose the destination
	 * that is in PF state with the lowest error count. In case of a
	 * tie, choose the destination that was most recently active.
	 */
	if (mode == 2) {
		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
			/*
			 * JRS 5/14/07 - If the destination is unreachable
			 * or unconfirmed, skip it.
			 */
			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				continue;
			}
			/*
			 * JRS 5/14/07 - If the destination is reachable
			 * but in PF state, compare the error count of the
			 * destination to the minimum error count seen thus
			 * far. Store the destination with the lower error
			 * count. If the error counts are equal, store the
			 * destination that was most recently active.
			 */
			if (mnet->dest_state & SCTP_ADDR_PF) {
				/*
				 * JRS 5/14/07 - If the destination under
				 * consideration is the current destination,
				 * work as if the error count is one higher.
				 * The actual error count will not be
				 * incremented until later in the t3
				 * handler.
				 */
				if (mnet == net) {
					if (min_errors == -1) {
						/* first PF candidate seen */
						min_errors = mnet->error_count + 1;
						min_errors_net = mnet;
					} else if (mnet->error_count + 1 < min_errors) {
						min_errors = mnet->error_count + 1;
						min_errors_net = mnet;
					} else if (mnet->error_count + 1 == min_errors
					    && mnet->last_active > min_errors_net->last_active) {
						/* tie: prefer the most recently active path */
						min_errors_net = mnet;
						min_errors = mnet->error_count + 1;
					}
					continue;
				} else {
					if (min_errors == -1) {
						min_errors = mnet->error_count;
						min_errors_net = mnet;
					} else if (mnet->error_count < min_errors) {
						min_errors = mnet->error_count;
						min_errors_net = mnet;
					} else if (mnet->error_count == min_errors
					    && mnet->last_active > min_errors_net->last_active) {
						min_errors_net = mnet;
						min_errors = mnet->error_count;
					}
					continue;
				}
			}
			/*
			 * JRS 5/14/07 - If the destination is reachable and
			 * not in PF state, compare the cwnd of the
			 * destination to the highest cwnd seen thus far.
380 * Store the destination with the higher cwnd value. 381 * If the cwnd values are equal, randomly choose one 382 * of the two destinations. 383 */ 384 if (max_cwnd < mnet->cwnd) { 385 max_cwnd_net = mnet; 386 max_cwnd = mnet->cwnd; 387 } else if (max_cwnd == mnet->cwnd) { 388 uint32_t rndval; 389 uint8_t this_random; 390 391 if (stcb->asoc.hb_random_idx > 3) { 392 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 393 memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values)); 394 this_random = stcb->asoc.hb_random_values[0]; 395 stcb->asoc.hb_random_idx++; 396 stcb->asoc.hb_ect_randombit = 0; 397 } else { 398 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 399 stcb->asoc.hb_random_idx++; 400 stcb->asoc.hb_ect_randombit = 0; 401 } 402 if (this_random % 2 == 1) { 403 max_cwnd_net = mnet; 404 max_cwnd = mnet->cwnd; /* Useless? */ 405 } 406 } 407 } 408 /* 409 * JRS 5/14/07 - After all destination have been considered 410 * as alternates, check to see if there was some active 411 * destination (not in PF state). If not, check to see if 412 * there was some PF destination with the minimum number of 413 * errors. If not, return the original destination. If 414 * there is a min_errors_net, remove the PF flag from that 415 * destination, set the cwnd to one or two MTUs, and return 416 * the destination as an alt. If there was some active 417 * destination with a highest cwnd, return the destination 418 * as an alt. 
		 */
		if (max_cwnd_net == NULL) {
			if (min_errors_net == NULL) {
				/* no active and no PF candidate: keep the original */
				return (net);
			}
			/* promote the best PF path back to active */
			min_errors_net->dest_state &= ~SCTP_ADDR_PF;
			min_errors_net->cwnd = min_errors_net->mtu * stcb->asoc.sctp_cmt_pf;
			if (SCTP_OS_TIMER_PENDING(&min_errors_net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, min_errors_net,
				    SCTP_FROM_SCTP_TIMER + SCTP_LOC_2);
			}
			SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to active with %d errors.\n",
			    min_errors_net, min_errors_net->error_count);
			return (min_errors_net);
		} else {
			return (max_cwnd_net);
		}
	}
	/*
	 * JRS 5/14/07 - If mode is set to 1, use the CMT policy for
	 * choosing an alternate net.
	 */
	else if (mode == 1) {
		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				/*
				 * will skip ones that are not-reachable or
				 * unconfirmed
				 */
				continue;
			}
			if (max_cwnd < mnet->cwnd) {
				max_cwnd_net = mnet;
				max_cwnd = mnet->cwnd;
			} else if (max_cwnd == mnet->cwnd) {
				/* tie on cwnd: break it with a random byte from the HB pool */
				uint32_t rndval;
				uint8_t this_random;

				if (stcb->asoc.hb_random_idx > 3) {
					/* pool exhausted: refill and restart at slot 0 */
					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
					memcpy(stcb->asoc.hb_random_values, &rndval,
					    sizeof(stcb->asoc.hb_random_values));
					this_random = stcb->asoc.hb_random_values[0];
					stcb->asoc.hb_random_idx = 0;
					stcb->asoc.hb_ect_randombit = 0;
				} else {
					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				}
				if (this_random % 2) {
					max_cwnd_net = mnet;
					max_cwnd = mnet->cwnd;
				}
			}
		}
		if (max_cwnd_net) {
			return (max_cwnd_net);
		}
		/* fall through to the round-robin scan below when nothing qualified */
	}
	mnet = net;
	once = 0;

	if (mnet == NULL) {
		mnet = TAILQ_FIRST(&stcb->asoc.nets);
		if (mnet == NULL) {
			return (NULL);
		}
	}
	/*
	 * Round-robin scan starting after 'mnet', wrapping once, looking
	 * for a reachable, confirmed path with a cached route.
	 */
	do {
		alt = TAILQ_NEXT(mnet, sctp_next);
		if (alt == NULL) {
			once++;
			if (once > 1) {
				break;
			}
			alt = TAILQ_FIRST(&stcb->asoc.nets);
			if (alt == NULL) {
				return (NULL);
			}
		}
		if (alt->ro.ro_rt == NULL) {
			/* no route cached: drop the stale source address selection */
			if (alt->ro._s_addr) {
				sctp_free_ifa(alt->ro._s_addr);
				alt->ro._s_addr = NULL;
			}
			alt->src_addr_selected = 0;
		}
		/* sa_ignore NO_NULL_CHK */
		if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
		    (alt->ro.ro_rt != NULL) &&
		    (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))) {
			/* Found a reachable address */
			break;
		}
		mnet = alt;
	} while (alt != NULL);

	if (alt == NULL) {
		/* Case where NO insv network exists (dormant state) */
		/* we rotate destinations */
		once = 0;
		mnet = net;
		do {
			if (mnet == NULL) {
				return (TAILQ_FIRST(&stcb->asoc.nets));
			}
			alt = TAILQ_NEXT(mnet, sctp_next);
			if (alt == NULL) {
				once++;
				if (once > 1) {
					break;
				}
				alt = TAILQ_FIRST(&stcb->asoc.nets);
			}
			/* sa_ignore NO_NULL_CHK */
			if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
			    (alt != net)) {
				/* Found an alternate address */
				break;
			}
			mnet = alt;
		} while (alt != NULL);
	}
	if (alt == NULL) {
		return (net);
	}
	return (alt);
}

/*
 * Apply the RFC exponential RTO backoff for 'net' after a timeout, clamped
 * to [minrto, maxrto], and let the CC module react unless this was a pure
 * window probe or nothing was marked/abandoned.
 */
static void
sctp_backoff_on_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int win_probe,
    int num_marked, int num_abandoned)
{
	if (net->RTO == 0) {
		net->RTO = stcb->asoc.minrto;
	}
	/* RTO = RTO * 2, capped at maxrto */
	net->RTO <<= 1;
	if (net->RTO > stcb->asoc.maxrto) {
		net->RTO = stcb->asoc.maxrto;
	}
	if ((win_probe == 0) && (num_marked || num_abandoned)) {
		/* We don't apply penalty to window probe scenarios */
		/* JRS - Use the congestion control given in the CC module */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net);
	}
}

#ifndef INVARIANTS
static
void 573sctp_recover_sent_list(struct sctp_tcb *stcb) 574{ 575 struct sctp_tmit_chunk *chk, *nchk; 576 struct sctp_association *asoc; 577 578 asoc = &stcb->asoc; 579 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 580 if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.TSN_seq)) { 581 SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n", 582 chk, chk->rec.data.TSN_seq, asoc->last_acked_seq); 583 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 584 if (chk->pr_sctp_on) { 585 if (asoc->pr_sctp_cnt != 0) 586 asoc->pr_sctp_cnt--; 587 } 588 if (chk->data) { 589 /* sa_ignore NO_NULL_CHK */ 590 sctp_free_bufspace(stcb, asoc, chk, 1); 591 sctp_m_freem(chk->data); 592 chk->data = NULL; 593 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(chk->flags)) { 594 asoc->sent_queue_cnt_removeable--; 595 } 596 } 597 asoc->sent_queue_cnt--; 598 sctp_free_a_chunk(stcb, chk); 599 } 600 } 601 SCTP_PRINTF("after recover order is as follows\n"); 602 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 603 SCTP_PRINTF("chk:%p TSN:%x\n", chk, chk->rec.data.TSN_seq); 604 } 605} 606 607#endif 608 609static int 610sctp_mark_all_for_resend(struct sctp_tcb *stcb, 611 struct sctp_nets *net, 612 struct sctp_nets *alt, 613 int window_probe, 614 int *num_marked, 615 int *num_abandoned) 616{ 617 618 /* 619 * Mark all chunks (well not all) that were sent to *net for 620 * retransmission. Move them to alt for there destination as well... 621 * We only mark chunks that have been outstanding long enough to 622 * have received feed-back. 
	 */
	struct sctp_tmit_chunk *chk, *nchk;
	struct sctp_nets *lnets;
	struct timeval now, min_wait, tv;
	int cur_rtt;
	int cnt_abandoned;
	int audit_tf, num_mk, fir;
	unsigned int cnt_mk;
	uint32_t orig_flight, orig_tf;
	uint32_t tsnlast, tsnfirst;
	int recovery_cnt = 0;


	/* none in flight now */
	audit_tf = 0;
	fir = 0;
	/*
	 * figure out how long a data chunk must be pending before we can
	 * mark it ..
	 */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1);
	cur_rtt *= 1000;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
		sctp_log_fr(cur_rtt,
		    stcb->asoc.peers_rwnd,
		    window_probe,
		    SCTP_FR_T3_MARK_TIME);
		sctp_log_fr(net->flight_size,
		    SCTP_OS_TIMER_PENDING(&net->fr_timer.timer),
		    SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer),
		    SCTP_FR_CWND_REPORT);
		sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
	}
	/* min_wait = now - RTO: only chunks sent before this are marked */
	tv.tv_sec = cur_rtt / 1000000;
	tv.tv_usec = cur_rtt % 1000000;
	min_wait = now;
	timevalsub(&min_wait, &tv);
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough seconds on the clock
		 * to account for the RTO. We just let the lower seconds be
		 * the bounds and don't worry about it. This may mean we
		 * will mark a lot more than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
		sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
		sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
	}
	/*
	 * Our rwnd will be incorrect here since we are not adding back the
	 * cnt * mbuf but we will fix that down below.
	 */
	orig_flight = net->flight_size;
	orig_tf = stcb->asoc.total_flight;

	net->fast_retran_ip = 0;
	/* Now on to each chunk */
	cnt_abandoned = 0;
	num_mk = cnt_mk = 0;
	tsnfirst = tsnlast = 0;
#ifndef INVARIANTS
start_again:
#endif
	TAILQ_FOREACH_SAFE(chk, &stcb->asoc.sent_queue, sctp_next, nchk) {
		if (SCTP_TSN_GE(stcb->asoc.last_acked_seq, chk->rec.data.TSN_seq)) {
			/* Strange case our list got out of order? */
			SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x",
			    (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.TSN_seq);
			recovery_cnt++;
#ifdef INVARIANTS
			panic("last acked >= chk on sent-Q");
#else
			/* try to repair the queue in place, restarting the scan (max 10 tries) */
			SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt);
			sctp_recover_sent_list(stcb);
			if (recovery_cnt < 10) {
				goto start_again;
			} else {
				SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt);
			}
#endif
		}
		if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
			/*
			 * found one to mark: If it is less than
			 * DATAGRAM_ACKED it MUST not be a skipped or marked
			 * TSN but instead one that is either already set
			 * for retransmission OR one that needs
			 * retransmission.
			 */

			/* validate its been outstanding long enough */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
				sctp_log_fr(chk->rec.data.TSN_seq,
				    chk->sent_rcv_time.tv_sec,
				    chk->sent_rcv_time.tv_usec,
				    SCTP_FR_T3_MARK_TIME);
			}
			if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
				/*
				 * we have reached a chunk that was sent
				 * some seconds past our min.. forget it we
				 * will find no more to send.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
					sctp_log_fr(0,
					    chk->sent_rcv_time.tv_sec,
					    chk->sent_rcv_time.tv_usec,
					    SCTP_FR_T3_STOPPED);
				}
				continue;
			} else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
			    (window_probe == 0)) {
				/*
				 * we must look at the micro seconds to
				 * know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok it was sent after our boundary
					 * time.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
						sctp_log_fr(0,
						    chk->sent_rcv_time.tv_sec,
						    chk->sent_rcv_time.tv_usec,
						    SCTP_FR_T3_STOPPED);
					}
					continue;
				}
			}
			if (stcb->asoc.peer_supports_prsctp && PR_SCTP_TTL_ENABLED(chk->flags)) {
				/* Is it expired? */
				if (timevalcmp(&now, &chk->rec.data.timetodrop, >)) {
					/* Yes so drop it */
					if (chk->data) {
						(void)sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
						    SCTP_SO_NOT_LOCKED);
						cnt_abandoned++;
					}
					continue;
				}
			}
			if (stcb->asoc.peer_supports_prsctp && PR_SCTP_RTX_ENABLED(chk->flags)) {
				/*
				 * Has it been retransmitted tv_sec times?
				 * (for RTX policy, timetodrop.tv_sec holds
				 * the retransmit limit)
				 */
				if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
					if (chk->data) {
						(void)sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
						    SCTP_SO_NOT_LOCKED);
						cnt_abandoned++;
					}
					continue;
				}
			}
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				/* newly marked: update retran count, flight and rwnd accounting */
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				num_mk++;
				if (fir == 0) {
					fir = 1;
					tsnfirst = chk->rec.data.TSN_seq;
				}
				tsnlast = chk->rec.data.TSN_seq;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
					sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
					    0, SCTP_FR_T3_MARKED);
				}
				if (chk->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					chk->whoTo->cwnd -= chk->book_size;
					chk->rec.data.chunk_was_revoked = 0;
				}
				net->marked_retrans++;
				stcb->asoc.marked_retrans++;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO,
					    chk->whoTo->flight_size,
					    chk->book_size,
					    (uintptr_t) chk->whoTo,
					    chk->rec.data.TSN_seq);
				}
				sctp_flight_size_decrease(chk);
				sctp_total_flight_decrease(stcb, chk);
				stcb->asoc.peers_rwnd += chk->send_size;
				stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			}
			chk->sent = SCTP_DATAGRAM_RESEND;
			SCTP_STAT_INCR(sctps_markedretrans);

			/* reset the TSN for striking and other FR stuff */
			chk->rec.data.doing_fast_retransmit = 0;
			/* Clear any time so NO RTT is being done */
			chk->do_rtt = 0;
			if (alt != net) {
				/* re-home the chunk to the alternate destination */
				sctp_free_remote_addr(chk->whoTo);
				chk->no_fr_allowed = 1;
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			} else {
				chk->no_fr_allowed = 0;
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					chk->rec.data.fast_retran_tsn =
					    (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}
			}
			/*
			 * CMT: Do not allow FRs on retransmitted TSNs.
			 */
			if (stcb->asoc.sctp_cmt_on_off > 0) {
				chk->no_fr_allowed = 1;
			}
#ifdef THIS_SHOULD_NOT_BE_DONE
		} else if (chk->sent == SCTP_DATAGRAM_ACKED) {
			/*
			 * remember highest acked one
			 * NOTE(review): could_be_sent has no declaration
			 * visible outside this #ifdef — confirm the
			 * matching declaration is also guarded.
			 */
			could_be_sent = chk;
#endif
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			cnt_mk++;
		}
	}
	if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) {
		/* we did not subtract the same things? */
		audit_tf = 1;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
		sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
	}
#ifdef SCTP_DEBUG
	if (num_mk) {
		/*
		 * NOTE(review): the same information is intentionally(?)
		 * printed twice below with different rwnd formats — looks
		 * like leftover duplication worth removing upstream.
		 */
		SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
		    tsnlast);
		SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n",
		    num_mk, (u_long)stcb->asoc.peers_rwnd);
		SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
		    tsnlast);
		SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%d\n",
		    num_mk,
		    (int)stcb->asoc.peers_rwnd);
	}
#endif
	*num_marked = num_mk;
	*num_abandoned = cnt_abandoned;
	/*
	 * Now check for a ECN Echo that may be stranded And include the
	 * cnt_mk'd to have all resends in the control queue.
	 */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			cnt_mk++;
		}
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			/* re-home stranded ECN Echo to the alternate */
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = alt;
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				cnt_mk++;
			}
			atomic_add_int(&alt->ref_count, 1);
		}
	}
#ifdef THIS_SHOULD_NOT_BE_DONE
	if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
		/* fix it so we retransmit the highest acked anyway */
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		cnt_mk++;
		could_be_sent->sent = SCTP_DATAGRAM_RESEND;
	}
#endif
	if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
#ifdef INVARIANTS
		SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n",
		    cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk);
#endif
#ifndef SCTP_AUDITING_ENABLED
		/* trust the local recount over the cached counter */
		stcb->asoc.sent_queue_retran_cnt = cnt_mk;
#endif
	}
	if (audit_tf) {
		/* flight accounting went inconsistent: rebuild it from the sent queue */
		SCTPDBG(SCTP_DEBUG_TIMER4,
		    "Audit total flight due to negative value net:%p\n",
		    net);
		stcb->asoc.total_flight = 0;
		stcb->asoc.total_flight_count = 0;
		/* Clear all networks flight size */
		TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
			lnets->flight_size = 0;
			SCTPDBG(SCTP_DEBUG_TIMER4,
			    "Net:%p c-f cwnd:%d ssthresh:%d\n",
			    lnets, lnets->cwnd, lnets->ssthresh);
		}
		TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
					    chk->whoTo->flight_size,
					    chk->book_size,
					    (uintptr_t) chk->whoTo,
					    chk->rec.data.TSN_seq);
				}
				sctp_flight_size_increase(chk);
				sctp_total_flight_increase(stcb, chk);
			}
		}
	}
	/*
	 * Setup the ecn nonce re-sync point. We do this since
	 * retransmissions are NOT setup for ECN. This means that due to
	 * Karn's rule, we don't know the total of the peers ecn bits.
	 */
	chk = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (chk == NULL) {
		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
	} else {
		stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
	}
	stcb->asoc.nonce_wait_for_ecne = 0;
	stcb->asoc.nonce_sum_check = 0;
	/*
	 * NOTE(review): an older comment claimed a return of 1 when only a
	 * window probe is outstanding, but this function unconditionally
	 * returns 0 and the caller ignores the value.
	 */
	return (0);
}


/*
 * T3-rtx timer expiry for destination 'net': mark outstanding chunks for
 * retransmission (possibly moving them to an alternate path), back off the
 * RTO/cwnd and run threshold management.  Returns 1 if the association was
 * destroyed, 0 otherwise.
 */
int
sctp_t3rxt_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	int win_probe, num_mk, num_abandoned;

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
		struct sctp_nets *lnet;

		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			if (net == lnet) {
				sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
			} else {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
			}
		}
	}
	/* Find an alternate and mark those for retransmission */
	if ((stcb->asoc.peers_rwnd == 0) &&
	    (stcb->asoc.total_flight < net->mtu)) {
		/* peer closed the window and less than one MTU is in flight */
		SCTP_STAT_INCR(sctps_timowindowprobe);
		win_probe = 1;
	} else {
		win_probe = 0;
	}

	/*
	 * JRS 5/14/07 - If CMT PF is on and the destination if not already
	 * in PF state, set the destination to PF state and store the
	 * current time as the time that the destination was last active. In
	 * addition, find an alternate destination with PF-based
	 * find_alt_net().
992 */ 993 if ((stcb->asoc.sctp_cmt_on_off > 0) && 994 (stcb->asoc.sctp_cmt_pf > 0)) { 995 if ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF) { 996 net->dest_state |= SCTP_ADDR_PF; 997 net->last_active = sctp_get_tick_count(); 998 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from active to PF.\n", 999 net); 1000 } 1001 alt = sctp_find_alternate_net(stcb, net, 2); 1002 } else if (stcb->asoc.sctp_cmt_on_off > 0) { 1003 /* 1004 * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being 1005 * used, then pick dest with largest ssthresh for any 1006 * retransmission. 1007 */ 1008 alt = sctp_find_alternate_net(stcb, net, 1); 1009 /* 1010 * CUCv2: If a different dest is picked for the 1011 * retransmission, then new (rtx-)pseudo_cumack needs to be 1012 * tracked for orig dest. Let CUCv2 track new (rtx-) 1013 * pseudo-cumack always. 1014 */ 1015 net->find_pseudo_cumack = 1; 1016 net->find_rtx_pseudo_cumack = 1; 1017 } else { /* CMT is OFF */ 1018 alt = sctp_find_alternate_net(stcb, net, 0); 1019 } 1020 num_mk = 0; 1021 num_abandoned = 0; 1022 (void)sctp_mark_all_for_resend(stcb, net, alt, win_probe, 1023 &num_mk, &num_abandoned); 1024 /* FR Loss recovery just ended with the T3. */ 1025 stcb->asoc.fast_retran_loss_recovery = 0; 1026 1027 /* CMT FR loss recovery ended with the T3 */ 1028 net->fast_retran_loss_recovery = 0; 1029 1030 /* 1031 * setup the sat loss recovery that prevents satellite cwnd advance. 
1032 */ 1033 stcb->asoc.sat_t3_loss_recovery = 1; 1034 stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq; 1035 1036 /* Backoff the timer and cwnd */ 1037 sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned); 1038 if (win_probe == 0) { 1039 /* We don't do normal threshold management on window probes */ 1040 if (sctp_threshold_management(inp, stcb, net, 1041 stcb->asoc.max_send_times)) { 1042 /* Association was destroyed */ 1043 return (1); 1044 } else { 1045 if (net != stcb->asoc.primary_destination) { 1046 /* send a immediate HB if our RTO is stale */ 1047 struct timeval now; 1048 unsigned int ms_goneby; 1049 1050 (void)SCTP_GETTIME_TIMEVAL(&now); 1051 if (net->last_sent_time.tv_sec) { 1052 ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000; 1053 } else { 1054 ms_goneby = 0; 1055 } 1056 if ((ms_goneby > net->RTO) || (net->RTO == 0)) { 1057 /* 1058 * no recent feed back in an RTO or 1059 * more, request a RTT update 1060 */ 1061 if (sctp_send_hb(stcb, 1, net) < 0) 1062 /* 1063 * Less than 0 means we lost 1064 * the assoc 1065 */ 1066 return (1); 1067 } 1068 } 1069 } 1070 } else { 1071 /* 1072 * For a window probe we don't penalize the net's but only 1073 * the association. This may fail it if SACKs are not coming 1074 * back. If sack's are coming with rwnd locked at 0, we will 1075 * continue to hold things waiting for rwnd to raise 1076 */ 1077 if (sctp_threshold_management(inp, stcb, NULL, 1078 stcb->asoc.max_send_times)) { 1079 /* Association was destroyed */ 1080 return (1); 1081 } 1082 } 1083 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1084 /* Move all pending over too */ 1085 sctp_move_chunks_from_net(stcb, net); 1086 1087 /* 1088 * Get the address that failed, to force a new src address 1089 * selecton and a route allocation. 
1090 */ 1091 if (net->ro._s_addr) { 1092 sctp_free_ifa(net->ro._s_addr); 1093 net->ro._s_addr = NULL; 1094 } 1095 net->src_addr_selected = 0; 1096 1097 /* Force a route allocation too */ 1098 if (net->ro.ro_rt) { 1099 RTFREE(net->ro.ro_rt); 1100 net->ro.ro_rt = NULL; 1101 } 1102 /* Was it our primary? */ 1103 if ((stcb->asoc.primary_destination == net) && (alt != net)) { 1104 /* 1105 * Yes, note it as such and find an alternate note: 1106 * this means HB code must use this to resent the 1107 * primary if it goes active AND if someone does a 1108 * change-primary then this flag must be cleared 1109 * from any net structures. 1110 */ 1111 if (sctp_set_primary_addr(stcb, 1112 (struct sockaddr *)NULL, 1113 alt) == 0) { 1114 net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 1115 } 1116 } 1117 } else if ((stcb->asoc.sctp_cmt_on_off > 0) && 1118 (stcb->asoc.sctp_cmt_pf > 0) && 1119 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) { 1120 /* 1121 * JRS 5/14/07 - If the destination hasn't failed completely 1122 * but is in PF state, a PF-heartbeat needs to be sent 1123 * manually. 1124 */ 1125 if (sctp_send_hb(stcb, 1, net) < 0) 1126 /* Return less than 0 means we lost the association */ 1127 return (1); 1128 } 1129 /* 1130 * Special case for cookie-echo'ed case, we don't do output but must 1131 * await the COOKIE-ACK before retransmission 1132 */ 1133 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 1134 /* 1135 * Here we just reset the timer and start again since we 1136 * have not established the asoc 1137 */ 1138 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 1139 return (0); 1140 } 1141 if (stcb->asoc.peer_supports_prsctp) { 1142 struct sctp_tmit_chunk *lchk; 1143 1144 lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc); 1145 /* C3. 
See if we need to send a Fwd-TSN */ 1146 if (SCTP_TSN_GT(stcb->asoc.advanced_peer_ack_point, stcb->asoc.last_acked_seq)) { 1147 /* 1148 * ISSUE with ECN, see FWD-TSN processing for notes 1149 * on issues that will occur when the ECN NONCE 1150 * stuff is put into SCTP for cross checking. 1151 */ 1152 send_forward_tsn(stcb, &stcb->asoc); 1153 if (lchk) { 1154 /* Assure a timer is up */ 1155 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo); 1156 } 1157 } 1158 } 1159 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1160 sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX); 1161 } 1162 return (0); 1163} 1164 1165int 1166sctp_t1init_timer(struct sctp_inpcb *inp, 1167 struct sctp_tcb *stcb, 1168 struct sctp_nets *net) 1169{ 1170 /* bump the thresholds */ 1171 if (stcb->asoc.delayed_connection) { 1172 /* 1173 * special hook for delayed connection. The library did NOT 1174 * complete the rest of its sends. 1175 */ 1176 stcb->asoc.delayed_connection = 0; 1177 sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); 1178 return (0); 1179 } 1180 if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) { 1181 return (0); 1182 } 1183 if (sctp_threshold_management(inp, stcb, net, 1184 stcb->asoc.max_init_times)) { 1185 /* Association was destroyed */ 1186 return (1); 1187 } 1188 stcb->asoc.dropped_special_cnt = 0; 1189 sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0); 1190 if (stcb->asoc.initial_init_rto_max < net->RTO) { 1191 net->RTO = stcb->asoc.initial_init_rto_max; 1192 } 1193 if (stcb->asoc.numnets > 1) { 1194 /* If we have more than one addr use it */ 1195 struct sctp_nets *alt; 1196 1197 alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0); 1198 if (alt != stcb->asoc.primary_destination) { 1199 sctp_move_chunks_from_net(stcb, stcb->asoc.primary_destination); 1200 stcb->asoc.primary_destination = alt; 1201 } 1202 } 1203 /* Send out a new init */ 1204 sctp_send_initiate(inp, stcb, 
	    SCTP_SO_NOT_LOCKED);
	return (0);
}

/*
 * For cookie and asconf we actually need to find and mark for resend, then
 * increment the resend counter (after all the threshold management stuff of
 * course).
 */
int
sctp_cookie_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *cookie;

	/* first before all else we must find the cookie */
	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
		if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			break;
		}
	}
	if (cookie == NULL) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
			/*
			 * Timer fired in COOKIE_ECHOED state but no
			 * COOKIE-ECHO is queued: protocol violation, abort
			 * the association with an operational error.
			 */
			struct mbuf *oper;

			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
			    0, M_DONTWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    sizeof(uint32_t);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_3);
			}
			inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_4;
			sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
			    oper, SCTP_SO_NOT_LOCKED);
		} else {
#ifdef INVARIANTS
			panic("Cookie timer expires in wrong state?");
#else
			SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc));
			return (0);
#endif
		}
		return (0);
	}
	/* Ok we found the cookie, threshold management next */
	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
	    stcb->asoc.max_init_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * Cleared threshold management, now let's backoff the address &
	 * select an alternate
	 */
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0);
	alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
	if (alt != cookie->whoTo) {
		sctp_free_remote_addr(cookie->whoTo);
		cookie->whoTo = alt;
		atomic_add_int(&alt->ref_count, 1);
	}
	/* Now mark the retran info */
	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	}
	cookie->sent = SCTP_DATAGRAM_RESEND;
	/*
	 * Now call the output routine to kick out the cookie again, Note we
	 * don't mark any chunks for retran so that FR will need to kick in
	 * to move these (or a send timer).
	 */
	return (0);
}

/*
 * Stream-reset timer expiry: back off the destination, rehome the pending
 * STREAM-RESET request (and any stranded ECN ECHO) to an alternate, mark it
 * for retransmission and restart the timer. Returns 1 if the association
 * was destroyed, else 0.
 */
int
sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;

	if (stcb->asoc.stream_reset_outstanding == 0) {
		return (0);
	}
	/* find the existing STRRESET, we use the seq number we sent out on */
	(void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
	if (strrst == NULL) {
		return (0);
	}
	/* do threshold management */
	if (sctp_threshold_management(inp, stcb, strrst->whoTo,
	    stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * Cleared threshold management, now let's backoff the address &
	 * select an alternate
	 */
	sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0, 0);
	alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
	sctp_free_remote_addr(strrst->whoTo);
	strrst->whoTo = alt;
	atomic_add_int(&alt->ref_count, 1);

	/* See if a ECN Echo is also stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/*
		 * If the address went un-reachable, we need to move to
		 * alternates for ALL chk's in queue
		 */
		sctp_move_chunks_from_net(stcb, net);
	}
	/* mark the retran info */
	if (strrst->sent != SCTP_DATAGRAM_RESEND)
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	strrst->sent = SCTP_DATAGRAM_RESEND;

	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
	return (0);
}

/*
 * ASCONF timer expiry: send a fresh ASCONF if none is queued; otherwise do
 * threshold management, back off and rehome the queued ASCONF chunks (and
 * stranded ECN ECHOs) to an alternate, then resend. A peer exceeding
 * max_send_times unanswered ASCONFs is marked ASCONF-incapable. Returns 1
 * if the association was destroyed, else 0.
 */
int
sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *asconf, *chk;

	/* is this a first send, or a retransmission? */
	if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) {
		/* compose a new ASCONF chunk and send it */
		sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
	} else {
		/*
		 * Retransmission of the existing ASCONF is needed
		 */

		/* find the existing ASCONF */
		asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue);
		if (asconf == NULL) {
			return (0);
		}
		/* do threshold management */
		if (sctp_threshold_management(inp, stcb, asconf->whoTo,
		    stcb->asoc.max_send_times)) {
			/* Assoc is over */
			return (1);
		}
		if (asconf->snd_count > stcb->asoc.max_send_times) {
			/*
			 * Something is rotten: our peer is not responding
			 * to ASCONFs but apparently is to other chunks.
			 * i.e. it is not properly handling the chunk type
			 * upper bits. Mark this peer as ASCONF incapable
			 * and cleanup.
			 */
			SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n");
			sctp_asconf_cleanup(stcb, net);
			return (0);
		}
		/*
		 * cleared threshold management, so now backoff the net and
		 * select an alternate
		 */
		sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0, 0);
		alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
		if (asconf->whoTo != alt) {
			sctp_free_remote_addr(asconf->whoTo);
			asconf->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
		/* See if an ECN Echo is also stranded */
		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
			if ((chk->whoTo == net) &&
			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				if (chk->sent != SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				}
				atomic_add_int(&alt->ref_count, 1);
			}
		}
		/* Move every queued ASCONF to the alternate and mark it. */
		TAILQ_FOREACH(chk, &stcb->asoc.asconf_send_queue, sctp_next) {
			if (chk->whoTo != alt) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
			if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT)
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			chk->sent = SCTP_DATAGRAM_RESEND;
		}
		if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
			/*
			 * If the address went un-reachable, we need to move
			 * to the alternate for ALL chunks in queue
			 */
			sctp_move_chunks_from_net(stcb, net);
		}
		/* mark the retran info */
		if (asconf->sent != SCTP_DATAGRAM_RESEND)
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		asconf->sent = SCTP_DATAGRAM_RESEND;

		/* send another ASCONF if any and we can do */
		sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED);
	}
	return (0);
}

/* Mobility adaptation */
void
sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/*
	 * Timer to release the net kept around after a primary-address
	 * deletion (mobility support); drops the reference and clears the
	 * PRIM_DELETED mobility feature flag.
	 */
	if (stcb->asoc.deleted_primary == NULL) {
		SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n");
		sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
		return;
	}
	SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary ");
	SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
	sctp_free_remote_addr(stcb->asoc.deleted_primary);
	stcb->asoc.deleted_primary = NULL;
	sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
	return;
}

/*
 * For the shutdown and shutdown-ack, we do not keep one around on the
 * control queue. This means we must generate a new one and call the general
 * chunk output routine, AFTER having done threshold management.
 * It is assumed that net is non-NULL.
 */
int
sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
	/* second select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third generate a shutdown into the queue for out net */
	sctp_send_shutdown(stcb, alt);

	/* fourth restart timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
	return (0);
}

/*
 * SHUTDOWN-ACK retransmission timer; same four-step pattern as
 * sctp_shutdown_timer() above but regenerates a SHUTDOWN-ACK.
 */
int
sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
	/* second select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third generate a shutdown into the queue for out net */
	sctp_send_shutdown_ack(stcb, alt);

	/* fourth restart timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
	return (0);
}

/*
 * Sanity-audit the per-stream output queues when the association claims
 * queued output but the send/sent queues are empty: repair lost wheel
 * spokes, reconcile counters, and kick the output path if data is stuck.
 */
static void
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb)
{
	struct sctp_stream_out *outs;
	struct sctp_stream_queue_pending *sp;
	unsigned int chks_in_queue = 0;
	int being_filled = 0;

	/*
	 * This function is ONLY called when the send/sent queues are empty.
	 */
	if ((stcb == NULL) || (inp == NULL))
		return;

	if (stcb->asoc.sent_queue_retran_cnt) {
		SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
		    stcb->asoc.sent_queue_retran_cnt);
		stcb->asoc.sent_queue_retran_cnt = 0;
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
		int i, cnt = 0;

		/* Check to see if a spoke fell off the wheel */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
				sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
				cnt++;
			}
		}
		if (cnt) {
			/* yep, we lost a spoke or two */
			SCTP_PRINTF("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
		} else {
			/* no spokes lost, */
			stcb->asoc.total_output_queue_size = 0;
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
		return;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	/* Check to see if some data queued, if so report it */
	TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
		if (!TAILQ_EMPTY(&outs->outqueue)) {
			TAILQ_FOREACH(sp, &outs->outqueue, next) {
				if (sp->msg_is_complete)
					being_filled++;
				chks_in_queue++;
			}
		}
	}
	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
		SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
		    stcb->asoc.stream_queue_cnt, chks_in_queue);
	}
	if (chks_in_queue) {
		/* call the output queue function */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
			/*
			 * Probably should go in and make it go back through
			 * and add fragments allowed
			 */
			if (being_filled == 0) {
				SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
				    chks_in_queue);
			}
		}
	} else {
		SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
		    (u_long)stcb->asoc.total_output_queue_size);
		stcb->asoc.total_output_queue_size = 0;
	}
}

/*
 * Heartbeat timer expiry: back off a non-responding net, audit stuck
 * output queues, then send heartbeats -- one to a selected destination
 * when all addresses are confirmed, or up to sctp_hb_maxburst extra HBs to
 * unconfirmed-but-reachable destinations. Returns 1 if the association was
 * lost, else 0.
 */
int
sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int cnt_of_unconf)
{
	int ret;

	if (net) {
		if (net->hb_responded == 0) {
			if (net->ro._s_addr) {
				/*
				 * Invalidate the src address if we did not
				 * get a response last time.
				 */
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
			}
			sctp_backoff_on_timeout(stcb, net, 1, 0, 0);
		}
		/* Zero PBA, if it needs it */
		if (net->partial_bytes_acked) {
			net->partial_bytes_acked = 0;
		}
	}
	if ((stcb->asoc.total_output_queue_size > 0) &&
	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
		sctp_audit_stream_queues_for_size(inp, stcb);
	}
	/* Send a new HB, this will do threshold management, pick a new dest */
	if (cnt_of_unconf == 0) {
		if (sctp_send_hb(stcb, 0, NULL) < 0) {
			return (1);
		}
	} else {
		/*
		 * this will send out extra hb's up to maxburst if there are
		 * any unconfirmed addresses.
		 */
		uint32_t cnt_sent = 0;

		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    (net->dest_state & SCTP_ADDR_REACHABLE)) {
				cnt_sent++;
				if (net->hb_responded == 0) {
					/* Did we respond last time? */
					if (net->ro._s_addr) {
						sctp_free_ifa(net->ro._s_addr);
						net->ro._s_addr = NULL;
						net->src_addr_selected = 0;
					}
				}
				ret = sctp_send_hb(stcb, 1, net);
				if (ret < 0)
					return 1;
				else if (ret == 0) {
					break;
				}
				if (cnt_sent >= SCTP_BASE_SYSCTL(sctp_hb_maxburst))
					break;
			}
		}
	}
	return (0);
}

/*
 * Path-MTU raise timer: periodically probe whether the path MTU for a net
 * can be raised to the next step, refreshing the cached source address and
 * checking the route's MTU, then restart the timer.
 */
void
sctp_pathmtu_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t next_mtu, mtu;

	next_mtu = sctp_get_next_mtu(inp, net->mtu);

	if ((next_mtu > net->mtu) && (net->port == 0)) {
		if ((net->src_addr_selected == 0) ||
		    (net->ro._s_addr == NULL) ||
		    (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
			if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
			} else if (net->ro._s_addr == NULL) {
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;

					/* KAME hack: embed scopeid */
					(void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
				}
#endif

				net->ro._s_addr = sctp_source_address_selection(inp,
				    stcb,
				    (sctp_route_t *) & net->ro,
				    net, 0, stcb->asoc.vrf_id);
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;

					(void)sa6_recoverscope(sin6);
				}
#endif				/* INET6 */
			}
			if (net->ro._s_addr)
				net->src_addr_selected = 1;
		}
		if (net->ro._s_addr) {
			/*
			 * NOTE(review): "&net->ro._s_addr.sa" applies ".sa"
			 * to a pointer (_s_addr is a struct sctp_ifa *) and
			 * should not compile; upstream FreeBSD passes
			 * &net->ro._l_addr.sa here. Likely a transcription
			 * artifact -- verify against the repository.
			 */
			mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt);
			/*
			 * NOTE(review): dead branch -- the enclosing "if"
			 * requires net->port == 0, so this UDP-encaps
			 * adjustment can never execute here.
			 */
			if (net->port) {
				mtu -= sizeof(struct udphdr);
			}
			if (mtu > next_mtu) {
				net->mtu = next_mtu;
			}
		}
	}
	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
}

/*
 * Autoclose timer: if the association has been idle (no send or receive)
 * for at least sctp_autoclose_ticks, flush output and, once the queues are
 * clean, initiate a graceful SHUTDOWN; otherwise re-arm the timer for the
 * remaining idle time.
 */
void
sctp_autoclose_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct timeval tn, *tim_touse;
	struct sctp_association *asoc;
	int ticks_gone_by;

	(void)SCTP_GETTIME_TIMEVAL(&tn);
	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		/* Auto close is on */
		asoc = &stcb->asoc;
		/* pick the time to use, the most recent of send/receive */
		if (asoc->time_last_rcvd.tv_sec >
		    asoc->time_last_sent.tv_sec) {
			tim_touse = &asoc->time_last_rcvd;
		} else {
			tim_touse = &asoc->time_last_sent;
		}
		/* Now has long enough transpired to autoclose? */
		ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec);
		if ((ticks_gone_by > 0) &&
		    (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
			/*
			 * autoclose time has hit, call the output routine,
			 * which should do nothing just to be SURE we don't
			 * have hanging data. We can then safely check the
			 * queues and know that we are clear to send
			 * shutdown
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
			/* Are we clean? */
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue)) {
				/*
				 * there is nothing queued to send, so I'm
				 * done...
				 */
				if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
					/* only send SHUTDOWN 1st time thru */
					sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			}
		} else {
			/*
			 * No auto close at this time, reset t-o to check
			 * later
			 */
			int tmp;

			/* fool the timer startup to use the time left */
			tmp = asoc->sctp_autoclose_ticks;
			asoc->sctp_autoclose_ticks -= ticks_gone_by;
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    net);
			/* restore the real tick value */
			asoc->sctp_autoclose_ticks = tmp;
		}
	}
}