/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
292116Sjkh */ 30 31/* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $ */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.c 218186 2011-02-02 11:13:23Z rrs $"); 35 36#define _IP_VHL 37#include <netinet/sctp_os.h> 38#include <netinet/sctp_pcb.h> 39#ifdef INET6 40#endif 41#include <netinet/sctp_var.h> 42#include <netinet/sctp_sysctl.h> 43#include <netinet/sctp_timer.h> 44#include <netinet/sctputil.h> 45#include <netinet/sctp_output.h> 46#include <netinet/sctp_header.h> 47#include <netinet/sctp_indata.h> 48#include <netinet/sctp_asconf.h> 49#include <netinet/sctp_input.h> 50#include <netinet/sctp.h> 51#include <netinet/sctp_uio.h> 52#include <netinet/udp.h> 53 54 55void 56sctp_early_fr_timer(struct sctp_inpcb *inp, 57 struct sctp_tcb *stcb, 58 struct sctp_nets *net) 59{ 60 struct sctp_tmit_chunk *chk, *pchk; 61 struct timeval now, min_wait, tv; 62 unsigned int cur_rtt, cnt = 0, cnt_resend = 0; 63 64 /* an early FR is occuring. */ 65 (void)SCTP_GETTIME_TIMEVAL(&now); 66 /* get cur rto in micro-seconds */ 67 if (net->lastsa == 0) { 68 /* Hmm no rtt estimate yet? */ 69 cur_rtt = stcb->asoc.initial_rto >> 2; 70 } else { 71 72 cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 73 } 74 if (cur_rtt < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) { 75 cur_rtt = SCTP_BASE_SYSCTL(sctp_early_fr_msec); 76 } 77 cur_rtt *= 1000; 78 tv.tv_sec = cur_rtt / 1000000; 79 tv.tv_usec = cur_rtt % 1000000; 80 min_wait = now; 81 timevalsub(&min_wait, &tv); 82 if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { 83 /* 84 * if we hit here, we don't have enough seconds on the clock 85 * to account for the RTO. We just let the lower seconds be 86 * the bounds and don't worry about it. This may mean we 87 * will mark a lot more than we should. 
88 */ 89 min_wait.tv_sec = min_wait.tv_usec = 0; 90 } 91 TAILQ_FOREACH_REVERSE_SAFE(chk, &stcb->asoc.sent_queue, sctpchunk_listhead, sctp_next, pchk) { 92 if (chk->whoTo != net) { 93 continue; 94 } 95 if (chk->sent == SCTP_DATAGRAM_RESEND) 96 cnt_resend++; 97 else if ((chk->sent > SCTP_DATAGRAM_UNSENT) && 98 (chk->sent < SCTP_DATAGRAM_RESEND)) { 99 /* pending, may need retran */ 100 if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) { 101 /* 102 * we have reached a chunk that was sent 103 * some seconds past our min.. forget it we 104 * will find no more to send. 105 */ 106 continue; 107 } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) { 108 /* 109 * we must look at the micro seconds to 110 * know. 111 */ 112 if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { 113 /* 114 * ok it was sent after our boundary 115 * time. 116 */ 117 continue; 118 } 119 } 120 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_EARLYFR_LOGGING_ENABLE) { 121 sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, 122 4, SCTP_FR_MARKED_EARLY); 123 } 124 SCTP_STAT_INCR(sctps_earlyfrmrkretrans); 125 chk->sent = SCTP_DATAGRAM_RESEND; 126 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 127 /* double book size since we are doing an early FR */ 128 chk->book_size_scale++; 129 cnt += chk->send_size; 130 if ((cnt + net->flight_size) > net->cwnd) { 131 /* Mark all we could possibly resend */ 132 break; 133 } 134 } 135 } 136 if (cnt) { 137 /* 138 * JRS - Use the congestion control given in the congestion 139 * control module 140 */ 141 stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer(inp, stcb, net); 142 } else if (cnt_resend) { 143 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED); 144 } 145 /* Restart it? 
*/ 146 if (net->flight_size < net->cwnd) { 147 SCTP_STAT_INCR(sctps_earlyfrstrtmr); 148 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 149 } 150} 151 152void 153sctp_audit_retranmission_queue(struct sctp_association *asoc) 154{ 155 struct sctp_tmit_chunk *chk; 156 157 SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n", 158 asoc->sent_queue_retran_cnt, 159 asoc->sent_queue_cnt); 160 asoc->sent_queue_retran_cnt = 0; 161 asoc->sent_queue_cnt = 0; 162 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 163 if (chk->sent == SCTP_DATAGRAM_RESEND) { 164 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 165 } 166 asoc->sent_queue_cnt++; 167 } 168 TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 169 if (chk->sent == SCTP_DATAGRAM_RESEND) { 170 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 171 } 172 } 173 TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) { 174 if (chk->sent == SCTP_DATAGRAM_RESEND) { 175 sctp_ucount_incr(asoc->sent_queue_retran_cnt); 176 } 177 } 178 SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n", 179 asoc->sent_queue_retran_cnt, 180 asoc->sent_queue_cnt); 181} 182 183int 184sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 185 struct sctp_nets *net, uint16_t threshold) 186{ 187 if (net) { 188 net->error_count++; 189 SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n", 190 net, net->error_count, 191 net->failure_threshold); 192 if (net->error_count > net->failure_threshold) { 193 /* We had a threshold failure */ 194 if (net->dest_state & SCTP_ADDR_REACHABLE) { 195 net->dest_state &= ~SCTP_ADDR_REACHABLE; 196 net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 197 net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 198 if (net == stcb->asoc.primary_destination) { 199 net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 200 } 201 /* 202 * JRS 5/14/07 - If a destination is 203 * unreachable, the PF bit is turned off. 
204 * This allows an unambiguous use of the PF 205 * bit for destinations that are reachable 206 * but potentially failed. If the 207 * destination is set to the unreachable 208 * state, also set the destination to the PF 209 * state. 210 */ 211 /* 212 * Add debug message here if destination is 213 * not in PF state. 214 */ 215 /* Stop any running T3 timers here? */ 216 if ((stcb->asoc.sctp_cmt_on_off > 0) && 217 (stcb->asoc.sctp_cmt_pf > 0)) { 218 net->dest_state &= ~SCTP_ADDR_PF; 219 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n", 220 net); 221 } 222 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 223 stcb, 224 SCTP_FAILED_THRESHOLD, 225 (void *)net, SCTP_SO_NOT_LOCKED); 226 } 227 } 228 /*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE 229 *********ROUTING CODE 230 */ 231 /*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE 232 *********ROUTING CODE 233 */ 234 } 235 if (stcb == NULL) 236 return (0); 237 238 if (net) { 239 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) { 240 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 241 sctp_misc_ints(SCTP_THRESHOLD_INCR, 242 stcb->asoc.overall_error_count, 243 (stcb->asoc.overall_error_count + 1), 244 SCTP_FROM_SCTP_TIMER, 245 __LINE__); 246 } 247 stcb->asoc.overall_error_count++; 248 } 249 } else { 250 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 251 sctp_misc_ints(SCTP_THRESHOLD_INCR, 252 stcb->asoc.overall_error_count, 253 (stcb->asoc.overall_error_count + 1), 254 SCTP_FROM_SCTP_TIMER, 255 __LINE__); 256 } 257 stcb->asoc.overall_error_count++; 258 } 259 SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n", 260 &stcb->asoc, stcb->asoc.overall_error_count, 261 (uint32_t) threshold, 262 ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state)); 263 /* 264 * We specifically do not do >= to give the assoc one more change 265 * before we fail it. 
266 */ 267 if (stcb->asoc.overall_error_count > threshold) { 268 /* Abort notification sends a ULP notify */ 269 struct mbuf *oper; 270 271 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 272 0, M_DONTWAIT, 1, MT_DATA); 273 if (oper) { 274 struct sctp_paramhdr *ph; 275 uint32_t *ippp; 276 277 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 278 sizeof(uint32_t); 279 ph = mtod(oper, struct sctp_paramhdr *); 280 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 281 ph->param_length = htons(SCTP_BUF_LEN(oper)); 282 ippp = (uint32_t *) (ph + 1); 283 *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1); 284 } 285 inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1; 286 sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper, SCTP_SO_NOT_LOCKED); 287 return (1); 288 } 289 return (0); 290} 291 292/* 293 * sctp_find_alternate_net() returns a non-NULL pointer as long 294 * the argument net is non-NULL. 295 */ 296struct sctp_nets * 297sctp_find_alternate_net(struct sctp_tcb *stcb, 298 struct sctp_nets *net, 299 int mode) 300{ 301 /* Find and return an alternate network if possible */ 302 struct sctp_nets *alt, *mnet, *min_errors_net = NULL, *max_cwnd_net = NULL; 303 int once; 304 305 /* JRS 5/14/07 - Initialize min_errors to an impossible value. */ 306 int min_errors = -1; 307 uint32_t max_cwnd = 0; 308 309 if (stcb->asoc.numnets == 1) { 310 /* No others but net */ 311 return (TAILQ_FIRST(&stcb->asoc.nets)); 312 } 313 /* 314 * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate 315 * net algorithm. This algorithm chooses the active destination (not 316 * in PF state) with the largest cwnd value. If all destinations are 317 * in PF state, unreachable, or unconfirmed, choose the desination 318 * that is in PF state with the lowest error count. In case of a 319 * tie, choose the destination that was most recently active. 
320 */ 321 if (mode == 2) { 322 TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { 323 /* 324 * JRS 5/14/07 - If the destination is unreachable 325 * or unconfirmed, skip it. 326 */ 327 if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 328 (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) { 329 continue; 330 } 331 /* 332 * JRS 5/14/07 - If the destination is reachable 333 * but in PF state, compare the error count of the 334 * destination to the minimum error count seen thus 335 * far. Store the destination with the lower error 336 * count. If the error counts are equal, store the 337 * destination that was most recently active. 338 */ 339 if (mnet->dest_state & SCTP_ADDR_PF) { 340 /* 341 * JRS 5/14/07 - If the destination under 342 * consideration is the current destination, 343 * work as if the error count is one higher. 344 * The actual error count will not be 345 * incremented until later in the t3 346 * handler. 347 */ 348 if (mnet == net) { 349 if (min_errors == -1) { 350 min_errors = mnet->error_count + 1; 351 min_errors_net = mnet; 352 } else if (mnet->error_count + 1 < min_errors) { 353 min_errors = mnet->error_count + 1; 354 min_errors_net = mnet; 355 } else if (mnet->error_count + 1 == min_errors 356 && mnet->last_active > min_errors_net->last_active) { 357 min_errors_net = mnet; 358 min_errors = mnet->error_count + 1; 359 } 360 continue; 361 } else { 362 if (min_errors == -1) { 363 min_errors = mnet->error_count; 364 min_errors_net = mnet; 365 } else if (mnet->error_count < min_errors) { 366 min_errors = mnet->error_count; 367 min_errors_net = mnet; 368 } else if (mnet->error_count == min_errors 369 && mnet->last_active > min_errors_net->last_active) { 370 min_errors_net = mnet; 371 min_errors = mnet->error_count; 372 } 373 continue; 374 } 375 } 376 /* 377 * JRS 5/14/07 - If the destination is reachable and 378 * not in PF state, compare the cwnd of the 379 * destination to the highest cwnd seen thus far. 
380 * Store the destination with the higher cwnd value. 381 * If the cwnd values are equal, randomly choose one 382 * of the two destinations. 383 */ 384 if (max_cwnd < mnet->cwnd) { 385 max_cwnd_net = mnet; 386 max_cwnd = mnet->cwnd; 387 } else if (max_cwnd == mnet->cwnd) { 388 uint32_t rndval; 389 uint8_t this_random; 390 391 if (stcb->asoc.hb_random_idx > 3) { 392 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 393 memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values)); 394 this_random = stcb->asoc.hb_random_values[0]; 395 stcb->asoc.hb_random_idx++; 396 stcb->asoc.hb_ect_randombit = 0; 397 } else { 398 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 399 stcb->asoc.hb_random_idx++; 400 stcb->asoc.hb_ect_randombit = 0; 401 } 402 if (this_random % 2 == 1) { 403 max_cwnd_net = mnet; 404 max_cwnd = mnet->cwnd; /* Useless? */ 405 } 406 } 407 } 408 /* 409 * JRS 5/14/07 - After all destination have been considered 410 * as alternates, check to see if there was some active 411 * destination (not in PF state). If not, check to see if 412 * there was some PF destination with the minimum number of 413 * errors. If not, return the original destination. If 414 * there is a min_errors_net, remove the PF flag from that 415 * destination, set the cwnd to one or two MTUs, and return 416 * the destination as an alt. If there was some active 417 * destination with a highest cwnd, return the destination 418 * as an alt. 
419 */ 420 if (max_cwnd_net == NULL) { 421 if (min_errors_net == NULL) { 422 return (net); 423 } 424 min_errors_net->dest_state &= ~SCTP_ADDR_PF; 425 min_errors_net->cwnd = min_errors_net->mtu * stcb->asoc.sctp_cmt_pf; 426 if (SCTP_OS_TIMER_PENDING(&min_errors_net->rxt_timer.timer)) { 427 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 428 stcb, min_errors_net, 429 SCTP_FROM_SCTP_TIMER + SCTP_LOC_2); 430 } 431 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to active with %d errors.\n", 432 min_errors_net, min_errors_net->error_count); 433 return (min_errors_net); 434 } else { 435 return (max_cwnd_net); 436 } 437 } 438 /* 439 * JRS 5/14/07 - If mode is set to 1, use the CMT policy for 440 * choosing an alternate net. 441 */ 442 else if (mode == 1) { 443 TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { 444 if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 445 (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) { 446 /* 447 * will skip ones that are not-reachable or 448 * unconfirmed 449 */ 450 continue; 451 } 452 if (max_cwnd < mnet->cwnd) { 453 max_cwnd_net = mnet; 454 max_cwnd = mnet->cwnd; 455 } else if (max_cwnd == mnet->cwnd) { 456 uint32_t rndval; 457 uint8_t this_random; 458 459 if (stcb->asoc.hb_random_idx > 3) { 460 rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 461 memcpy(stcb->asoc.hb_random_values, &rndval, 462 sizeof(stcb->asoc.hb_random_values)); 463 this_random = stcb->asoc.hb_random_values[0]; 464 stcb->asoc.hb_random_idx = 0; 465 stcb->asoc.hb_ect_randombit = 0; 466 } else { 467 this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 468 stcb->asoc.hb_random_idx++; 469 stcb->asoc.hb_ect_randombit = 0; 470 } 471 if (this_random % 2) { 472 max_cwnd_net = mnet; 473 max_cwnd = mnet->cwnd; 474 } 475 } 476 } 477 if (max_cwnd_net) { 478 return (max_cwnd_net); 479 } 480 } 481 mnet = net; 482 once = 0; 483 484 if (mnet == NULL) { 485 mnet = TAILQ_FIRST(&stcb->asoc.nets); 486 if (mnet == NULL) { 487 
return (NULL); 488 } 489 } 490 do { 491 alt = TAILQ_NEXT(mnet, sctp_next); 492 if (alt == NULL) { 493 once++; 494 if (once > 1) { 495 break; 496 } 497 alt = TAILQ_FIRST(&stcb->asoc.nets); 498 if (alt == NULL) { 499 return (NULL); 500 } 501 } 502 if (alt->ro.ro_rt == NULL) { 503 if (alt->ro._s_addr) { 504 sctp_free_ifa(alt->ro._s_addr); 505 alt->ro._s_addr = NULL; 506 } 507 alt->src_addr_selected = 0; 508 } 509 /* sa_ignore NO_NULL_CHK */ 510 if (((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) && 511 (alt->ro.ro_rt != NULL) && 512 (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))) { 513 /* Found a reachable address */ 514 break; 515 } 516 mnet = alt; 517 } while (alt != NULL); 518 519 if (alt == NULL) { 520 /* Case where NO insv network exists (dormant state) */ 521 /* we rotate destinations */ 522 once = 0; 523 mnet = net; 524 do { 525 if (mnet == NULL) { 526 return (TAILQ_FIRST(&stcb->asoc.nets)); 527 } 528 alt = TAILQ_NEXT(mnet, sctp_next); 529 if (alt == NULL) { 530 once++; 531 if (once > 1) { 532 break; 533 } 534 alt = TAILQ_FIRST(&stcb->asoc.nets); 535 } 536 /* sa_ignore NO_NULL_CHK */ 537 if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) && 538 (alt != net)) { 539 /* Found an alternate address */ 540 break; 541 } 542 mnet = alt; 543 } while (alt != NULL); 544 } 545 if (alt == NULL) { 546 return (net); 547 } 548 return (alt); 549} 550 551static void 552sctp_backoff_on_timeout(struct sctp_tcb *stcb, 553 struct sctp_nets *net, 554 int win_probe, 555 int num_marked, int num_abandoned) 556{ 557 if (net->RTO == 0) { 558 net->RTO = stcb->asoc.minrto; 559 } 560 net->RTO <<= 1; 561 if (net->RTO > stcb->asoc.maxrto) { 562 net->RTO = stcb->asoc.maxrto; 563 } 564 if ((win_probe == 0) && (num_marked || num_abandoned)) { 565 /* We don't apply penalty to window probe scenarios */ 566 /* JRS - Use the congestion control given in the CC module */ 567 stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net); 568 } 569} 570 571#ifndef INVARIANTS 572static 
void 573sctp_recover_sent_list(struct sctp_tcb *stcb) 574{ 575 struct sctp_tmit_chunk *chk, *nchk; 576 struct sctp_association *asoc; 577 578 asoc = &stcb->asoc; 579 TAILQ_FOREACH_SAFE(chk, &asoc->sent_queue, sctp_next, nchk) { 580 if (SCTP_TSN_GE(asoc->last_acked_seq, chk->rec.data.TSN_seq)) { 581 SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n", 582 chk, chk->rec.data.TSN_seq, asoc->last_acked_seq); 583 TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 584 if (chk->pr_sctp_on) { 585 if (asoc->pr_sctp_cnt != 0) 586 asoc->pr_sctp_cnt--; 587 } 588 if (chk->data) { 589 /* sa_ignore NO_NULL_CHK */ 590 sctp_free_bufspace(stcb, asoc, chk, 1); 591 sctp_m_freem(chk->data); 592 chk->data = NULL; 593 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(chk->flags)) { 594 asoc->sent_queue_cnt_removeable--; 595 } 596 } 597 asoc->sent_queue_cnt--; 598 sctp_free_a_chunk(stcb, chk); 599 } 600 } 601 SCTP_PRINTF("after recover order is as follows\n"); 602 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 603 SCTP_PRINTF("chk:%p TSN:%x\n", chk, chk->rec.data.TSN_seq); 604 } 605} 606 607#endif 608 609static int 610sctp_mark_all_for_resend(struct sctp_tcb *stcb, 611 struct sctp_nets *net, 612 struct sctp_nets *alt, 613 int window_probe, 614 int *num_marked, 615 int *num_abandoned) 616{ 617 618 /* 619 * Mark all chunks (well not all) that were sent to *net for 620 * retransmission. Move them to alt for there destination as well... 621 * We only mark chunks that have been outstanding long enough to 622 * have received feed-back. 
623 */ 624 struct sctp_tmit_chunk *chk, *nchk; 625 struct sctp_nets *lnets; 626 struct timeval now, min_wait, tv; 627 int cur_rtt; 628 int cnt_abandoned; 629 int audit_tf, num_mk, fir; 630 unsigned int cnt_mk; 631 uint32_t orig_flight, orig_tf; 632 uint32_t tsnlast, tsnfirst; 633 int recovery_cnt = 0; 634 635 636 /* none in flight now */ 637 audit_tf = 0; 638 fir = 0; 639 /* 640 * figure out how long a data chunk must be pending before we can 641 * mark it .. 642 */ 643 (void)SCTP_GETTIME_TIMEVAL(&now); 644 /* get cur rto in micro-seconds */ 645 cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1); 646 cur_rtt *= 1000; 647 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 648 sctp_log_fr(cur_rtt, 649 stcb->asoc.peers_rwnd, 650 window_probe, 651 SCTP_FR_T3_MARK_TIME); 652 sctp_log_fr(net->flight_size, 653 SCTP_OS_TIMER_PENDING(&net->fr_timer.timer), 654 SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer), 655 SCTP_FR_CWND_REPORT); 656 sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT); 657 } 658 tv.tv_sec = cur_rtt / 1000000; 659 tv.tv_usec = cur_rtt % 1000000; 660 min_wait = now; 661 timevalsub(&min_wait, &tv); 662 if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { 663 /* 664 * if we hit here, we don't have enough seconds on the clock 665 * to account for the RTO. We just let the lower seconds be 666 * the bounds and don't worry about it. This may mean we 667 * will mark a lot more than we should. 668 */ 669 min_wait.tv_sec = min_wait.tv_usec = 0; 670 } 671 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 672 sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME); 673 sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME); 674 } 675 /* 676 * Our rwnd will be incorrect here since we are not adding back the 677 * cnt * mbuf but we will fix that down below. 
678 */ 679 orig_flight = net->flight_size; 680 orig_tf = stcb->asoc.total_flight; 681 682 net->fast_retran_ip = 0; 683 /* Now on to each chunk */ 684 cnt_abandoned = 0; 685 num_mk = cnt_mk = 0; 686 tsnfirst = tsnlast = 0; 687#ifndef INVARIANTS 688start_again: 689#endif 690 TAILQ_FOREACH_SAFE(chk, &stcb->asoc.sent_queue, sctp_next, nchk) { 691 if (SCTP_TSN_GE(stcb->asoc.last_acked_seq, chk->rec.data.TSN_seq)) { 692 /* Strange case our list got out of order? */ 693 SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x", 694 (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.TSN_seq); 695 recovery_cnt++; 696#ifdef INVARIANTS 697 panic("last acked >= chk on sent-Q"); 698#else 699 SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt); 700 sctp_recover_sent_list(stcb); 701 if (recovery_cnt < 10) { 702 goto start_again; 703 } else { 704 SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt); 705 } 706#endif 707 } 708 if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) { 709 /* 710 * found one to mark: If it is less than 711 * DATAGRAM_ACKED it MUST not be a skipped or marked 712 * TSN but instead one that is either already set 713 * for retransmission OR one that needs 714 * retransmission. 715 */ 716 717 /* validate its been outstanding long enough */ 718 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 719 sctp_log_fr(chk->rec.data.TSN_seq, 720 chk->sent_rcv_time.tv_sec, 721 chk->sent_rcv_time.tv_usec, 722 SCTP_FR_T3_MARK_TIME); 723 } 724 if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) { 725 /* 726 * we have reached a chunk that was sent 727 * some seconds past our min.. forget it we 728 * will find no more to send. 
729 */ 730 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 731 sctp_log_fr(0, 732 chk->sent_rcv_time.tv_sec, 733 chk->sent_rcv_time.tv_usec, 734 SCTP_FR_T3_STOPPED); 735 } 736 continue; 737 } else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) && 738 (window_probe == 0)) { 739 /* 740 * we must look at the micro seconds to 741 * know. 742 */ 743 if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { 744 /* 745 * ok it was sent after our boundary 746 * time. 747 */ 748 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 749 sctp_log_fr(0, 750 chk->sent_rcv_time.tv_sec, 751 chk->sent_rcv_time.tv_usec, 752 SCTP_FR_T3_STOPPED); 753 } 754 continue; 755 } 756 } 757 if (stcb->asoc.peer_supports_prsctp && PR_SCTP_TTL_ENABLED(chk->flags)) { 758 /* Is it expired? */ 759 if (timevalcmp(&now, &chk->rec.data.timetodrop, >)) { 760 /* Yes so drop it */ 761 if (chk->data) { 762 (void)sctp_release_pr_sctp_chunk(stcb, 763 chk, 764 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 765 SCTP_SO_NOT_LOCKED); 766 cnt_abandoned++; 767 } 768 continue; 769 } 770 } 771 if (stcb->asoc.peer_supports_prsctp && PR_SCTP_RTX_ENABLED(chk->flags)) { 772 /* Has it been retransmitted tv_sec times? 
*/ 773 if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) { 774 if (chk->data) { 775 (void)sctp_release_pr_sctp_chunk(stcb, 776 chk, 777 (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 778 SCTP_SO_NOT_LOCKED); 779 cnt_abandoned++; 780 } 781 continue; 782 } 783 } 784 if (chk->sent < SCTP_DATAGRAM_RESEND) { 785 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 786 num_mk++; 787 if (fir == 0) { 788 fir = 1; 789 tsnfirst = chk->rec.data.TSN_seq; 790 } 791 tsnlast = chk->rec.data.TSN_seq; 792 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 793 sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, 794 0, SCTP_FR_T3_MARKED); 795 } 796 if (chk->rec.data.chunk_was_revoked) { 797 /* deflate the cwnd */ 798 chk->whoTo->cwnd -= chk->book_size; 799 chk->rec.data.chunk_was_revoked = 0; 800 } 801 net->marked_retrans++; 802 stcb->asoc.marked_retrans++; 803 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 804 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO, 805 chk->whoTo->flight_size, 806 chk->book_size, 807 (uintptr_t) chk->whoTo, 808 chk->rec.data.TSN_seq); 809 } 810 sctp_flight_size_decrease(chk); 811 sctp_total_flight_decrease(stcb, chk); 812 stcb->asoc.peers_rwnd += chk->send_size; 813 stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 814 } 815 chk->sent = SCTP_DATAGRAM_RESEND; 816 SCTP_STAT_INCR(sctps_markedretrans); 817 818 /* reset the TSN for striking and other FR stuff */ 819 chk->rec.data.doing_fast_retransmit = 0; 820 /* Clear any time so NO RTT is being done */ 821 chk->do_rtt = 0; 822 if (alt != net) { 823 sctp_free_remote_addr(chk->whoTo); 824 chk->no_fr_allowed = 1; 825 chk->whoTo = alt; 826 atomic_add_int(&alt->ref_count, 1); 827 } else { 828 chk->no_fr_allowed = 0; 829 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 830 chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 831 } else { 832 chk->rec.data.fast_retran_tsn = 
(TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 833 } 834 } 835 /* 836 * CMT: Do not allow FRs on retransmitted TSNs. 837 */ 838 if (stcb->asoc.sctp_cmt_on_off > 0) { 839 chk->no_fr_allowed = 1; 840 } 841#ifdef THIS_SHOULD_NOT_BE_DONE 842 } else if (chk->sent == SCTP_DATAGRAM_ACKED) { 843 /* remember highest acked one */ 844 could_be_sent = chk; 845#endif 846 } 847 if (chk->sent == SCTP_DATAGRAM_RESEND) { 848 cnt_mk++; 849 } 850 } 851 if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) { 852 /* we did not subtract the same things? */ 853 audit_tf = 1; 854 } 855 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 856 sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT); 857 } 858#ifdef SCTP_DEBUG 859 if (num_mk) { 860 SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", 861 tsnlast); 862 SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n", 863 num_mk, (u_long)stcb->asoc.peers_rwnd); 864 SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", 865 tsnlast); 866 SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%d\n", 867 num_mk, 868 (int)stcb->asoc.peers_rwnd); 869 } 870#endif 871 *num_marked = num_mk; 872 *num_abandoned = cnt_abandoned; 873 /* 874 * Now check for a ECN Echo that may be stranded And include the 875 * cnt_mk'd to have all resends in the control queue. 
876 */ 877 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 878 if (chk->sent == SCTP_DATAGRAM_RESEND) { 879 cnt_mk++; 880 } 881 if ((chk->whoTo == net) && 882 (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 883 sctp_free_remote_addr(chk->whoTo); 884 chk->whoTo = alt; 885 if (chk->sent != SCTP_DATAGRAM_RESEND) { 886 chk->sent = SCTP_DATAGRAM_RESEND; 887 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 888 cnt_mk++; 889 } 890 atomic_add_int(&alt->ref_count, 1); 891 } 892 } 893#ifdef THIS_SHOULD_NOT_BE_DONE 894 if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) { 895 /* fix it so we retransmit the highest acked anyway */ 896 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 897 cnt_mk++; 898 could_be_sent->sent = SCTP_DATAGRAM_RESEND; 899 } 900#endif 901 if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) { 902#ifdef INVARIANTS 903 SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n", 904 cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk); 905#endif 906#ifndef SCTP_AUDITING_ENABLED 907 stcb->asoc.sent_queue_retran_cnt = cnt_mk; 908#endif 909 } 910 if (audit_tf) { 911 SCTPDBG(SCTP_DEBUG_TIMER4, 912 "Audit total flight due to negative value net:%p\n", 913 net); 914 stcb->asoc.total_flight = 0; 915 stcb->asoc.total_flight_count = 0; 916 /* Clear all networks flight size */ 917 TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) { 918 lnets->flight_size = 0; 919 SCTPDBG(SCTP_DEBUG_TIMER4, 920 "Net:%p c-f cwnd:%d ssthresh:%d\n", 921 lnets, lnets->cwnd, lnets->ssthresh); 922 } 923 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 924 if (chk->sent < SCTP_DATAGRAM_RESEND) { 925 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 926 sctp_misc_ints(SCTP_FLIGHT_LOG_UP, 927 chk->whoTo->flight_size, 928 chk->book_size, 929 (uintptr_t) chk->whoTo, 930 chk->rec.data.TSN_seq); 931 } 932 sctp_flight_size_increase(chk); 933 sctp_total_flight_increase(stcb, chk); 934 } 935 } 936 } 937 /* We return 
1 if we only have a window probe outstanding */ 938 return (0); 939} 940 941 942int 943sctp_t3rxt_timer(struct sctp_inpcb *inp, 944 struct sctp_tcb *stcb, 945 struct sctp_nets *net) 946{ 947 struct sctp_nets *alt; 948 int win_probe, num_mk, num_abandoned; 949 950 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 951 sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT); 952 } 953 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 954 struct sctp_nets *lnet; 955 956 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 957 if (net == lnet) { 958 sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3); 959 } else { 960 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3); 961 } 962 } 963 } 964 /* Find an alternate and mark those for retransmission */ 965 if ((stcb->asoc.peers_rwnd == 0) && 966 (stcb->asoc.total_flight < net->mtu)) { 967 SCTP_STAT_INCR(sctps_timowindowprobe); 968 win_probe = 1; 969 } else { 970 win_probe = 0; 971 } 972 973 /* 974 * JRS 5/14/07 - If CMT PF is on and the destination if not already 975 * in PF state, set the destination to PF state and store the 976 * current time as the time that the destination was last active. In 977 * addition, find an alternate destination with PF-based 978 * find_alt_net(). 979 */ 980 if ((stcb->asoc.sctp_cmt_on_off > 0) && 981 (stcb->asoc.sctp_cmt_pf > 0)) { 982 if ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF) { 983 net->dest_state |= SCTP_ADDR_PF; 984 net->last_active = sctp_get_tick_count(); 985 SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from active to PF.\n", 986 net); 987 } 988 alt = sctp_find_alternate_net(stcb, net, 2); 989 } else if (stcb->asoc.sctp_cmt_on_off > 0) { 990 /* 991 * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being 992 * used, then pick dest with largest ssthresh for any 993 * retransmission. 
994 */ 995 alt = sctp_find_alternate_net(stcb, net, 1); 996 /* 997 * CUCv2: If a different dest is picked for the 998 * retransmission, then new (rtx-)pseudo_cumack needs to be 999 * tracked for orig dest. Let CUCv2 track new (rtx-) 1000 * pseudo-cumack always. 1001 */ 1002 net->find_pseudo_cumack = 1; 1003 net->find_rtx_pseudo_cumack = 1; 1004 } else { /* CMT is OFF */ 1005 alt = sctp_find_alternate_net(stcb, net, 0); 1006 } 1007 num_mk = 0; 1008 num_abandoned = 0; 1009 (void)sctp_mark_all_for_resend(stcb, net, alt, win_probe, 1010 &num_mk, &num_abandoned); 1011 /* FR Loss recovery just ended with the T3. */ 1012 stcb->asoc.fast_retran_loss_recovery = 0; 1013 1014 /* CMT FR loss recovery ended with the T3 */ 1015 net->fast_retran_loss_recovery = 0; 1016 1017 /* 1018 * setup the sat loss recovery that prevents satellite cwnd advance. 1019 */ 1020 stcb->asoc.sat_t3_loss_recovery = 1; 1021 stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq; 1022 1023 /* Backoff the timer and cwnd */ 1024 sctp_backoff_on_timeout(stcb, net, win_probe, num_mk, num_abandoned); 1025 if (win_probe == 0) { 1026 /* We don't do normal threshold management on window probes */ 1027 if (sctp_threshold_management(inp, stcb, net, 1028 stcb->asoc.max_send_times)) { 1029 /* Association was destroyed */ 1030 return (1); 1031 } else { 1032 if (net != stcb->asoc.primary_destination) { 1033 /* send a immediate HB if our RTO is stale */ 1034 struct timeval now; 1035 unsigned int ms_goneby; 1036 1037 (void)SCTP_GETTIME_TIMEVAL(&now); 1038 if (net->last_sent_time.tv_sec) { 1039 ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000; 1040 } else { 1041 ms_goneby = 0; 1042 } 1043 if ((ms_goneby > net->RTO) || (net->RTO == 0)) { 1044 /* 1045 * no recent feed back in an RTO or 1046 * more, request a RTT update 1047 */ 1048 if (sctp_send_hb(stcb, 1, net) < 0) 1049 /* 1050 * Less than 0 means we lost 1051 * the assoc 1052 */ 1053 return (1); 1054 } 1055 } 1056 } 1057 } else { 1058 /* 1059 * For a 
window probe we don't penalize the net's but only 1060 * the association. This may fail it if SACKs are not coming 1061 * back. If sack's are coming with rwnd locked at 0, we will 1062 * continue to hold things waiting for rwnd to raise 1063 */ 1064 if (sctp_threshold_management(inp, stcb, NULL, 1065 stcb->asoc.max_send_times)) { 1066 /* Association was destroyed */ 1067 return (1); 1068 } 1069 } 1070 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1071 /* Move all pending over too */ 1072 sctp_move_chunks_from_net(stcb, net); 1073 1074 /* 1075 * Get the address that failed, to force a new src address 1076 * selecton and a route allocation. 1077 */ 1078 if (net->ro._s_addr) { 1079 sctp_free_ifa(net->ro._s_addr); 1080 net->ro._s_addr = NULL; 1081 } 1082 net->src_addr_selected = 0; 1083 1084 /* Force a route allocation too */ 1085 if (net->ro.ro_rt) { 1086 RTFREE(net->ro.ro_rt); 1087 net->ro.ro_rt = NULL; 1088 } 1089 /* Was it our primary? */ 1090 if ((stcb->asoc.primary_destination == net) && (alt != net)) { 1091 /* 1092 * Yes, note it as such and find an alternate note: 1093 * this means HB code must use this to resent the 1094 * primary if it goes active AND if someone does a 1095 * change-primary then this flag must be cleared 1096 * from any net structures. 1097 */ 1098 if (sctp_set_primary_addr(stcb, 1099 (struct sockaddr *)NULL, 1100 alt) == 0) { 1101 net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 1102 } 1103 } 1104 } else if ((stcb->asoc.sctp_cmt_on_off > 0) && 1105 (stcb->asoc.sctp_cmt_pf > 0) && 1106 ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) { 1107 /* 1108 * JRS 5/14/07 - If the destination hasn't failed completely 1109 * but is in PF state, a PF-heartbeat needs to be sent 1110 * manually. 
1111 */ 1112 if (sctp_send_hb(stcb, 1, net) < 0) 1113 /* Return less than 0 means we lost the association */ 1114 return (1); 1115 } 1116 /* 1117 * Special case for cookie-echo'ed case, we don't do output but must 1118 * await the COOKIE-ACK before retransmission 1119 */ 1120 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 1121 /* 1122 * Here we just reset the timer and start again since we 1123 * have not established the asoc 1124 */ 1125 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 1126 return (0); 1127 } 1128 if (stcb->asoc.peer_supports_prsctp) { 1129 struct sctp_tmit_chunk *lchk; 1130 1131 lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc); 1132 /* C3. See if we need to send a Fwd-TSN */ 1133 if (SCTP_TSN_GT(stcb->asoc.advanced_peer_ack_point, stcb->asoc.last_acked_seq)) { 1134 send_forward_tsn(stcb, &stcb->asoc); 1135 if (lchk) { 1136 /* Assure a timer is up */ 1137 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo); 1138 } 1139 } 1140 } 1141 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1142 sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX); 1143 } 1144 return (0); 1145} 1146 1147int 1148sctp_t1init_timer(struct sctp_inpcb *inp, 1149 struct sctp_tcb *stcb, 1150 struct sctp_nets *net) 1151{ 1152 /* bump the thresholds */ 1153 if (stcb->asoc.delayed_connection) { 1154 /* 1155 * special hook for delayed connection. The library did NOT 1156 * complete the rest of its sends. 
1157 */ 1158 stcb->asoc.delayed_connection = 0; 1159 sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); 1160 return (0); 1161 } 1162 if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) { 1163 return (0); 1164 } 1165 if (sctp_threshold_management(inp, stcb, net, 1166 stcb->asoc.max_init_times)) { 1167 /* Association was destroyed */ 1168 return (1); 1169 } 1170 stcb->asoc.dropped_special_cnt = 0; 1171 sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0, 0); 1172 if (stcb->asoc.initial_init_rto_max < net->RTO) { 1173 net->RTO = stcb->asoc.initial_init_rto_max; 1174 } 1175 if (stcb->asoc.numnets > 1) { 1176 /* If we have more than one addr use it */ 1177 struct sctp_nets *alt; 1178 1179 alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0); 1180 if (alt != stcb->asoc.primary_destination) { 1181 sctp_move_chunks_from_net(stcb, stcb->asoc.primary_destination); 1182 stcb->asoc.primary_destination = alt; 1183 } 1184 } 1185 /* Send out a new init */ 1186 sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); 1187 return (0); 1188} 1189 1190/* 1191 * For cookie and asconf we actually need to find and mark for resend, then 1192 * increment the resend counter (after all the threshold management stuff of 1193 * course). 1194 */ 1195int 1196sctp_cookie_timer(struct sctp_inpcb *inp, 1197 struct sctp_tcb *stcb, 1198 struct sctp_nets *net) 1199{ 1200 struct sctp_nets *alt; 1201 struct sctp_tmit_chunk *cookie; 1202 1203 /* first before all else we must find the cookie */ 1204 TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) { 1205 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 1206 break; 1207 } 1208 } 1209 if (cookie == NULL) { 1210 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 1211 /* FOOBAR! 
*/ 1212 struct mbuf *oper; 1213 1214 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1215 0, M_DONTWAIT, 1, MT_DATA); 1216 if (oper) { 1217 struct sctp_paramhdr *ph; 1218 uint32_t *ippp; 1219 1220 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 1221 sizeof(uint32_t); 1222 ph = mtod(oper, struct sctp_paramhdr *); 1223 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1224 ph->param_length = htons(SCTP_BUF_LEN(oper)); 1225 ippp = (uint32_t *) (ph + 1); 1226 *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_3); 1227 } 1228 inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_4; 1229 sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR, 1230 oper, SCTP_SO_NOT_LOCKED); 1231 } else { 1232#ifdef INVARIANTS 1233 panic("Cookie timer expires in wrong state?"); 1234#else 1235 SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc)); 1236 return (0); 1237#endif 1238 } 1239 return (0); 1240 } 1241 /* Ok we found the cookie, threshold management next */ 1242 if (sctp_threshold_management(inp, stcb, cookie->whoTo, 1243 stcb->asoc.max_init_times)) { 1244 /* Assoc is over */ 1245 return (1); 1246 } 1247 /* 1248 * cleared theshold management now lets backoff the address & select 1249 * an alternate 1250 */ 1251 stcb->asoc.dropped_special_cnt = 0; 1252 sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0, 0); 1253 alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0); 1254 if (alt != cookie->whoTo) { 1255 sctp_free_remote_addr(cookie->whoTo); 1256 cookie->whoTo = alt; 1257 atomic_add_int(&alt->ref_count, 1); 1258 } 1259 /* Now mark the retran info */ 1260 if (cookie->sent != SCTP_DATAGRAM_RESEND) { 1261 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1262 } 1263 cookie->sent = SCTP_DATAGRAM_RESEND; 1264 /* 1265 * Now call the output routine to kick out the cookie again, Note we 1266 * don't mark any chunks for retran so that FR will need to kick in 1267 * to move these (or a send timer). 
1268 */ 1269 return (0); 1270} 1271 1272int 1273sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1274 struct sctp_nets *net) 1275{ 1276 struct sctp_nets *alt; 1277 struct sctp_tmit_chunk *strrst = NULL, *chk = NULL; 1278 1279 if (stcb->asoc.stream_reset_outstanding == 0) { 1280 return (0); 1281 } 1282 /* find the existing STRRESET, we use the seq number we sent out on */ 1283 (void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst); 1284 if (strrst == NULL) { 1285 return (0); 1286 } 1287 /* do threshold management */ 1288 if (sctp_threshold_management(inp, stcb, strrst->whoTo, 1289 stcb->asoc.max_send_times)) { 1290 /* Assoc is over */ 1291 return (1); 1292 } 1293 /* 1294 * cleared theshold management now lets backoff the address & select 1295 * an alternate 1296 */ 1297 sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0, 0); 1298 alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0); 1299 sctp_free_remote_addr(strrst->whoTo); 1300 strrst->whoTo = alt; 1301 atomic_add_int(&alt->ref_count, 1); 1302 1303 /* See if a ECN Echo is also stranded */ 1304 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1305 if ((chk->whoTo == net) && 1306 (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1307 sctp_free_remote_addr(chk->whoTo); 1308 if (chk->sent != SCTP_DATAGRAM_RESEND) { 1309 chk->sent = SCTP_DATAGRAM_RESEND; 1310 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1311 } 1312 chk->whoTo = alt; 1313 atomic_add_int(&alt->ref_count, 1); 1314 } 1315 } 1316 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1317 /* 1318 * If the address went un-reachable, we need to move to 1319 * alternates for ALL chk's in queue 1320 */ 1321 sctp_move_chunks_from_net(stcb, net); 1322 } 1323 /* mark the retran info */ 1324 if (strrst->sent != SCTP_DATAGRAM_RESEND) 1325 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1326 strrst->sent = SCTP_DATAGRAM_RESEND; 1327 1328 /* restart the timer */ 1329 sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, 
stcb, strrst->whoTo); 1330 return (0); 1331} 1332 1333int 1334sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1335 struct sctp_nets *net) 1336{ 1337 struct sctp_nets *alt; 1338 struct sctp_tmit_chunk *asconf, *chk; 1339 1340 /* is this a first send, or a retransmission? */ 1341 if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) { 1342 /* compose a new ASCONF chunk and send it */ 1343 sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED); 1344 } else { 1345 /* 1346 * Retransmission of the existing ASCONF is needed 1347 */ 1348 1349 /* find the existing ASCONF */ 1350 asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue); 1351 if (asconf == NULL) { 1352 return (0); 1353 } 1354 /* do threshold management */ 1355 if (sctp_threshold_management(inp, stcb, asconf->whoTo, 1356 stcb->asoc.max_send_times)) { 1357 /* Assoc is over */ 1358 return (1); 1359 } 1360 if (asconf->snd_count > stcb->asoc.max_send_times) { 1361 /* 1362 * Something is rotten: our peer is not responding 1363 * to ASCONFs but apparently is to other chunks. 1364 * i.e. it is not properly handling the chunk type 1365 * upper bits. Mark this peer as ASCONF incapable 1366 * and cleanup. 
1367 */ 1368 SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n"); 1369 sctp_asconf_cleanup(stcb, net); 1370 return (0); 1371 } 1372 /* 1373 * cleared threshold management, so now backoff the net and 1374 * select an alternate 1375 */ 1376 sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0, 0); 1377 alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0); 1378 if (asconf->whoTo != alt) { 1379 sctp_free_remote_addr(asconf->whoTo); 1380 asconf->whoTo = alt; 1381 atomic_add_int(&alt->ref_count, 1); 1382 } 1383 /* See if an ECN Echo is also stranded */ 1384 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1385 if ((chk->whoTo == net) && 1386 (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1387 sctp_free_remote_addr(chk->whoTo); 1388 chk->whoTo = alt; 1389 if (chk->sent != SCTP_DATAGRAM_RESEND) { 1390 chk->sent = SCTP_DATAGRAM_RESEND; 1391 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1392 } 1393 atomic_add_int(&alt->ref_count, 1); 1394 } 1395 } 1396 TAILQ_FOREACH(chk, &stcb->asoc.asconf_send_queue, sctp_next) { 1397 if (chk->whoTo != alt) { 1398 sctp_free_remote_addr(chk->whoTo); 1399 chk->whoTo = alt; 1400 atomic_add_int(&alt->ref_count, 1); 1401 } 1402 if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT) 1403 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1404 chk->sent = SCTP_DATAGRAM_RESEND; 1405 } 1406 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1407 /* 1408 * If the address went un-reachable, we need to move 1409 * to the alternate for ALL chunks in queue 1410 */ 1411 sctp_move_chunks_from_net(stcb, net); 1412 } 1413 /* mark the retran info */ 1414 if (asconf->sent != SCTP_DATAGRAM_RESEND) 1415 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1416 asconf->sent = SCTP_DATAGRAM_RESEND; 1417 1418 /* send another ASCONF if any and we can do */ 1419 sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED); 1420 } 1421 return (0); 1422} 1423 1424/* Mobility adaptation */ 1425void 
1426sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1427 struct sctp_nets *net) 1428{ 1429 if (stcb->asoc.deleted_primary == NULL) { 1430 SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n"); 1431 sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1432 return; 1433 } 1434 SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary "); 1435 SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa); 1436 sctp_free_remote_addr(stcb->asoc.deleted_primary); 1437 stcb->asoc.deleted_primary = NULL; 1438 sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1439 return; 1440} 1441 1442/* 1443 * For the shutdown and shutdown-ack, we do not keep one around on the 1444 * control queue. This means we must generate a new one and call the general 1445 * chunk output routine, AFTER having done threshold management. 1446 * It is assumed that net is non-NULL. 1447 */ 1448int 1449sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1450 struct sctp_nets *net) 1451{ 1452 struct sctp_nets *alt; 1453 1454 /* first threshold managment */ 1455 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1456 /* Assoc is over */ 1457 return (1); 1458 } 1459 sctp_backoff_on_timeout(stcb, net, 1, 0, 0); 1460 /* second select an alternative */ 1461 alt = sctp_find_alternate_net(stcb, net, 0); 1462 1463 /* third generate a shutdown into the queue for out net */ 1464 sctp_send_shutdown(stcb, alt); 1465 1466 /* fourth restart timer */ 1467 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt); 1468 return (0); 1469} 1470 1471int 1472sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1473 struct sctp_nets *net) 1474{ 1475 struct sctp_nets *alt; 1476 1477 /* first threshold managment */ 1478 if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1479 /* Assoc is over */ 1480 return (1); 1481 } 1482 
sctp_backoff_on_timeout(stcb, net, 1, 0, 0); 1483 /* second select an alternative */ 1484 alt = sctp_find_alternate_net(stcb, net, 0); 1485 1486 /* third generate a shutdown into the queue for out net */ 1487 sctp_send_shutdown_ack(stcb, alt); 1488 1489 /* fourth restart timer */ 1490 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt); 1491 return (0); 1492} 1493 1494static void 1495sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp, 1496 struct sctp_tcb *stcb) 1497{ 1498 struct sctp_stream_queue_pending *sp; 1499 unsigned int i, chks_in_queue = 0; 1500 int being_filled = 0; 1501 1502 /* 1503 * This function is ONLY called when the send/sent queues are empty. 1504 */ 1505 if ((stcb == NULL) || (inp == NULL)) 1506 return; 1507 1508 if (stcb->asoc.sent_queue_retran_cnt) { 1509 SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n", 1510 stcb->asoc.sent_queue_retran_cnt); 1511 stcb->asoc.sent_queue_retran_cnt = 0; 1512 } 1513 SCTP_TCB_SEND_LOCK(stcb); 1514 if (stcb->asoc.ss_functions.sctp_ss_is_empty(stcb, &stcb->asoc)) { 1515 int cnt = 0; 1516 1517 /* Check to see if a spoke fell off the wheel */ 1518 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1519 if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { 1520 stcb->asoc.ss_functions.sctp_ss_add_to_stream(stcb, 1521 &stcb->asoc, 1522 &stcb->asoc.strmout[i], 1523 NULL, 1524 1); 1525 cnt++; 1526 } 1527 } 1528 if (cnt) { 1529 /* yep, we lost a spoke or two */ 1530 SCTP_PRINTF("Found an additional %d streams NOT on outwheel, corrected\n", cnt); 1531 } else { 1532 /* no spokes lost, */ 1533 stcb->asoc.total_output_queue_size = 0; 1534 } 1535 SCTP_TCB_SEND_UNLOCK(stcb); 1536 return; 1537 } 1538 SCTP_TCB_SEND_UNLOCK(stcb); 1539 /* Check to see if some data queued, if so report it */ 1540 for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1541 if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { 1542 TAILQ_FOREACH(sp, &stcb->asoc.strmout[i].outqueue, next) { 1543 if (sp->msg_is_complete) 1544 being_filled++; 
1545 chks_in_queue++; 1546 } 1547 } 1548 } 1549 if (chks_in_queue != stcb->asoc.stream_queue_cnt) { 1550 SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n", 1551 stcb->asoc.stream_queue_cnt, chks_in_queue); 1552 } 1553 if (chks_in_queue) { 1554 /* call the output queue function */ 1555 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED); 1556 if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) && 1557 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 1558 /* 1559 * Probably should go in and make it go back through 1560 * and add fragments allowed 1561 */ 1562 if (being_filled == 0) { 1563 SCTP_PRINTF("Still nothing moved %d chunks are stuck\n", 1564 chks_in_queue); 1565 } 1566 } 1567 } else { 1568 SCTP_PRINTF("Found no chunks on any queue tot:%lu\n", 1569 (u_long)stcb->asoc.total_output_queue_size); 1570 stcb->asoc.total_output_queue_size = 0; 1571 } 1572} 1573 1574int 1575sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1576 struct sctp_nets *net, int cnt_of_unconf) 1577{ 1578 int ret; 1579 1580 if (net) { 1581 if (net->hb_responded == 0) { 1582 if (net->ro._s_addr) { 1583 /* 1584 * Invalidate the src address if we did not 1585 * get a response last time. 
1586 */ 1587 sctp_free_ifa(net->ro._s_addr); 1588 net->ro._s_addr = NULL; 1589 net->src_addr_selected = 0; 1590 } 1591 sctp_backoff_on_timeout(stcb, net, 1, 0, 0); 1592 } 1593 /* Zero PBA, if it needs it */ 1594 if (net->partial_bytes_acked) { 1595 net->partial_bytes_acked = 0; 1596 } 1597 } 1598 if ((stcb->asoc.total_output_queue_size > 0) && 1599 (TAILQ_EMPTY(&stcb->asoc.send_queue)) && 1600 (TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 1601 sctp_audit_stream_queues_for_size(inp, stcb); 1602 } 1603 /* Send a new HB, this will do threshold managment, pick a new dest */ 1604 if (cnt_of_unconf == 0) { 1605 if (sctp_send_hb(stcb, 0, NULL) < 0) { 1606 return (1); 1607 } 1608 } else { 1609 /* 1610 * this will send out extra hb's up to maxburst if there are 1611 * any unconfirmed addresses. 1612 */ 1613 uint32_t cnt_sent = 0; 1614 1615 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1616 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 1617 (net->dest_state & SCTP_ADDR_REACHABLE)) { 1618 cnt_sent++; 1619 if (net->hb_responded == 0) { 1620 /* Did we respond last time? 
*/ 1621 if (net->ro._s_addr) { 1622 sctp_free_ifa(net->ro._s_addr); 1623 net->ro._s_addr = NULL; 1624 net->src_addr_selected = 0; 1625 } 1626 } 1627 ret = sctp_send_hb(stcb, 1, net); 1628 if (ret < 0) 1629 return 1; 1630 else if (ret == 0) { 1631 break; 1632 } 1633 if (SCTP_BASE_SYSCTL(sctp_hb_maxburst) && 1634 (cnt_sent >= SCTP_BASE_SYSCTL(sctp_hb_maxburst))) 1635 break; 1636 } 1637 } 1638 } 1639 return (0); 1640} 1641 1642void 1643sctp_pathmtu_timer(struct sctp_inpcb *inp, 1644 struct sctp_tcb *stcb, 1645 struct sctp_nets *net) 1646{ 1647 uint32_t next_mtu, mtu; 1648 1649 next_mtu = sctp_get_next_mtu(inp, net->mtu); 1650 1651 if ((next_mtu > net->mtu) && (net->port == 0)) { 1652 if ((net->src_addr_selected == 0) || 1653 (net->ro._s_addr == NULL) || 1654 (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { 1655 if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { 1656 sctp_free_ifa(net->ro._s_addr); 1657 net->ro._s_addr = NULL; 1658 net->src_addr_selected = 0; 1659 } else if (net->ro._s_addr == NULL) { 1660#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE) 1661 if (net->ro._l_addr.sa.sa_family == AF_INET6) { 1662 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1663 1664 /* KAME hack: embed scopeid */ 1665 (void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)); 1666 } 1667#endif 1668 1669 net->ro._s_addr = sctp_source_address_selection(inp, 1670 stcb, 1671 (sctp_route_t *) & net->ro, 1672 net, 0, stcb->asoc.vrf_id); 1673#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE) 1674 if (net->ro._l_addr.sa.sa_family == AF_INET6) { 1675 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1676 1677 (void)sa6_recoverscope(sin6); 1678 } 1679#endif /* INET6 */ 1680 } 1681 if (net->ro._s_addr) 1682 net->src_addr_selected = 1; 1683 } 1684 if (net->ro._s_addr) { 1685 mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt); 1686 if (net->port) { 1687 mtu -= 
sizeof(struct udphdr); 1688 } 1689 if (mtu > next_mtu) { 1690 net->mtu = next_mtu; 1691 } 1692 } 1693 } 1694 /* restart the timer */ 1695 sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 1696} 1697 1698void 1699sctp_autoclose_timer(struct sctp_inpcb *inp, 1700 struct sctp_tcb *stcb, 1701 struct sctp_nets *net) 1702{ 1703 struct timeval tn, *tim_touse; 1704 struct sctp_association *asoc; 1705 int ticks_gone_by; 1706 1707 (void)SCTP_GETTIME_TIMEVAL(&tn); 1708 if (stcb->asoc.sctp_autoclose_ticks && 1709 sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1710 /* Auto close is on */ 1711 asoc = &stcb->asoc; 1712 /* pick the time to use */ 1713 if (asoc->time_last_rcvd.tv_sec > 1714 asoc->time_last_sent.tv_sec) { 1715 tim_touse = &asoc->time_last_rcvd; 1716 } else { 1717 tim_touse = &asoc->time_last_sent; 1718 } 1719 /* Now has long enough transpired to autoclose? */ 1720 ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec); 1721 if ((ticks_gone_by > 0) && 1722 (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) { 1723 /* 1724 * autoclose time has hit, call the output routine, 1725 * which should do nothing just to be SURE we don't 1726 * have hanging data. We can then safely check the 1727 * queues and know that we are clear to send 1728 * shutdown 1729 */ 1730 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 1731 /* Are we clean? */ 1732 if (TAILQ_EMPTY(&asoc->send_queue) && 1733 TAILQ_EMPTY(&asoc->sent_queue)) { 1734 /* 1735 * there is nothing queued to send, so I'm 1736 * done... 
1737 */ 1738 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1739 /* only send SHUTDOWN 1st time thru */ 1740 sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 1741 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 1742 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1743 SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1744 } 1745 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 1746 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 1747 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1748 stcb->sctp_ep, stcb, 1749 asoc->primary_destination); 1750 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1751 stcb->sctp_ep, stcb, 1752 asoc->primary_destination); 1753 } 1754 } 1755 } else { 1756 /* 1757 * No auto close at this time, reset t-o to check 1758 * later 1759 */ 1760 int tmp; 1761 1762 /* fool the timer startup to use the time left */ 1763 tmp = asoc->sctp_autoclose_ticks; 1764 asoc->sctp_autoclose_ticks -= ticks_gone_by; 1765 sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1766 net); 1767 /* restore the real tick value */ 1768 asoc->sctp_autoclose_ticks = tmp; 1769 } 1770 } 1771} 1772