/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
29163953Srrs */ 30163953Srrs 31163953Srrs/* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $ */ 32163953Srrs 33163953Srrs#include <sys/cdefs.h> 34163953Srrs__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.c 210493 2010-07-26 09:20:55Z rrs $"); 35163953Srrs 36163953Srrs#define _IP_VHL 37166086Srrs#include <netinet/sctp_os.h> 38163953Srrs#include <netinet/sctp_pcb.h> 39163953Srrs#ifdef INET6 40163953Srrs#endif 41163953Srrs#include <netinet/sctp_var.h> 42167598Srrs#include <netinet/sctp_sysctl.h> 43163953Srrs#include <netinet/sctp_timer.h> 44163953Srrs#include <netinet/sctputil.h> 45163953Srrs#include <netinet/sctp_output.h> 46163953Srrs#include <netinet/sctp_header.h> 47163953Srrs#include <netinet/sctp_indata.h> 48163953Srrs#include <netinet/sctp_asconf.h> 49163953Srrs#include <netinet/sctp_input.h> 50163953Srrs#include <netinet/sctp.h> 51163953Srrs#include <netinet/sctp_uio.h> 52185694Srrs#include <netinet/udp.h> 53163953Srrs 54163953Srrs 55163953Srrsvoid 56163953Srrssctp_early_fr_timer(struct sctp_inpcb *inp, 57163953Srrs struct sctp_tcb *stcb, 58163953Srrs struct sctp_nets *net) 59163953Srrs{ 60163953Srrs struct sctp_tmit_chunk *chk, *tp2; 61163953Srrs struct timeval now, min_wait, tv; 62163953Srrs unsigned int cur_rtt, cnt = 0, cnt_resend = 0; 63163953Srrs 64163953Srrs /* an early FR is occuring. */ 65169378Srrs (void)SCTP_GETTIME_TIMEVAL(&now); 66163953Srrs /* get cur rto in micro-seconds */ 67163953Srrs if (net->lastsa == 0) { 68163953Srrs /* Hmm no rtt estimate yet? 
*/ 69163953Srrs cur_rtt = stcb->asoc.initial_rto >> 2; 70163953Srrs } else { 71163953Srrs 72163953Srrs cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 73163953Srrs } 74179783Srrs if (cur_rtt < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) { 75179783Srrs cur_rtt = SCTP_BASE_SYSCTL(sctp_early_fr_msec); 76163953Srrs } 77163953Srrs cur_rtt *= 1000; 78163953Srrs tv.tv_sec = cur_rtt / 1000000; 79163953Srrs tv.tv_usec = cur_rtt % 1000000; 80163953Srrs min_wait = now; 81163953Srrs timevalsub(&min_wait, &tv); 82163953Srrs if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { 83163953Srrs /* 84163953Srrs * if we hit here, we don't have enough seconds on the clock 85163953Srrs * to account for the RTO. We just let the lower seconds be 86163953Srrs * the bounds and don't worry about it. This may mean we 87163953Srrs * will mark a lot more than we should. 88163953Srrs */ 89163953Srrs min_wait.tv_sec = min_wait.tv_usec = 0; 90163953Srrs } 91163953Srrs chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead); 92163953Srrs for (; chk != NULL; chk = tp2) { 93163953Srrs tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next); 94163953Srrs if (chk->whoTo != net) { 95163953Srrs continue; 96163953Srrs } 97163953Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) 98163953Srrs cnt_resend++; 99163953Srrs else if ((chk->sent > SCTP_DATAGRAM_UNSENT) && 100163953Srrs (chk->sent < SCTP_DATAGRAM_RESEND)) { 101163953Srrs /* pending, may need retran */ 102163953Srrs if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) { 103163953Srrs /* 104163953Srrs * we have reached a chunk that was sent 105163953Srrs * some seconds past our min.. forget it we 106163953Srrs * will find no more to send. 107163953Srrs */ 108163953Srrs continue; 109163953Srrs } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) { 110163953Srrs /* 111163953Srrs * we must look at the micro seconds to 112163953Srrs * know. 
113163953Srrs */ 114163953Srrs if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { 115163953Srrs /* 116163953Srrs * ok it was sent after our boundary 117163953Srrs * time. 118163953Srrs */ 119163953Srrs continue; 120163953Srrs } 121163953Srrs } 122179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_EARLYFR_LOGGING_ENABLE) { 123170744Srrs sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, 124170744Srrs 4, SCTP_FR_MARKED_EARLY); 125170744Srrs } 126163953Srrs SCTP_STAT_INCR(sctps_earlyfrmrkretrans); 127163953Srrs chk->sent = SCTP_DATAGRAM_RESEND; 128163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 129163953Srrs /* double book size since we are doing an early FR */ 130163953Srrs chk->book_size_scale++; 131163953Srrs cnt += chk->send_size; 132163953Srrs if ((cnt + net->flight_size) > net->cwnd) { 133163953Srrs /* Mark all we could possibly resend */ 134163953Srrs break; 135163953Srrs } 136163953Srrs } 137163953Srrs } 138163953Srrs if (cnt) { 139163953Srrs /* 140171440Srrs * JRS - Use the congestion control given in the congestion 141171440Srrs * control module 142163953Srrs */ 143171440Srrs stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer(inp, stcb, net); 144163953Srrs } else if (cnt_resend) { 145172090Srrs sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED); 146163953Srrs } 147163953Srrs /* Restart it? 
*/ 148163953Srrs if (net->flight_size < net->cwnd) { 149163953Srrs SCTP_STAT_INCR(sctps_earlyfrstrtmr); 150163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 151163953Srrs } 152163953Srrs} 153163953Srrs 154163953Srrsvoid 155163953Srrssctp_audit_retranmission_queue(struct sctp_association *asoc) 156163953Srrs{ 157163953Srrs struct sctp_tmit_chunk *chk; 158163953Srrs 159169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n", 160169420Srrs asoc->sent_queue_retran_cnt, 161169420Srrs asoc->sent_queue_cnt); 162163953Srrs asoc->sent_queue_retran_cnt = 0; 163163953Srrs asoc->sent_queue_cnt = 0; 164163953Srrs TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 165163953Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) { 166163953Srrs sctp_ucount_incr(asoc->sent_queue_retran_cnt); 167163953Srrs } 168163953Srrs asoc->sent_queue_cnt++; 169163953Srrs } 170163953Srrs TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 171163953Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) { 172163953Srrs sctp_ucount_incr(asoc->sent_queue_retran_cnt); 173163953Srrs } 174163953Srrs } 175179157Srrs TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) { 176179157Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) { 177179157Srrs sctp_ucount_incr(asoc->sent_queue_retran_cnt); 178179157Srrs } 179179157Srrs } 180169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n", 181169420Srrs asoc->sent_queue_retran_cnt, 182169420Srrs asoc->sent_queue_cnt); 183163953Srrs} 184163953Srrs 185163953Srrsint 186163953Srrssctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 187163953Srrs struct sctp_nets *net, uint16_t threshold) 188163953Srrs{ 189163953Srrs if (net) { 190163953Srrs net->error_count++; 191169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n", 192169420Srrs net, net->error_count, 193169420Srrs net->failure_threshold); 194163953Srrs if (net->error_count > net->failure_threshold) { 
195163953Srrs /* We had a threshold failure */ 196163953Srrs if (net->dest_state & SCTP_ADDR_REACHABLE) { 197163953Srrs net->dest_state &= ~SCTP_ADDR_REACHABLE; 198163953Srrs net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 199167598Srrs net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 200163953Srrs if (net == stcb->asoc.primary_destination) { 201163953Srrs net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 202163953Srrs } 203171440Srrs /* 204171440Srrs * JRS 5/14/07 - If a destination is 205171440Srrs * unreachable, the PF bit is turned off. 206171440Srrs * This allows an unambiguous use of the PF 207171440Srrs * bit for destinations that are reachable 208171440Srrs * but potentially failed. If the 209171440Srrs * destination is set to the unreachable 210171440Srrs * state, also set the destination to the PF 211171440Srrs * state. 212171440Srrs */ 213171440Srrs /* 214171440Srrs * Add debug message here if destination is 215171440Srrs * not in PF state. 216171440Srrs */ 217171440Srrs /* Stop any running T3 timers here? 
*/ 218179783Srrs if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) { 219171440Srrs net->dest_state &= ~SCTP_ADDR_PF; 220171440Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n", 221171440Srrs net); 222171440Srrs } 223163953Srrs sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 224163953Srrs stcb, 225163953Srrs SCTP_FAILED_THRESHOLD, 226172090Srrs (void *)net, SCTP_SO_NOT_LOCKED); 227163953Srrs } 228163953Srrs } 229163953Srrs /*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE 230163953Srrs *********ROUTING CODE 231163953Srrs */ 232163953Srrs /*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE 233163953Srrs *********ROUTING CODE 234163953Srrs */ 235163953Srrs } 236163953Srrs if (stcb == NULL) 237163953Srrs return (0); 238163953Srrs 239163953Srrs if (net) { 240163953Srrs if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) { 241179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 242171943Srrs sctp_misc_ints(SCTP_THRESHOLD_INCR, 243171943Srrs stcb->asoc.overall_error_count, 244171943Srrs (stcb->asoc.overall_error_count + 1), 245171943Srrs SCTP_FROM_SCTP_TIMER, 246171943Srrs __LINE__); 247171943Srrs } 248163953Srrs stcb->asoc.overall_error_count++; 249163953Srrs } 250163953Srrs } else { 251179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 252171943Srrs sctp_misc_ints(SCTP_THRESHOLD_INCR, 253171943Srrs stcb->asoc.overall_error_count, 254171943Srrs (stcb->asoc.overall_error_count + 1), 255171943Srrs SCTP_FROM_SCTP_TIMER, 256171943Srrs __LINE__); 257171943Srrs } 258163953Srrs stcb->asoc.overall_error_count++; 259163953Srrs } 260169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n", 261169420Srrs &stcb->asoc, stcb->asoc.overall_error_count, 262169420Srrs (uint32_t) threshold, 263169420Srrs ((net == NULL) ? 
(uint32_t) 0 : (uint32_t) net->dest_state)); 264163953Srrs /* 265163953Srrs * We specifically do not do >= to give the assoc one more change 266163953Srrs * before we fail it. 267163953Srrs */ 268163953Srrs if (stcb->asoc.overall_error_count > threshold) { 269163953Srrs /* Abort notification sends a ULP notify */ 270163953Srrs struct mbuf *oper; 271163953Srrs 272163953Srrs oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 273163953Srrs 0, M_DONTWAIT, 1, MT_DATA); 274163953Srrs if (oper) { 275163953Srrs struct sctp_paramhdr *ph; 276163953Srrs uint32_t *ippp; 277163953Srrs 278165647Srrs SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 279163953Srrs sizeof(uint32_t); 280163953Srrs ph = mtod(oper, struct sctp_paramhdr *); 281163953Srrs ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 282165647Srrs ph->param_length = htons(SCTP_BUF_LEN(oper)); 283163953Srrs ippp = (uint32_t *) (ph + 1); 284165220Srrs *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1); 285163953Srrs } 286165220Srrs inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1; 287172090Srrs sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper, SCTP_SO_NOT_LOCKED); 288163953Srrs return (1); 289163953Srrs } 290163953Srrs return (0); 291163953Srrs} 292163953Srrs 293163953Srrsstruct sctp_nets * 294163953Srrssctp_find_alternate_net(struct sctp_tcb *stcb, 295163953Srrs struct sctp_nets *net, 296171440Srrs int mode) 297163953Srrs{ 298163953Srrs /* Find and return an alternate network if possible */ 299171440Srrs struct sctp_nets *alt, *mnet, *min_errors_net = NULL, *max_cwnd_net = NULL; 300163953Srrs int once; 301163953Srrs 302171440Srrs /* JRS 5/14/07 - Initialize min_errors to an impossible value. 
*/ 303171440Srrs int min_errors = -1; 304171440Srrs uint32_t max_cwnd = 0; 305171440Srrs 306163953Srrs if (stcb->asoc.numnets == 1) { 307163953Srrs /* No others but net */ 308163953Srrs return (TAILQ_FIRST(&stcb->asoc.nets)); 309163953Srrs } 310171440Srrs /* 311171440Srrs * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate 312171440Srrs * net algorithm. This algorithm chooses the active destination (not 313171440Srrs * in PF state) with the largest cwnd value. If all destinations are 314171440Srrs * in PF state, unreachable, or unconfirmed, choose the desination 315171440Srrs * that is in PF state with the lowest error count. In case of a 316171440Srrs * tie, choose the destination that was most recently active. 317171440Srrs */ 318171440Srrs if (mode == 2) { 319163953Srrs TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { 320171440Srrs /* 321171440Srrs * JRS 5/14/07 - If the destination is unreachable 322171440Srrs * or unconfirmed, skip it. 323171440Srrs */ 324163953Srrs if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 325171440Srrs (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) { 326171440Srrs continue; 327171440Srrs } 328171440Srrs /* 329171440Srrs * JRS 5/14/07 - If the destination is reachable 330171440Srrs * but in PF state, compare the error count of the 331171440Srrs * destination to the minimum error count seen thus 332171440Srrs * far. Store the destination with the lower error 333171440Srrs * count. If the error counts are equal, store the 334171440Srrs * destination that was most recently active. 335171440Srrs */ 336171440Srrs if (mnet->dest_state & SCTP_ADDR_PF) { 337171440Srrs /* 338171440Srrs * JRS 5/14/07 - If the destination under 339171440Srrs * consideration is the current destination, 340171440Srrs * work as if the error count is one higher. 341171440Srrs * The actual error count will not be 342171440Srrs * incremented until later in the t3 343171440Srrs * handler. 
344171440Srrs */ 345171440Srrs if (mnet == net) { 346171440Srrs if (min_errors == -1) { 347171440Srrs min_errors = mnet->error_count + 1; 348171440Srrs min_errors_net = mnet; 349171440Srrs } else if (mnet->error_count + 1 < min_errors) { 350171440Srrs min_errors = mnet->error_count + 1; 351171440Srrs min_errors_net = mnet; 352171440Srrs } else if (mnet->error_count + 1 == min_errors 353171440Srrs && mnet->last_active > min_errors_net->last_active) { 354171440Srrs min_errors_net = mnet; 355171440Srrs min_errors = mnet->error_count + 1; 356171440Srrs } 357171440Srrs continue; 358171440Srrs } else { 359171440Srrs if (min_errors == -1) { 360171440Srrs min_errors = mnet->error_count; 361171440Srrs min_errors_net = mnet; 362171440Srrs } else if (mnet->error_count < min_errors) { 363171440Srrs min_errors = mnet->error_count; 364171440Srrs min_errors_net = mnet; 365171440Srrs } else if (mnet->error_count == min_errors 366171440Srrs && mnet->last_active > min_errors_net->last_active) { 367171440Srrs min_errors_net = mnet; 368171440Srrs min_errors = mnet->error_count; 369171440Srrs } 370171440Srrs continue; 371171440Srrs } 372171440Srrs } 373171440Srrs /* 374171440Srrs * JRS 5/14/07 - If the destination is reachable and 375171440Srrs * not in PF state, compare the cwnd of the 376171440Srrs * destination to the highest cwnd seen thus far. 377171440Srrs * Store the destination with the higher cwnd value. 378171440Srrs * If the cwnd values are equal, randomly choose one 379171440Srrs * of the two destinations. 
380171440Srrs */ 381171440Srrs if (max_cwnd < mnet->cwnd) { 382171440Srrs max_cwnd_net = mnet; 383171440Srrs max_cwnd = mnet->cwnd; 384171440Srrs } else if (max_cwnd == mnet->cwnd) { 385171440Srrs uint32_t rndval; 386171440Srrs uint8_t this_random; 387171440Srrs 388171440Srrs if (stcb->asoc.hb_random_idx > 3) { 389171440Srrs rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 390171440Srrs memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values)); 391171440Srrs this_random = stcb->asoc.hb_random_values[0]; 392171440Srrs stcb->asoc.hb_random_idx++; 393171440Srrs stcb->asoc.hb_ect_randombit = 0; 394171440Srrs } else { 395171440Srrs this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 396171440Srrs stcb->asoc.hb_random_idx++; 397171440Srrs stcb->asoc.hb_ect_randombit = 0; 398171440Srrs } 399171440Srrs if (this_random % 2 == 1) { 400171440Srrs max_cwnd_net = mnet; 401180387Srrs max_cwnd = mnet->cwnd; /* Useless? */ 402171440Srrs } 403171440Srrs } 404171440Srrs } 405171440Srrs /* 406171440Srrs * JRS 5/14/07 - After all destination have been considered 407171440Srrs * as alternates, check to see if there was some active 408171440Srrs * destination (not in PF state). If not, check to see if 409171440Srrs * there was some PF destination with the minimum number of 410171440Srrs * errors. If not, return the original destination. If 411171440Srrs * there is a min_errors_net, remove the PF flag from that 412171440Srrs * destination, set the cwnd to one or two MTUs, and return 413171440Srrs * the destination as an alt. If there was some active 414171440Srrs * destination with a highest cwnd, return the destination 415171440Srrs * as an alt. 
416171440Srrs */ 417171440Srrs if (max_cwnd_net == NULL) { 418171440Srrs if (min_errors_net == NULL) { 419171440Srrs return (net); 420171440Srrs } 421171440Srrs min_errors_net->dest_state &= ~SCTP_ADDR_PF; 422179783Srrs min_errors_net->cwnd = min_errors_net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf); 423171440Srrs if (SCTP_OS_TIMER_PENDING(&min_errors_net->rxt_timer.timer)) { 424171440Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 425171440Srrs stcb, min_errors_net, 426171440Srrs SCTP_FROM_SCTP_TIMER + SCTP_LOC_2); 427171440Srrs } 428171440Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to active with %d errors.\n", 429171440Srrs min_errors_net, min_errors_net->error_count); 430171440Srrs return (min_errors_net); 431171440Srrs } else { 432171440Srrs return (max_cwnd_net); 433171440Srrs } 434171440Srrs } 435171440Srrs /* 436171440Srrs * JRS 5/14/07 - If mode is set to 1, use the CMT policy for 437171440Srrs * choosing an alternate net. 438171440Srrs */ 439171440Srrs else if (mode == 1) { 440171440Srrs TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { 441171440Srrs if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 442163953Srrs (mnet->dest_state & SCTP_ADDR_UNCONFIRMED) 443163953Srrs ) { 444163953Srrs /* 445163953Srrs * will skip ones that are not-reachable or 446163953Srrs * unconfirmed 447163953Srrs */ 448163953Srrs continue; 449163953Srrs } 450171440Srrs if (max_cwnd < mnet->cwnd) { 451171440Srrs max_cwnd_net = mnet; 452171440Srrs max_cwnd = mnet->cwnd; 453171440Srrs } else if (max_cwnd == mnet->cwnd) { 454163953Srrs uint32_t rndval; 455163953Srrs uint8_t this_random; 456163953Srrs 457163953Srrs if (stcb->asoc.hb_random_idx > 3) { 458163953Srrs rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 459163953Srrs memcpy(stcb->asoc.hb_random_values, &rndval, 460163953Srrs sizeof(stcb->asoc.hb_random_values)); 461163953Srrs this_random = stcb->asoc.hb_random_values[0]; 462163953Srrs stcb->asoc.hb_random_idx = 0; 
463163953Srrs stcb->asoc.hb_ect_randombit = 0; 464163953Srrs } else { 465163953Srrs this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 466163953Srrs stcb->asoc.hb_random_idx++; 467163953Srrs stcb->asoc.hb_ect_randombit = 0; 468163953Srrs } 469163953Srrs if (this_random % 2) { 470171440Srrs max_cwnd_net = mnet; 471171440Srrs max_cwnd = mnet->cwnd; 472163953Srrs } 473163953Srrs } 474163953Srrs } 475171440Srrs if (max_cwnd_net) { 476171440Srrs return (max_cwnd_net); 477163953Srrs } 478163953Srrs } 479163953Srrs mnet = net; 480163953Srrs once = 0; 481163953Srrs 482163953Srrs if (mnet == NULL) { 483163953Srrs mnet = TAILQ_FIRST(&stcb->asoc.nets); 484163953Srrs } 485163953Srrs do { 486163953Srrs alt = TAILQ_NEXT(mnet, sctp_next); 487163953Srrs if (alt == NULL) { 488163953Srrs once++; 489163953Srrs if (once > 1) { 490163953Srrs break; 491163953Srrs } 492163953Srrs alt = TAILQ_FIRST(&stcb->asoc.nets); 493163953Srrs } 494163953Srrs if (alt->ro.ro_rt == NULL) { 495167598Srrs if (alt->ro._s_addr) { 496167598Srrs sctp_free_ifa(alt->ro._s_addr); 497167598Srrs alt->ro._s_addr = NULL; 498167598Srrs } 499163953Srrs alt->src_addr_selected = 0; 500163953Srrs } 501163953Srrs if ( 502163953Srrs ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) && 503163953Srrs (alt->ro.ro_rt != NULL) && 504169655Srrs /* sa_ignore NO_NULL_CHK */ 505163953Srrs (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) 506163953Srrs ) { 507163953Srrs /* Found a reachable address */ 508163953Srrs break; 509163953Srrs } 510163953Srrs mnet = alt; 511163953Srrs } while (alt != NULL); 512163953Srrs 513163953Srrs if (alt == NULL) { 514163953Srrs /* Case where NO insv network exists (dormant state) */ 515163953Srrs /* we rotate destinations */ 516163953Srrs once = 0; 517163953Srrs mnet = net; 518163953Srrs do { 519163953Srrs alt = TAILQ_NEXT(mnet, sctp_next); 520163953Srrs if (alt == NULL) { 521163953Srrs once++; 522163953Srrs if (once > 1) { 523163953Srrs break; 524163953Srrs } 525163953Srrs 
alt = TAILQ_FIRST(&stcb->asoc.nets); 526163953Srrs } 527169655Srrs /* sa_ignore NO_NULL_CHK */ 528163953Srrs if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) && 529163953Srrs (alt != net)) { 530163953Srrs /* Found an alternate address */ 531163953Srrs break; 532163953Srrs } 533163953Srrs mnet = alt; 534163953Srrs } while (alt != NULL); 535163953Srrs } 536163953Srrs if (alt == NULL) { 537163953Srrs return (net); 538163953Srrs } 539163953Srrs return (alt); 540163953Srrs} 541163953Srrs 542171440Srrs 543171440Srrs 544163953Srrsstatic void 545163953Srrssctp_backoff_on_timeout(struct sctp_tcb *stcb, 546163953Srrs struct sctp_nets *net, 547163953Srrs int win_probe, 548163953Srrs int num_marked) 549163953Srrs{ 550170642Srrs if (net->RTO == 0) { 551170642Srrs net->RTO = stcb->asoc.minrto; 552170642Srrs } 553163953Srrs net->RTO <<= 1; 554163953Srrs if (net->RTO > stcb->asoc.maxrto) { 555163953Srrs net->RTO = stcb->asoc.maxrto; 556163953Srrs } 557163953Srrs if ((win_probe == 0) && num_marked) { 558163953Srrs /* We don't apply penalty to window probe scenarios */ 559171440Srrs /* JRS - Use the congestion control given in the CC module */ 560171440Srrs stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net); 561163953Srrs } 562163953Srrs} 563163953Srrs 564184333Srrs#ifndef INVARIANTS 565184333Srrsstatic void 566182367Srrssctp_recover_sent_list(struct sctp_tcb *stcb) 567182367Srrs{ 568182367Srrs struct sctp_tmit_chunk *chk, *tp2; 569182367Srrs struct sctp_association *asoc; 570182367Srrs 571182367Srrs asoc = &stcb->asoc; 572182367Srrs chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 573182367Srrs for (; chk != NULL; chk = tp2) { 574182367Srrs tp2 = TAILQ_NEXT(chk, sctp_next); 575182367Srrs if ((compare_with_wrap(stcb->asoc.last_acked_seq, 576182367Srrs chk->rec.data.TSN_seq, 577182367Srrs MAX_TSN)) || 578182367Srrs (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) { 579182367Srrs 580182367Srrs SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n", 581182367Srrs chk, 
chk->rec.data.TSN_seq, stcb->asoc.last_acked_seq); 582182367Srrs TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 583182367Srrs if (chk->pr_sctp_on) { 584182367Srrs if (asoc->pr_sctp_cnt != 0) 585182367Srrs asoc->pr_sctp_cnt--; 586182367Srrs } 587182367Srrs if (chk->data) { 588182367Srrs /* sa_ignore NO_NULL_CHK */ 589182367Srrs sctp_free_bufspace(stcb, asoc, chk, 1); 590182367Srrs sctp_m_freem(chk->data); 591196260Stuexen if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(chk->flags)) { 592182367Srrs asoc->sent_queue_cnt_removeable--; 593182367Srrs } 594182367Srrs } 595182367Srrs chk->data = NULL; 596182367Srrs asoc->sent_queue_cnt--; 597182367Srrs sctp_free_a_chunk(stcb, chk); 598182367Srrs } 599182367Srrs } 600182367Srrs SCTP_PRINTF("after recover order is as follows\n"); 601182367Srrs chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 602182367Srrs for (; chk != NULL; chk = tp2) { 603182367Srrs tp2 = TAILQ_NEXT(chk, sctp_next); 604182367Srrs SCTP_PRINTF("chk:%p TSN:%x\n", chk, chk->rec.data.TSN_seq); 605182367Srrs } 606182367Srrs} 607182367Srrs 608184333Srrs#endif 609184333Srrs 610163953Srrsstatic int 611163953Srrssctp_mark_all_for_resend(struct sctp_tcb *stcb, 612163953Srrs struct sctp_nets *net, 613163953Srrs struct sctp_nets *alt, 614163953Srrs int window_probe, 615163953Srrs int *num_marked) 616163953Srrs{ 617163953Srrs 618163953Srrs /* 619163953Srrs * Mark all chunks (well not all) that were sent to *net for 620163953Srrs * retransmission. Move them to alt for there destination as well... 621163953Srrs * We only mark chunks that have been outstanding long enough to 622163953Srrs * have received feed-back. 
623163953Srrs */ 624163953Srrs struct sctp_tmit_chunk *chk, *tp2, *could_be_sent = NULL; 625163953Srrs struct sctp_nets *lnets; 626163953Srrs struct timeval now, min_wait, tv; 627163953Srrs int cur_rtt; 628168709Srrs int audit_tf, num_mk, fir; 629163953Srrs unsigned int cnt_mk; 630168709Srrs uint32_t orig_flight, orig_tf; 631163953Srrs uint32_t tsnlast, tsnfirst; 632182367Srrs int recovery_cnt = 0; 633163953Srrs 634171440Srrs 635163953Srrs /* none in flight now */ 636163953Srrs audit_tf = 0; 637163953Srrs fir = 0; 638163953Srrs /* 639163953Srrs * figure out how long a data chunk must be pending before we can 640163953Srrs * mark it .. 641163953Srrs */ 642169378Srrs (void)SCTP_GETTIME_TIMEVAL(&now); 643163953Srrs /* get cur rto in micro-seconds */ 644163953Srrs cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1); 645163953Srrs cur_rtt *= 1000; 646179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 647170744Srrs sctp_log_fr(cur_rtt, 648170744Srrs stcb->asoc.peers_rwnd, 649170744Srrs window_probe, 650170744Srrs SCTP_FR_T3_MARK_TIME); 651170744Srrs sctp_log_fr(net->flight_size, 652170744Srrs SCTP_OS_TIMER_PENDING(&net->fr_timer.timer), 653170744Srrs SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer), 654170744Srrs SCTP_FR_CWND_REPORT); 655170744Srrs sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT); 656170744Srrs } 657163953Srrs tv.tv_sec = cur_rtt / 1000000; 658163953Srrs tv.tv_usec = cur_rtt % 1000000; 659163953Srrs min_wait = now; 660163953Srrs timevalsub(&min_wait, &tv); 661163953Srrs if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { 662163953Srrs /* 663163953Srrs * if we hit here, we don't have enough seconds on the clock 664163953Srrs * to account for the RTO. We just let the lower seconds be 665163953Srrs * the bounds and don't worry about it. This may mean we 666163953Srrs * will mark a lot more than we should. 
667163953Srrs */ 668163953Srrs min_wait.tv_sec = min_wait.tv_usec = 0; 669163953Srrs } 670179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 671170744Srrs sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME); 672170744Srrs sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME); 673170744Srrs } 674163953Srrs /* 675163953Srrs * Our rwnd will be incorrect here since we are not adding back the 676163953Srrs * cnt * mbuf but we will fix that down below. 677163953Srrs */ 678163953Srrs orig_flight = net->flight_size; 679168709Srrs orig_tf = stcb->asoc.total_flight; 680168709Srrs 681163953Srrs net->fast_retran_ip = 0; 682163953Srrs /* Now on to each chunk */ 683163953Srrs num_mk = cnt_mk = 0; 684163953Srrs tsnfirst = tsnlast = 0; 685184333Srrs#ifndef INVARIANTS 686182367Srrsstart_again: 687184333Srrs#endif 688163953Srrs chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 689163953Srrs for (; chk != NULL; chk = tp2) { 690163953Srrs tp2 = TAILQ_NEXT(chk, sctp_next); 691163953Srrs if ((compare_with_wrap(stcb->asoc.last_acked_seq, 692163953Srrs chk->rec.data.TSN_seq, 693163953Srrs MAX_TSN)) || 694163953Srrs (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) { 695163953Srrs /* Strange case our list got out of order? */ 696182367Srrs SCTP_PRINTF("Our list is out of order? 
last_acked:%x chk:%x", 697182367Srrs (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.TSN_seq); 698182367Srrs recovery_cnt++; 699182367Srrs#ifdef INVARIANTS 700182367Srrs panic("last acked >= chk on sent-Q"); 701182367Srrs#else 702182367Srrs SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt); 703182367Srrs sctp_recover_sent_list(stcb); 704182367Srrs if (recovery_cnt < 10) { 705182367Srrs goto start_again; 706182367Srrs } else { 707182367Srrs SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt); 708182367Srrs } 709182367Srrs#endif 710163953Srrs } 711163953Srrs if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) { 712163953Srrs /* 713163953Srrs * found one to mark: If it is less than 714163953Srrs * DATAGRAM_ACKED it MUST not be a skipped or marked 715163953Srrs * TSN but instead one that is either already set 716163953Srrs * for retransmission OR one that needs 717163953Srrs * retransmission. 718163953Srrs */ 719163953Srrs 720163953Srrs /* validate its been outstanding long enough */ 721179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 722170744Srrs sctp_log_fr(chk->rec.data.TSN_seq, 723170744Srrs chk->sent_rcv_time.tv_sec, 724170744Srrs chk->sent_rcv_time.tv_usec, 725170744Srrs SCTP_FR_T3_MARK_TIME); 726170744Srrs } 727163953Srrs if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) { 728163953Srrs /* 729163953Srrs * we have reached a chunk that was sent 730163953Srrs * some seconds past our min.. forget it we 731163953Srrs * will find no more to send. 
732163953Srrs */ 733179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 734170744Srrs sctp_log_fr(0, 735170744Srrs chk->sent_rcv_time.tv_sec, 736170744Srrs chk->sent_rcv_time.tv_usec, 737170744Srrs SCTP_FR_T3_STOPPED); 738170744Srrs } 739163953Srrs continue; 740163953Srrs } else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) && 741163953Srrs (window_probe == 0)) { 742163953Srrs /* 743163953Srrs * we must look at the micro seconds to 744163953Srrs * know. 745163953Srrs */ 746163953Srrs if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { 747163953Srrs /* 748163953Srrs * ok it was sent after our boundary 749163953Srrs * time. 750163953Srrs */ 751179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 752170744Srrs sctp_log_fr(0, 753170744Srrs chk->sent_rcv_time.tv_sec, 754170744Srrs chk->sent_rcv_time.tv_usec, 755170744Srrs SCTP_FR_T3_STOPPED); 756170744Srrs } 757163953Srrs continue; 758163953Srrs } 759163953Srrs } 760196260Stuexen if (stcb->asoc.peer_supports_prsctp && PR_SCTP_TTL_ENABLED(chk->flags)) { 761163953Srrs /* Is it expired? */ 762163953Srrs if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) || 763163953Srrs ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) && 764163953Srrs (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) { 765163953Srrs /* Yes so drop it */ 766163953Srrs if (chk->data) { 767169420Srrs (void)sctp_release_pr_sctp_chunk(stcb, 768163953Srrs chk, 769163953Srrs (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 770189790Srrs SCTP_SO_NOT_LOCKED); 771163953Srrs } 772185694Srrs continue; 773163953Srrs } 774163953Srrs } 775196260Stuexen if (stcb->asoc.peer_supports_prsctp && PR_SCTP_RTX_ENABLED(chk->flags)) { 776163953Srrs /* Has it been retransmitted tv_sec times? 
*/ 777163953Srrs if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) { 778163953Srrs if (chk->data) { 779169420Srrs (void)sctp_release_pr_sctp_chunk(stcb, 780163953Srrs chk, 781163953Srrs (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 782189790Srrs SCTP_SO_NOT_LOCKED); 783163953Srrs } 784185694Srrs continue; 785163953Srrs } 786163953Srrs } 787168709Srrs if (chk->sent < SCTP_DATAGRAM_RESEND) { 788163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 789163953Srrs num_mk++; 790163953Srrs if (fir == 0) { 791163953Srrs fir = 1; 792163953Srrs tsnfirst = chk->rec.data.TSN_seq; 793163953Srrs } 794163953Srrs tsnlast = chk->rec.data.TSN_seq; 795179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 796170744Srrs sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, 797170744Srrs 0, SCTP_FR_T3_MARKED); 798170744Srrs } 799168709Srrs if (chk->rec.data.chunk_was_revoked) { 800168709Srrs /* deflate the cwnd */ 801168709Srrs chk->whoTo->cwnd -= chk->book_size; 802168709Srrs chk->rec.data.chunk_was_revoked = 0; 803168709Srrs } 804168709Srrs net->marked_retrans++; 805168709Srrs stcb->asoc.marked_retrans++; 806179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 807170744Srrs sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO, 808170744Srrs chk->whoTo->flight_size, 809170744Srrs chk->book_size, 810170744Srrs (uintptr_t) chk->whoTo, 811170744Srrs chk->rec.data.TSN_seq); 812170744Srrs } 813168709Srrs sctp_flight_size_decrease(chk); 814168709Srrs sctp_total_flight_decrease(stcb, chk); 815168709Srrs stcb->asoc.peers_rwnd += chk->send_size; 816179783Srrs stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 817163953Srrs } 818163953Srrs chk->sent = SCTP_DATAGRAM_RESEND; 819163953Srrs SCTP_STAT_INCR(sctps_markedretrans); 820165220Srrs 821163953Srrs /* reset the TSN for striking and other FR stuff */ 822163953Srrs chk->rec.data.doing_fast_retransmit = 0; 823163953Srrs /* Clear any 
time so NO RTT is being done */ 824163953Srrs chk->do_rtt = 0; 825163953Srrs if (alt != net) { 826163953Srrs sctp_free_remote_addr(chk->whoTo); 827163953Srrs chk->no_fr_allowed = 1; 828163953Srrs chk->whoTo = alt; 829163953Srrs atomic_add_int(&alt->ref_count, 1); 830163953Srrs } else { 831163953Srrs chk->no_fr_allowed = 0; 832163953Srrs if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 833163953Srrs chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 834163953Srrs } else { 835163953Srrs chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 836163953Srrs } 837163953Srrs } 838170181Srrs /* 839170181Srrs * CMT: Do not allow FRs on retransmitted TSNs. 840170181Srrs */ 841179783Srrs if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1) { 842163953Srrs chk->no_fr_allowed = 1; 843163953Srrs } 844163953Srrs } else if (chk->sent == SCTP_DATAGRAM_ACKED) { 845163953Srrs /* remember highest acked one */ 846163953Srrs could_be_sent = chk; 847163953Srrs } 848163953Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) { 849163953Srrs cnt_mk++; 850163953Srrs } 851163953Srrs } 852168709Srrs if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) { 853168709Srrs /* we did not subtract the same things? 
*/ 854168709Srrs audit_tf = 1; 855168709Srrs } 856179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 857170744Srrs sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT); 858170744Srrs } 859163953Srrs#ifdef SCTP_DEBUG 860169420Srrs if (num_mk) { 861169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", 862169420Srrs tsnlast); 863169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n", 864169420Srrs num_mk, (u_long)stcb->asoc.peers_rwnd); 865169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", 866169420Srrs tsnlast); 867169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%d\n", 868169420Srrs num_mk, 869169420Srrs (int)stcb->asoc.peers_rwnd); 870163953Srrs } 871163953Srrs#endif 872163953Srrs *num_marked = num_mk; 873210493Srrs /* 874210493Srrs * Now check for a ECN Echo that may be stranded And include the 875210493Srrs * cnt_mk'd to have all resends in the control queue. 
876210493Srrs */ 877210493Srrs TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 878210493Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) { 879210493Srrs cnt_mk++; 880210493Srrs } 881210493Srrs if ((chk->whoTo == net) && 882210493Srrs (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 883210493Srrs sctp_free_remote_addr(chk->whoTo); 884210493Srrs chk->whoTo = alt; 885210493Srrs if (chk->sent != SCTP_DATAGRAM_RESEND) { 886210493Srrs chk->sent = SCTP_DATAGRAM_RESEND; 887210493Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 888210493Srrs cnt_mk++; 889210493Srrs } 890210493Srrs atomic_add_int(&alt->ref_count, 1); 891210493Srrs } 892210493Srrs } 893163953Srrs if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) { 894163953Srrs /* fix it so we retransmit the highest acked anyway */ 895163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 896163953Srrs cnt_mk++; 897163953Srrs could_be_sent->sent = SCTP_DATAGRAM_RESEND; 898163953Srrs } 899163953Srrs if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) { 900165220Srrs#ifdef INVARIANTS 901171477Srrs SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n", 902171477Srrs cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk); 903163953Srrs#endif 904163953Srrs#ifndef SCTP_AUDITING_ENABLED 905163953Srrs stcb->asoc.sent_queue_retran_cnt = cnt_mk; 906163953Srrs#endif 907163953Srrs } 908163953Srrs if (audit_tf) { 909169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, 910169420Srrs "Audit total flight due to negative value net:%p\n", 911169420Srrs net); 912163953Srrs stcb->asoc.total_flight = 0; 913163953Srrs stcb->asoc.total_flight_count = 0; 914163953Srrs /* Clear all networks flight size */ 915163953Srrs TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) { 916163953Srrs lnets->flight_size = 0; 917169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, 918169420Srrs "Net:%p c-f cwnd:%d ssthresh:%d\n", 919169420Srrs lnets, lnets->cwnd, lnets->ssthresh); 920163953Srrs } 921163953Srrs TAILQ_FOREACH(chk, 
&stcb->asoc.sent_queue, sctp_next) { 922163953Srrs if (chk->sent < SCTP_DATAGRAM_RESEND) { 923179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 924170744Srrs sctp_misc_ints(SCTP_FLIGHT_LOG_UP, 925170744Srrs chk->whoTo->flight_size, 926170744Srrs chk->book_size, 927170744Srrs (uintptr_t) chk->whoTo, 928170744Srrs chk->rec.data.TSN_seq); 929170744Srrs } 930168709Srrs sctp_flight_size_increase(chk); 931168709Srrs sctp_total_flight_increase(stcb, chk); 932163953Srrs } 933163953Srrs } 934163953Srrs } 935163953Srrs /* 936163953Srrs * Setup the ecn nonce re-sync point. We do this since 937163953Srrs * retranmissions are NOT setup for ECN. This means that do to 938163953Srrs * Karn's rule, we don't know the total of the peers ecn bits. 939163953Srrs */ 940163953Srrs chk = TAILQ_FIRST(&stcb->asoc.send_queue); 941163953Srrs if (chk == NULL) { 942163953Srrs stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq; 943163953Srrs } else { 944163953Srrs stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq; 945163953Srrs } 946163953Srrs stcb->asoc.nonce_wait_for_ecne = 0; 947163953Srrs stcb->asoc.nonce_sum_check = 0; 948163953Srrs /* We return 1 if we only have a window probe outstanding */ 949163953Srrs return (0); 950163953Srrs} 951163953Srrs 952163953Srrsstatic void 953163953Srrssctp_move_all_chunks_to_alt(struct sctp_tcb *stcb, 954163953Srrs struct sctp_nets *net, 955163953Srrs struct sctp_nets *alt) 956163953Srrs{ 957163953Srrs struct sctp_association *asoc; 958163953Srrs struct sctp_stream_out *outs; 959163953Srrs struct sctp_tmit_chunk *chk; 960163953Srrs struct sctp_stream_queue_pending *sp; 961163953Srrs 962163953Srrs if (net == alt) 963163953Srrs /* nothing to do */ 964163953Srrs return; 965163953Srrs 966163953Srrs asoc = &stcb->asoc; 967163953Srrs 968163953Srrs /* 969163953Srrs * now through all the streams checking for chunks sent to our bad 970163953Srrs * network. 
971163953Srrs */ 972163953Srrs TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) { 973163953Srrs /* now clean up any chunks here */ 974163953Srrs TAILQ_FOREACH(sp, &outs->outqueue, next) { 975163953Srrs if (sp->net == net) { 976163953Srrs sctp_free_remote_addr(sp->net); 977163953Srrs sp->net = alt; 978163953Srrs atomic_add_int(&alt->ref_count, 1); 979163953Srrs } 980163953Srrs } 981163953Srrs } 982163953Srrs /* Now check the pending queue */ 983163953Srrs TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 984163953Srrs if (chk->whoTo == net) { 985163953Srrs sctp_free_remote_addr(chk->whoTo); 986163953Srrs chk->whoTo = alt; 987163953Srrs atomic_add_int(&alt->ref_count, 1); 988163953Srrs } 989163953Srrs } 990163953Srrs 991163953Srrs} 992163953Srrs 993163953Srrsint 994163953Srrssctp_t3rxt_timer(struct sctp_inpcb *inp, 995163953Srrs struct sctp_tcb *stcb, 996163953Srrs struct sctp_nets *net) 997163953Srrs{ 998163953Srrs struct sctp_nets *alt; 999163953Srrs int win_probe, num_mk; 1000163953Srrs 1001179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 1002170744Srrs sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT); 1003170744Srrs } 1004179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 1005163953Srrs struct sctp_nets *lnet; 1006163953Srrs 1007163953Srrs TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 1008163953Srrs if (net == lnet) { 1009163953Srrs sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3); 1010163953Srrs } else { 1011163953Srrs sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3); 1012163953Srrs } 1013163953Srrs } 1014163953Srrs } 1015163953Srrs /* Find an alternate and mark those for retransmission */ 1016163953Srrs if ((stcb->asoc.peers_rwnd == 0) && 1017163953Srrs (stcb->asoc.total_flight < net->mtu)) { 1018163953Srrs SCTP_STAT_INCR(sctps_timowindowprobe); 1019163953Srrs win_probe = 1; 1020163953Srrs } else { 1021163953Srrs win_probe = 0; 1022163953Srrs } 1023168709Srrs 1024171440Srrs /* 1025171440Srrs * 
/*
 * T3-rtx retransmission timer expiry for destination "net".
 *
 * Marks outstanding data for retransmission (via
 * sctp_mark_all_for_resend), backs off the RTO/cwnd, runs error
 * threshold management, and — depending on CMT/PF sysctls — may move
 * the destination into the PF (potentially-failed) state.
 *
 * Returns 1 if threshold management destroyed the association (or a
 * heartbeat send reported the association lost), 0 otherwise.
 */
int
sctp_t3rxt_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	int win_probe, num_mk;

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
		struct sctp_nets *lnet;

		/* Log cwnd for every net; flag 1 marks the expiring net. */
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			if (net == lnet) {
				sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
			} else {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
			}
		}
	}
	/* Find an alternate and mark those for retransmission */
	if ((stcb->asoc.peers_rwnd == 0) &&
	    (stcb->asoc.total_flight < net->mtu)) {
		/* Peer advertised a zero window: this was a window probe. */
		SCTP_STAT_INCR(sctps_timowindowprobe);
		win_probe = 1;
	} else {
		win_probe = 0;
	}

	/*
	 * JRS 5/14/07 - If CMT PF is on and the destination if not already
	 * in PF state, set the destination to PF state and store the
	 * current time as the time that the destination was last active. In
	 * addition, find an alternate destination with PF-based
	 * find_alt_net().
	 */
	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
		if ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF) {
			net->dest_state |= SCTP_ADDR_PF;
			net->last_active = sctp_get_tick_count();
			SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from active to PF.\n",
			    net);
		}
		/* mode 2 = PF-aware alternate selection */
		alt = sctp_find_alternate_net(stcb, net, 2);
	} else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
		/*
		 * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being
		 * used, then pick dest with largest ssthresh for any
		 * retransmission.
		 */
		alt = net;
		alt = sctp_find_alternate_net(stcb, alt, 1);
		/*
		 * CUCv2: If a different dest is picked for the
		 * retransmission, then new (rtx-)pseudo_cumack needs to be
		 * tracked for orig dest. Let CUCv2 track new (rtx-)
		 * pseudo-cumack always.
		 */
		net->find_pseudo_cumack = 1;
		net->find_rtx_pseudo_cumack = 1;
	} else {		/* CMT is OFF */
		alt = sctp_find_alternate_net(stcb, net, 0);
	}

	(void)sctp_mark_all_for_resend(stcb, net, alt, win_probe, &num_mk);
	/* FR Loss recovery just ended with the T3. */
	stcb->asoc.fast_retran_loss_recovery = 0;

	/* CMT FR loss recovery ended with the T3 */
	net->fast_retran_loss_recovery = 0;

	/*
	 * setup the sat loss recovery that prevents satellite cwnd advance.
	 */
	stcb->asoc.sat_t3_loss_recovery = 1;
	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;

	/* Backoff the timer and cwnd */
	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
	if (win_probe == 0) {
		/* We don't do normal threshold management on window probes */
		if (sctp_threshold_management(inp, stcb, net,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		} else {
			if (net != stcb->asoc.primary_destination) {
				/* send a immediate HB if our RTO is stale */
				struct timeval now;
				unsigned int ms_goneby;

				(void)SCTP_GETTIME_TIMEVAL(&now);
				if (net->last_sent_time.tv_sec) {
					ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
				} else {
					ms_goneby = 0;
				}
				if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
					/*
					 * no recent feed back in an RTO or
					 * more, request a RTT update
					 */
					if (sctp_send_hb(stcb, 1, net) < 0)
						/*
						 * Less than 0 means we lost
						 * the assoc
						 */
						return (1);
				}
			}
		}
	} else {
		/*
		 * For a window probe we don't penalize the net's but only
		 * the association. This may fail it if SACKs are not coming
		 * back. If sack's are coming with rwnd locked at 0, we will
		 * continue to hold things waiting for rwnd to raise
		 */
		if (sctp_threshold_management(inp, stcb, NULL,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/* Move all pending over too */
		sctp_move_all_chunks_to_alt(stcb, net, alt);

		/*
		 * Get the address that failed, to force a new src address
		 * selecton and a route allocation.
		 */
		if (net->ro._s_addr) {
			sctp_free_ifa(net->ro._s_addr);
			net->ro._s_addr = NULL;
		}
		net->src_addr_selected = 0;

		/* Force a route allocation too */
		if (net->ro.ro_rt) {
			RTFREE(net->ro.ro_rt);
			net->ro.ro_rt = NULL;
		}
		/* Was it our primary? */
		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
			/*
			 * Yes, note it as such and find an alternate note:
			 * this means HB code must use this to resent the
			 * primary if it goes active AND if someone does a
			 * change-primary then this flag must be cleared
			 * from any net structures.
			 */
			if (sctp_set_primary_addr(stcb,
			    (struct sockaddr *)NULL,
			    alt) == 0) {
				net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
			}
		}
	} else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf) && (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) {
		/*
		 * JRS 5/14/07 - If the destination hasn't failed completely
		 * but is in PF state, a PF-heartbeat needs to be sent
		 * manually.
		 */
		if (sctp_send_hb(stcb, 1, net) < 0)
			/* Return less than 0 means we lost the association */
			return (1);
	}
	/*
	 * Special case for cookie-echo'ed case, we don't do output but must
	 * await the COOKIE-ACK before retransmission
	 */
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * Here we just reset the timer and start again since we
		 * have not established the asoc
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
		return (0);
	}
	if (stcb->asoc.peer_supports_prsctp) {
		struct sctp_tmit_chunk *lchk;

		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
		    stcb->asoc.last_acked_seq, MAX_TSN)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing for notes
			 * on issues that will occur when the ECN NONCE
			 * stuff is put into SCTP for cross checking.
			 */
			send_forward_tsn(stcb, &stcb->asoc);
			if (lchk) {
				/* Assure a timer is up */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
			}
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
	return (0);
}
1186163953Srrs */ 1187163953Srrs send_forward_tsn(stcb, &stcb->asoc); 1188163953Srrs if (lchk) { 1189163953Srrs /* Assure a timer is up */ 1190163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo); 1191163953Srrs } 1192163953Srrs } 1193163953Srrs } 1194179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1195170744Srrs sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX); 1196170744Srrs } 1197163953Srrs return (0); 1198163953Srrs} 1199163953Srrs 1200163953Srrsint 1201163953Srrssctp_t1init_timer(struct sctp_inpcb *inp, 1202163953Srrs struct sctp_tcb *stcb, 1203163953Srrs struct sctp_nets *net) 1204163953Srrs{ 1205163953Srrs /* bump the thresholds */ 1206163953Srrs if (stcb->asoc.delayed_connection) { 1207163953Srrs /* 1208163953Srrs * special hook for delayed connection. The library did NOT 1209163953Srrs * complete the rest of its sends. 1210163953Srrs */ 1211163953Srrs stcb->asoc.delayed_connection = 0; 1212172090Srrs sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); 1213163953Srrs return (0); 1214163953Srrs } 1215163953Srrs if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) { 1216163953Srrs return (0); 1217163953Srrs } 1218163953Srrs if (sctp_threshold_management(inp, stcb, net, 1219163953Srrs stcb->asoc.max_init_times)) { 1220163953Srrs /* Association was destroyed */ 1221163953Srrs return (1); 1222163953Srrs } 1223163953Srrs stcb->asoc.dropped_special_cnt = 0; 1224163953Srrs sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0); 1225163953Srrs if (stcb->asoc.initial_init_rto_max < net->RTO) { 1226163953Srrs net->RTO = stcb->asoc.initial_init_rto_max; 1227163953Srrs } 1228163953Srrs if (stcb->asoc.numnets > 1) { 1229163953Srrs /* If we have more than one addr use it */ 1230163953Srrs struct sctp_nets *alt; 1231163953Srrs 1232163953Srrs alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0); 1233163953Srrs if ((alt != NULL) && (alt != 
/*
 * For cookie and asconf we actually need to find and mark for resend, then
 * increment the resend counter (after all the threshold management stuff of
 * course).
 */
/*
 * COOKIE-ECHO retransmission timer.  Locates the COOKIE_ECHO chunk on
 * the control send queue, runs threshold management, backs off its
 * path, moves it to an alternate destination if one exists, and marks
 * it for retransmission.  If no cookie is found while in the
 * COOKIE_ECHOED state the association is aborted with a protocol
 * violation error cause.
 *
 * Returns 1 if threshold management destroyed the association, else 0.
 */
int
sctp_cookie_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *cookie;

	/* first before all else we must find the cookie */
	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
		if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			break;
		}
	}
	if (cookie == NULL) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
			/* FOOBAR! */
			/*
			 * Timer fired in COOKIE_ECHOED state but no cookie
			 * is queued: build an operational-error mbuf and
			 * abort the association.
			 */
			struct mbuf *oper;

			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
			    0, M_DONTWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    sizeof(uint32_t);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				/* location code identifies this abort site */
				*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_3);
			}
			inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_4;
			sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
			    oper, SCTP_SO_NOT_LOCKED);
		} else {
#ifdef INVARIANTS
			panic("Cookie timer expires in wrong state?");
#else
			SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc));
			return (0);
#endif
		}
		return (0);
	}
	/* Ok we found the cookie, threshold management next */
	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
	    stcb->asoc.max_init_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * cleared theshold management now lets backoff the address & select
	 * an alternate
	 */
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
	alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
	if (alt != cookie->whoTo) {
		/* hand the net reference from the old path to the new one */
		sctp_free_remote_addr(cookie->whoTo);
		cookie->whoTo = alt;
		atomic_add_int(&alt->ref_count, 1);
	}
	/* Now mark the retran info */
	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	}
	cookie->sent = SCTP_DATAGRAM_RESEND;
	/*
	 * Now call the output routine to kick out the cookie again, Note we
	 * don't mark any chunks for retran so that FR will need to kick in
	 * to move these (or a send timer).
	 */
	return (0);
}
cookie->whoTo = alt; 1310163953Srrs atomic_add_int(&alt->ref_count, 1); 1311163953Srrs } 1312163953Srrs /* Now mark the retran info */ 1313163953Srrs if (cookie->sent != SCTP_DATAGRAM_RESEND) { 1314163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1315163953Srrs } 1316163953Srrs cookie->sent = SCTP_DATAGRAM_RESEND; 1317163953Srrs /* 1318163953Srrs * Now call the output routine to kick out the cookie again, Note we 1319163953Srrs * don't mark any chunks for retran so that FR will need to kick in 1320163953Srrs * to move these (or a send timer). 1321163953Srrs */ 1322163953Srrs return (0); 1323163953Srrs} 1324163953Srrs 1325163953Srrsint 1326163953Srrssctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1327163953Srrs struct sctp_nets *net) 1328163953Srrs{ 1329163953Srrs struct sctp_nets *alt; 1330163953Srrs struct sctp_tmit_chunk *strrst = NULL, *chk = NULL; 1331163953Srrs 1332163953Srrs if (stcb->asoc.stream_reset_outstanding == 0) { 1333163953Srrs return (0); 1334163953Srrs } 1335163953Srrs /* find the existing STRRESET, we use the seq number we sent out on */ 1336169420Srrs (void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst); 1337163953Srrs if (strrst == NULL) { 1338163953Srrs return (0); 1339163953Srrs } 1340163953Srrs /* do threshold management */ 1341163953Srrs if (sctp_threshold_management(inp, stcb, strrst->whoTo, 1342163953Srrs stcb->asoc.max_send_times)) { 1343163953Srrs /* Assoc is over */ 1344163953Srrs return (1); 1345163953Srrs } 1346163953Srrs /* 1347163953Srrs * cleared theshold management now lets backoff the address & select 1348163953Srrs * an alternate 1349163953Srrs */ 1350163953Srrs sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0); 1351163953Srrs alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0); 1352163953Srrs sctp_free_remote_addr(strrst->whoTo); 1353163953Srrs strrst->whoTo = alt; 1354163953Srrs atomic_add_int(&alt->ref_count, 1); 1355163953Srrs 1356163953Srrs /* See if a ECN Echo is 
also stranded */ 1357163953Srrs TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1358163953Srrs if ((chk->whoTo == net) && 1359163953Srrs (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1360163953Srrs sctp_free_remote_addr(chk->whoTo); 1361163953Srrs if (chk->sent != SCTP_DATAGRAM_RESEND) { 1362163953Srrs chk->sent = SCTP_DATAGRAM_RESEND; 1363163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1364163953Srrs } 1365163953Srrs chk->whoTo = alt; 1366163953Srrs atomic_add_int(&alt->ref_count, 1); 1367163953Srrs } 1368163953Srrs } 1369163953Srrs if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1370163953Srrs /* 1371163953Srrs * If the address went un-reachable, we need to move to 1372163953Srrs * alternates for ALL chk's in queue 1373163953Srrs */ 1374163953Srrs sctp_move_all_chunks_to_alt(stcb, net, alt); 1375163953Srrs } 1376163953Srrs /* mark the retran info */ 1377163953Srrs if (strrst->sent != SCTP_DATAGRAM_RESEND) 1378163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1379163953Srrs strrst->sent = SCTP_DATAGRAM_RESEND; 1380163953Srrs 1381163953Srrs /* restart the timer */ 1382163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo); 1383163953Srrs return (0); 1384163953Srrs} 1385163953Srrs 1386163953Srrsint 1387163953Srrssctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1388163953Srrs struct sctp_nets *net) 1389163953Srrs{ 1390163953Srrs struct sctp_nets *alt; 1391179157Srrs struct sctp_tmit_chunk *asconf, *chk, *nchk; 1392163953Srrs 1393171572Srrs /* is this a first send, or a retransmission? 
*/ 1394179157Srrs if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) { 1395163953Srrs /* compose a new ASCONF chunk and send it */ 1396172190Srrs sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED); 1397163953Srrs } else { 1398171572Srrs /* 1399171572Srrs * Retransmission of the existing ASCONF is needed 1400171572Srrs */ 1401163953Srrs 1402163953Srrs /* find the existing ASCONF */ 1403179157Srrs asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue); 1404163953Srrs if (asconf == NULL) { 1405163953Srrs return (0); 1406163953Srrs } 1407163953Srrs /* do threshold management */ 1408163953Srrs if (sctp_threshold_management(inp, stcb, asconf->whoTo, 1409163953Srrs stcb->asoc.max_send_times)) { 1410163953Srrs /* Assoc is over */ 1411163953Srrs return (1); 1412163953Srrs } 1413163953Srrs if (asconf->snd_count > stcb->asoc.max_send_times) { 1414163953Srrs /* 1415171572Srrs * Something is rotten: our peer is not responding 1416171572Srrs * to ASCONFs but apparently is to other chunks. 1417171572Srrs * i.e. it is not properly handling the chunk type 1418171572Srrs * upper bits. Mark this peer as ASCONF incapable 1419171572Srrs * and cleanup. 
1420163953Srrs */ 1421169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n"); 1422163953Srrs sctp_asconf_cleanup(stcb, net); 1423163953Srrs return (0); 1424163953Srrs } 1425163953Srrs /* 1426171572Srrs * cleared threshold management, so now backoff the net and 1427171572Srrs * select an alternate 1428163953Srrs */ 1429163953Srrs sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0); 1430163953Srrs alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0); 1431179157Srrs if (asconf->whoTo != alt) { 1432179157Srrs sctp_free_remote_addr(asconf->whoTo); 1433179157Srrs asconf->whoTo = alt; 1434179157Srrs atomic_add_int(&alt->ref_count, 1); 1435179157Srrs } 1436171572Srrs /* See if an ECN Echo is also stranded */ 1437163953Srrs TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1438163953Srrs if ((chk->whoTo == net) && 1439163953Srrs (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1440163953Srrs sctp_free_remote_addr(chk->whoTo); 1441163953Srrs chk->whoTo = alt; 1442163953Srrs if (chk->sent != SCTP_DATAGRAM_RESEND) { 1443163953Srrs chk->sent = SCTP_DATAGRAM_RESEND; 1444163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1445163953Srrs } 1446163953Srrs atomic_add_int(&alt->ref_count, 1); 1447163953Srrs } 1448163953Srrs } 1449179157Srrs for (chk = asconf; chk; chk = nchk) { 1450179157Srrs nchk = TAILQ_NEXT(chk, sctp_next); 1451179157Srrs if (chk->whoTo != alt) { 1452179157Srrs sctp_free_remote_addr(chk->whoTo); 1453179157Srrs chk->whoTo = alt; 1454179157Srrs atomic_add_int(&alt->ref_count, 1); 1455179157Srrs } 1456179157Srrs if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT) 1457179157Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1458179157Srrs chk->sent = SCTP_DATAGRAM_RESEND; 1459179157Srrs } 1460163953Srrs if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1461163953Srrs /* 1462163953Srrs * If the address went un-reachable, we need to move 1463171572Srrs * to the alternate 
for ALL chunks in queue 1464163953Srrs */ 1465163953Srrs sctp_move_all_chunks_to_alt(stcb, net, alt); 1466179157Srrs net = alt; 1467163953Srrs } 1468163953Srrs /* mark the retran info */ 1469163953Srrs if (asconf->sent != SCTP_DATAGRAM_RESEND) 1470163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1471163953Srrs asconf->sent = SCTP_DATAGRAM_RESEND; 1472179157Srrs 1473179157Srrs /* send another ASCONF if any and we can do */ 1474179157Srrs sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED); 1475163953Srrs } 1476163953Srrs return (0); 1477163953Srrs} 1478163953Srrs 1479172091Srrs/* Mobility adaptation */ 1480172156Srrsvoid 1481172091Srrssctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1482172091Srrs struct sctp_nets *net) 1483172091Srrs{ 1484172091Srrs if (stcb->asoc.deleted_primary == NULL) { 1485172091Srrs SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n"); 1486172091Srrs sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1487172156Srrs return; 1488172091Srrs } 1489172091Srrs SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary "); 1490172091Srrs SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa); 1491172091Srrs sctp_free_remote_addr(stcb->asoc.deleted_primary); 1492172091Srrs stcb->asoc.deleted_primary = NULL; 1493172091Srrs sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1494172156Srrs return; 1495172091Srrs} 1496172091Srrs 1497163953Srrs/* 1498163953Srrs * For the shutdown and shutdown-ack, we do not keep one around on the 1499163953Srrs * control queue. This means we must generate a new one and call the general 1500163953Srrs * chunk output routine, AFTER having done threshold management. 
1501163953Srrs */ 1502163953Srrsint 1503163953Srrssctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1504163953Srrs struct sctp_nets *net) 1505163953Srrs{ 1506163953Srrs struct sctp_nets *alt; 1507163953Srrs 1508163953Srrs /* first threshold managment */ 1509163953Srrs if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1510163953Srrs /* Assoc is over */ 1511163953Srrs return (1); 1512163953Srrs } 1513163953Srrs /* second select an alternative */ 1514163953Srrs alt = sctp_find_alternate_net(stcb, net, 0); 1515163953Srrs 1516163953Srrs /* third generate a shutdown into the queue for out net */ 1517163953Srrs if (alt) { 1518163953Srrs sctp_send_shutdown(stcb, alt); 1519163953Srrs } else { 1520163953Srrs /* 1521163953Srrs * if alt is NULL, there is no dest to send to?? 1522163953Srrs */ 1523163953Srrs return (0); 1524163953Srrs } 1525163953Srrs /* fourth restart timer */ 1526163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt); 1527163953Srrs return (0); 1528163953Srrs} 1529163953Srrs 1530163953Srrsint 1531163953Srrssctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1532163953Srrs struct sctp_nets *net) 1533163953Srrs{ 1534163953Srrs struct sctp_nets *alt; 1535163953Srrs 1536163953Srrs /* first threshold managment */ 1537163953Srrs if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1538163953Srrs /* Assoc is over */ 1539163953Srrs return (1); 1540163953Srrs } 1541163953Srrs /* second select an alternative */ 1542163953Srrs alt = sctp_find_alternate_net(stcb, net, 0); 1543163953Srrs 1544163953Srrs /* third generate a shutdown into the queue for out net */ 1545163953Srrs sctp_send_shutdown_ack(stcb, alt); 1546163953Srrs 1547163953Srrs /* fourth restart timer */ 1548163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt); 1549163953Srrs return (0); 1550163953Srrs} 1551163953Srrs 1552163953Srrsstatic void 
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb)
{
	/*
	 * Sanity/repair pass over the per-stream output queues.  Only
	 * called when the send/sent queues are empty: re-hangs streams
	 * that fell off the output wheel, counts queued stream chunks,
	 * and kicks chunk output if data appears to be stuck.
	 */
	struct sctp_stream_out *outs;
	struct sctp_stream_queue_pending *sp;
	unsigned int chks_in_queue = 0;
	int being_filled = 0;

	/*
	 * This function is ONLY called when the send/sent queues are empty.
	 */
	if ((stcb == NULL) || (inp == NULL))
		return;

	if (stcb->asoc.sent_queue_retran_cnt) {
		/* sent queue is empty, so any retran count is stale */
		SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
		    stcb->asoc.sent_queue_retran_cnt);
		stcb->asoc.sent_queue_retran_cnt = 0;
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
		int i, cnt = 0;

		/* Check to see if a spoke fell off the wheel */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
				sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
				cnt++;
			}
		}
		if (cnt) {
			/* yep, we lost a spoke or two */
			SCTP_PRINTF("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
		} else {
			/* no spokes lost, nothing queued anywhere */
			stcb->asoc.total_output_queue_size = 0;
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
		return;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	/* Check to see if some data queued, if so report it */
	TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
		if (!TAILQ_EMPTY(&outs->outqueue)) {
			TAILQ_FOREACH(sp, &outs->outqueue, next) {
				if (sp->msg_is_complete)
					being_filled++;
				chks_in_queue++;
			}
		}
	}
	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
		SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
		    stcb->asoc.stream_queue_cnt, chks_in_queue);
	}
	if (chks_in_queue) {
		/* call the output queue function */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
			/*
			 * Probably should go in and make it go back through
			 * and add fragments allowed
			 */
			if (being_filled == 0) {
				SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
				    chks_in_queue);
			}
		}
	} else {
		SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
		    (u_long)stcb->asoc.total_output_queue_size);
		stcb->asoc.total_output_queue_size = 0;
	}
}

/*
 * Heartbeat timer expiry.  For a net that did not answer the last HB,
 * invalidate its cached source address and back off its RTO; audit the
 * stream queues when data is counted but send/sent queues are empty;
 * then send new heartbeat(s) — one normally, or up to sctp_hb_maxburst
 * extra HBs to unconfirmed-but-reachable destinations.
 * Returns 1 if sending a HB destroyed the association, else 0.
 */
int
sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int cnt_of_unconf)
{
	int ret;

	if (net) {
		if (net->hb_responded == 0) {
			if (net->ro._s_addr) {
				/*
				 * Invalidate the src address if we did not
				 * get a response last time.
				 */
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
			}
			sctp_backoff_on_timeout(stcb, net, 1, 0);
		}
		/* Zero PBA, if it needs it */
		if (net->partial_bytes_acked) {
			net->partial_bytes_acked = 0;
		}
	}
	if ((stcb->asoc.total_output_queue_size > 0) &&
	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
		sctp_audit_stream_queues_for_size(inp, stcb);
	}
	/* Send a new HB, this will do threshold management, pick a new dest */
	if (cnt_of_unconf == 0) {
		if (sctp_send_hb(stcb, 0, NULL) < 0) {
			return (1);
		}
	} else {
		/*
		 * this will send out extra hb's up to maxburst if there are
		 * any unconfirmed addresses.
		 */
		uint32_t cnt_sent = 0;

		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    (net->dest_state & SCTP_ADDR_REACHABLE)) {
				cnt_sent++;
				if (net->hb_responded == 0) {
					/* Did we respond last time? */
					if (net->ro._s_addr) {
						sctp_free_ifa(net->ro._s_addr);
						net->ro._s_addr = NULL;
						net->src_addr_selected = 0;
					}
				}
				ret = sctp_send_hb(stcb, 1, net);
				if (ret < 0)
					return 1;
				else if (ret == 0) {
					break;
				}
				if (cnt_sent >= SCTP_BASE_SYSCTL(sctp_hb_maxburst))
					break;
			}
		}
	}
	return (0);
}

/* Return 1 if the association's heartbeat timer is pending, else 0. */
int
sctp_is_hb_timer_running(struct sctp_tcb *stcb)
{
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.hb_timer.timer)) {
		/* its running */
		return (1);
	} else {
		/* nope */
		return (0);
	}
}

/* Return 1 if the association's delayed-SACK timer is pending, else 0. */
int
sctp_is_sack_timer_running(struct sctp_tcb *stcb)
{
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		/* its running */
		return (1);
	} else {
		/* nope */
		return (0);
	}
}

/*
 * Table of common link MTU sizes, in ascending order, used by
 * sctp_getnext_mtu() to pick the next path-MTU probe step.
 */
#define SCTP_NUMBER_OF_MTU_SIZES 18
static uint32_t mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};


/*
 * Return the smallest entry in mtu_sizes[] strictly greater than
 * cur_mtu, or cur_mtu itself when no table entry is larger.
 */
static uint32_t
sctp_getnext_mtu(struct sctp_inpcb *inp, uint32_t cur_mtu)
{
	/* select another MTU that is just bigger than this one */
1747163953Srrs int i; 1748163953Srrs 1749163953Srrs for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) { 1750163953Srrs if (cur_mtu < mtu_sizes[i]) { 1751163953Srrs /* no max_mtu is bigger than this one */ 1752163953Srrs return (mtu_sizes[i]); 1753163953Srrs } 1754163953Srrs } 1755163953Srrs /* here return the highest allowable */ 1756163953Srrs return (cur_mtu); 1757163953Srrs} 1758163953Srrs 1759163953Srrs 1760163953Srrsvoid 1761163953Srrssctp_pathmtu_timer(struct sctp_inpcb *inp, 1762163953Srrs struct sctp_tcb *stcb, 1763163953Srrs struct sctp_nets *net) 1764163953Srrs{ 1765179157Srrs uint32_t next_mtu, mtu; 1766163953Srrs 1767163953Srrs next_mtu = sctp_getnext_mtu(inp, net->mtu); 1768169352Srrs 1769179157Srrs if ((next_mtu > net->mtu) && (net->port == 0)) { 1770169352Srrs if ((net->src_addr_selected == 0) || 1771169352Srrs (net->ro._s_addr == NULL) || 1772169352Srrs (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { 1773169420Srrs if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { 1774169352Srrs sctp_free_ifa(net->ro._s_addr); 1775169352Srrs net->ro._s_addr = NULL; 1776169352Srrs net->src_addr_selected = 0; 1777169420Srrs } else if (net->ro._s_addr == NULL) { 1778179157Srrs#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE) 1779179157Srrs if (net->ro._l_addr.sa.sa_family == AF_INET6) { 1780179157Srrs struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr; 1781179157Srrs 1782179157Srrs /* KAME hack: embed scopeid */ 1783197288Srrs (void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone)); 1784179157Srrs } 1785179157Srrs#endif 1786179157Srrs 1787169420Srrs net->ro._s_addr = sctp_source_address_selection(inp, 1788169420Srrs stcb, 1789169420Srrs (sctp_route_t *) & net->ro, 1790169420Srrs net, 0, stcb->asoc.vrf_id); 1791179157Srrs#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE) 1792179157Srrs if (net->ro._l_addr.sa.sa_family == AF_INET6) { 1793179157Srrs struct sockaddr_in6 *sin6 = (struct sockaddr_in6 
*)&net->ro._l_addr; 1794179157Srrs 1795179157Srrs (void)sa6_recoverscope(sin6); 1796179157Srrs } 1797179157Srrs#endif /* INET6 */ 1798169352Srrs } 1799169352Srrs if (net->ro._s_addr) 1800169352Srrs net->src_addr_selected = 1; 1801169352Srrs } 1802169352Srrs if (net->ro._s_addr) { 1803169352Srrs mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt); 1804185694Srrs if (net->port) { 1805185694Srrs mtu -= sizeof(struct udphdr); 1806185694Srrs } 1807169352Srrs if (mtu > next_mtu) { 1808163953Srrs net->mtu = next_mtu; 1809163953Srrs } 1810163953Srrs } 1811163953Srrs } 1812163953Srrs /* restart the timer */ 1813163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 1814163953Srrs} 1815163953Srrs 1816163953Srrsvoid 1817163953Srrssctp_autoclose_timer(struct sctp_inpcb *inp, 1818163953Srrs struct sctp_tcb *stcb, 1819163953Srrs struct sctp_nets *net) 1820163953Srrs{ 1821163953Srrs struct timeval tn, *tim_touse; 1822163953Srrs struct sctp_association *asoc; 1823163953Srrs int ticks_gone_by; 1824163953Srrs 1825169378Srrs (void)SCTP_GETTIME_TIMEVAL(&tn); 1826163953Srrs if (stcb->asoc.sctp_autoclose_ticks && 1827163953Srrs sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1828163953Srrs /* Auto close is on */ 1829163953Srrs asoc = &stcb->asoc; 1830163953Srrs /* pick the time to use */ 1831163953Srrs if (asoc->time_last_rcvd.tv_sec > 1832163953Srrs asoc->time_last_sent.tv_sec) { 1833163953Srrs tim_touse = &asoc->time_last_rcvd; 1834163953Srrs } else { 1835163953Srrs tim_touse = &asoc->time_last_sent; 1836163953Srrs } 1837163953Srrs /* Now has long enough transpired to autoclose? 
*/ 1838163953Srrs ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec); 1839163953Srrs if ((ticks_gone_by > 0) && 1840163953Srrs (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) { 1841163953Srrs /* 1842163953Srrs * autoclose time has hit, call the output routine, 1843163953Srrs * which should do nothing just to be SURE we don't 1844163953Srrs * have hanging data. We can then safely check the 1845163953Srrs * queues and know that we are clear to send 1846163953Srrs * shutdown 1847163953Srrs */ 1848172090Srrs sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED); 1849163953Srrs /* Are we clean? */ 1850163953Srrs if (TAILQ_EMPTY(&asoc->send_queue) && 1851163953Srrs TAILQ_EMPTY(&asoc->sent_queue)) { 1852163953Srrs /* 1853163953Srrs * there is nothing queued to send, so I'm 1854163953Srrs * done... 1855163953Srrs */ 1856166675Srrs if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1857163953Srrs /* only send SHUTDOWN 1st time thru */ 1858163953Srrs sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 1859166675Srrs if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 1860166675Srrs (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1861166675Srrs SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1862166675Srrs } 1863171943Srrs SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT); 1864172703Srrs SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING); 1865163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1866163953Srrs stcb->sctp_ep, stcb, 1867163953Srrs asoc->primary_destination); 1868163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1869163953Srrs stcb->sctp_ep, stcb, 1870163953Srrs asoc->primary_destination); 1871163953Srrs } 1872163953Srrs } 1873163953Srrs } else { 1874163953Srrs /* 1875163953Srrs * No auto close at this time, reset t-o to check 1876163953Srrs * later 1877163953Srrs */ 1878163953Srrs int tmp; 1879163953Srrs 1880163953Srrs /* fool the timer startup to use the time left */ 1881163953Srrs tmp = 
asoc->sctp_autoclose_ticks; 1882163953Srrs asoc->sctp_autoclose_ticks -= ticks_gone_by; 1883163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1884163953Srrs net); 1885163953Srrs /* restore the real tick value */ 1886163953Srrs asoc->sctp_autoclose_ticks = tmp; 1887163953Srrs } 1888163953Srrs } 1889163953Srrs} 1890