sctp_timer.c revision 197288
1163953Srrs/*- 2169382Srrs * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 3163953Srrs * 4163953Srrs * Redistribution and use in source and binary forms, with or without 5163953Srrs * modification, are permitted provided that the following conditions are met: 6163953Srrs * 7163953Srrs * a) Redistributions of source code must retain the above copyright notice, 8163953Srrs * this list of conditions and the following disclaimer. 9163953Srrs * 10163953Srrs * b) Redistributions in binary form must reproduce the above copyright 11163953Srrs * notice, this list of conditions and the following disclaimer in 12163953Srrs * the documentation and/or other materials provided with the distribution. 13163953Srrs * 14163953Srrs * c) Neither the name of Cisco Systems, Inc. nor the names of its 15163953Srrs * contributors may be used to endorse or promote products derived 16163953Srrs * from this software without specific prior written permission. 17163953Srrs * 18163953Srrs * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19163953Srrs * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20163953Srrs * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21163953Srrs * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22163953Srrs * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23163953Srrs * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24163953Srrs * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25163953Srrs * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26163953Srrs * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27163953Srrs * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28163953Srrs * THE POSSIBILITY OF SUCH DAMAGE. 
29163953Srrs */ 30163953Srrs 31163953Srrs/* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $ */ 32163953Srrs 33163953Srrs#include <sys/cdefs.h> 34163953Srrs__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.c 197288 2009-09-17 15:11:12Z rrs $"); 35163953Srrs 36163953Srrs#define _IP_VHL 37166086Srrs#include <netinet/sctp_os.h> 38163953Srrs#include <netinet/sctp_pcb.h> 39163953Srrs#ifdef INET6 40163953Srrs#endif 41163953Srrs#include <netinet/sctp_var.h> 42167598Srrs#include <netinet/sctp_sysctl.h> 43163953Srrs#include <netinet/sctp_timer.h> 44163953Srrs#include <netinet/sctputil.h> 45163953Srrs#include <netinet/sctp_output.h> 46163953Srrs#include <netinet/sctp_header.h> 47163953Srrs#include <netinet/sctp_indata.h> 48163953Srrs#include <netinet/sctp_asconf.h> 49163953Srrs#include <netinet/sctp_input.h> 50163953Srrs#include <netinet/sctp.h> 51163953Srrs#include <netinet/sctp_uio.h> 52185694Srrs#include <netinet/udp.h> 53163953Srrs 54163953Srrs 55163953Srrsvoid 56163953Srrssctp_early_fr_timer(struct sctp_inpcb *inp, 57163953Srrs struct sctp_tcb *stcb, 58163953Srrs struct sctp_nets *net) 59163953Srrs{ 60163953Srrs struct sctp_tmit_chunk *chk, *tp2; 61163953Srrs struct timeval now, min_wait, tv; 62163953Srrs unsigned int cur_rtt, cnt = 0, cnt_resend = 0; 63163953Srrs 64163953Srrs /* an early FR is occuring. */ 65169378Srrs (void)SCTP_GETTIME_TIMEVAL(&now); 66163953Srrs /* get cur rto in micro-seconds */ 67163953Srrs if (net->lastsa == 0) { 68163953Srrs /* Hmm no rtt estimate yet? 
*/ 69163953Srrs cur_rtt = stcb->asoc.initial_rto >> 2; 70163953Srrs } else { 71163953Srrs 72163953Srrs cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 73163953Srrs } 74179783Srrs if (cur_rtt < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) { 75179783Srrs cur_rtt = SCTP_BASE_SYSCTL(sctp_early_fr_msec); 76163953Srrs } 77163953Srrs cur_rtt *= 1000; 78163953Srrs tv.tv_sec = cur_rtt / 1000000; 79163953Srrs tv.tv_usec = cur_rtt % 1000000; 80163953Srrs min_wait = now; 81163953Srrs timevalsub(&min_wait, &tv); 82163953Srrs if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { 83163953Srrs /* 84163953Srrs * if we hit here, we don't have enough seconds on the clock 85163953Srrs * to account for the RTO. We just let the lower seconds be 86163953Srrs * the bounds and don't worry about it. This may mean we 87163953Srrs * will mark a lot more than we should. 88163953Srrs */ 89163953Srrs min_wait.tv_sec = min_wait.tv_usec = 0; 90163953Srrs } 91163953Srrs chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead); 92163953Srrs for (; chk != NULL; chk = tp2) { 93163953Srrs tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next); 94163953Srrs if (chk->whoTo != net) { 95163953Srrs continue; 96163953Srrs } 97163953Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) 98163953Srrs cnt_resend++; 99163953Srrs else if ((chk->sent > SCTP_DATAGRAM_UNSENT) && 100163953Srrs (chk->sent < SCTP_DATAGRAM_RESEND)) { 101163953Srrs /* pending, may need retran */ 102163953Srrs if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) { 103163953Srrs /* 104163953Srrs * we have reached a chunk that was sent 105163953Srrs * some seconds past our min.. forget it we 106163953Srrs * will find no more to send. 107163953Srrs */ 108163953Srrs continue; 109163953Srrs } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) { 110163953Srrs /* 111163953Srrs * we must look at the micro seconds to 112163953Srrs * know. 
113163953Srrs */ 114163953Srrs if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { 115163953Srrs /* 116163953Srrs * ok it was sent after our boundary 117163953Srrs * time. 118163953Srrs */ 119163953Srrs continue; 120163953Srrs } 121163953Srrs } 122179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_EARLYFR_LOGGING_ENABLE) { 123170744Srrs sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, 124170744Srrs 4, SCTP_FR_MARKED_EARLY); 125170744Srrs } 126163953Srrs SCTP_STAT_INCR(sctps_earlyfrmrkretrans); 127163953Srrs chk->sent = SCTP_DATAGRAM_RESEND; 128163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 129163953Srrs /* double book size since we are doing an early FR */ 130163953Srrs chk->book_size_scale++; 131163953Srrs cnt += chk->send_size; 132163953Srrs if ((cnt + net->flight_size) > net->cwnd) { 133163953Srrs /* Mark all we could possibly resend */ 134163953Srrs break; 135163953Srrs } 136163953Srrs } 137163953Srrs } 138163953Srrs if (cnt) { 139163953Srrs /* 140171440Srrs * JRS - Use the congestion control given in the congestion 141171440Srrs * control module 142163953Srrs */ 143171440Srrs stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer(inp, stcb, net); 144163953Srrs } else if (cnt_resend) { 145172090Srrs sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED); 146163953Srrs } 147163953Srrs /* Restart it? 
*/ 148163953Srrs if (net->flight_size < net->cwnd) { 149163953Srrs SCTP_STAT_INCR(sctps_earlyfrstrtmr); 150163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 151163953Srrs } 152163953Srrs} 153163953Srrs 154163953Srrsvoid 155163953Srrssctp_audit_retranmission_queue(struct sctp_association *asoc) 156163953Srrs{ 157163953Srrs struct sctp_tmit_chunk *chk; 158163953Srrs 159169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n", 160169420Srrs asoc->sent_queue_retran_cnt, 161169420Srrs asoc->sent_queue_cnt); 162163953Srrs asoc->sent_queue_retran_cnt = 0; 163163953Srrs asoc->sent_queue_cnt = 0; 164163953Srrs TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 165163953Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) { 166163953Srrs sctp_ucount_incr(asoc->sent_queue_retran_cnt); 167163953Srrs } 168163953Srrs asoc->sent_queue_cnt++; 169163953Srrs } 170163953Srrs TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 171163953Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) { 172163953Srrs sctp_ucount_incr(asoc->sent_queue_retran_cnt); 173163953Srrs } 174163953Srrs } 175179157Srrs TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) { 176179157Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) { 177179157Srrs sctp_ucount_incr(asoc->sent_queue_retran_cnt); 178179157Srrs } 179179157Srrs } 180169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n", 181169420Srrs asoc->sent_queue_retran_cnt, 182169420Srrs asoc->sent_queue_cnt); 183163953Srrs} 184163953Srrs 185163953Srrsint 186163953Srrssctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 187163953Srrs struct sctp_nets *net, uint16_t threshold) 188163953Srrs{ 189163953Srrs if (net) { 190163953Srrs net->error_count++; 191169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n", 192169420Srrs net, net->error_count, 193169420Srrs net->failure_threshold); 194163953Srrs if (net->error_count > net->failure_threshold) { 
195163953Srrs /* We had a threshold failure */ 196163953Srrs if (net->dest_state & SCTP_ADDR_REACHABLE) { 197163953Srrs net->dest_state &= ~SCTP_ADDR_REACHABLE; 198163953Srrs net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 199167598Srrs net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 200163953Srrs if (net == stcb->asoc.primary_destination) { 201163953Srrs net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 202163953Srrs } 203171440Srrs /* 204171440Srrs * JRS 5/14/07 - If a destination is 205171440Srrs * unreachable, the PF bit is turned off. 206171440Srrs * This allows an unambiguous use of the PF 207171440Srrs * bit for destinations that are reachable 208171440Srrs * but potentially failed. If the 209171440Srrs * destination is set to the unreachable 210171440Srrs * state, also set the destination to the PF 211171440Srrs * state. 212171440Srrs */ 213171440Srrs /* 214171440Srrs * Add debug message here if destination is 215171440Srrs * not in PF state. 216171440Srrs */ 217171440Srrs /* Stop any running T3 timers here? 
*/ 218179783Srrs if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) { 219171440Srrs net->dest_state &= ~SCTP_ADDR_PF; 220171440Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n", 221171440Srrs net); 222171440Srrs } 223163953Srrs sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 224163953Srrs stcb, 225163953Srrs SCTP_FAILED_THRESHOLD, 226172090Srrs (void *)net, SCTP_SO_NOT_LOCKED); 227163953Srrs } 228163953Srrs } 229163953Srrs /*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE 230163953Srrs *********ROUTING CODE 231163953Srrs */ 232163953Srrs /*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE 233163953Srrs *********ROUTING CODE 234163953Srrs */ 235163953Srrs } 236163953Srrs if (stcb == NULL) 237163953Srrs return (0); 238163953Srrs 239163953Srrs if (net) { 240163953Srrs if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) { 241179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 242171943Srrs sctp_misc_ints(SCTP_THRESHOLD_INCR, 243171943Srrs stcb->asoc.overall_error_count, 244171943Srrs (stcb->asoc.overall_error_count + 1), 245171943Srrs SCTP_FROM_SCTP_TIMER, 246171943Srrs __LINE__); 247171943Srrs } 248163953Srrs stcb->asoc.overall_error_count++; 249163953Srrs } 250163953Srrs } else { 251179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) { 252171943Srrs sctp_misc_ints(SCTP_THRESHOLD_INCR, 253171943Srrs stcb->asoc.overall_error_count, 254171943Srrs (stcb->asoc.overall_error_count + 1), 255171943Srrs SCTP_FROM_SCTP_TIMER, 256171943Srrs __LINE__); 257171943Srrs } 258163953Srrs stcb->asoc.overall_error_count++; 259163953Srrs } 260169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n", 261169420Srrs &stcb->asoc, stcb->asoc.overall_error_count, 262169420Srrs (uint32_t) threshold, 263169420Srrs ((net == NULL) ? 
(uint32_t) 0 : (uint32_t) net->dest_state)); 264163953Srrs /* 265163953Srrs * We specifically do not do >= to give the assoc one more change 266163953Srrs * before we fail it. 267163953Srrs */ 268163953Srrs if (stcb->asoc.overall_error_count > threshold) { 269163953Srrs /* Abort notification sends a ULP notify */ 270163953Srrs struct mbuf *oper; 271163953Srrs 272163953Srrs oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 273163953Srrs 0, M_DONTWAIT, 1, MT_DATA); 274163953Srrs if (oper) { 275163953Srrs struct sctp_paramhdr *ph; 276163953Srrs uint32_t *ippp; 277163953Srrs 278165647Srrs SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 279163953Srrs sizeof(uint32_t); 280163953Srrs ph = mtod(oper, struct sctp_paramhdr *); 281163953Srrs ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 282165647Srrs ph->param_length = htons(SCTP_BUF_LEN(oper)); 283163953Srrs ippp = (uint32_t *) (ph + 1); 284165220Srrs *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1); 285163953Srrs } 286165220Srrs inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1; 287172090Srrs sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper, SCTP_SO_NOT_LOCKED); 288163953Srrs return (1); 289163953Srrs } 290163953Srrs return (0); 291163953Srrs} 292163953Srrs 293163953Srrsstruct sctp_nets * 294163953Srrssctp_find_alternate_net(struct sctp_tcb *stcb, 295163953Srrs struct sctp_nets *net, 296171440Srrs int mode) 297163953Srrs{ 298163953Srrs /* Find and return an alternate network if possible */ 299171440Srrs struct sctp_nets *alt, *mnet, *min_errors_net = NULL, *max_cwnd_net = NULL; 300163953Srrs int once; 301163953Srrs 302171440Srrs /* JRS 5/14/07 - Initialize min_errors to an impossible value. 
*/ 303171440Srrs int min_errors = -1; 304171440Srrs uint32_t max_cwnd = 0; 305171440Srrs 306163953Srrs if (stcb->asoc.numnets == 1) { 307163953Srrs /* No others but net */ 308163953Srrs return (TAILQ_FIRST(&stcb->asoc.nets)); 309163953Srrs } 310171440Srrs /* 311171440Srrs * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate 312171440Srrs * net algorithm. This algorithm chooses the active destination (not 313171440Srrs * in PF state) with the largest cwnd value. If all destinations are 314171440Srrs * in PF state, unreachable, or unconfirmed, choose the desination 315171440Srrs * that is in PF state with the lowest error count. In case of a 316171440Srrs * tie, choose the destination that was most recently active. 317171440Srrs */ 318171440Srrs if (mode == 2) { 319163953Srrs TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { 320171440Srrs /* 321171440Srrs * JRS 5/14/07 - If the destination is unreachable 322171440Srrs * or unconfirmed, skip it. 323171440Srrs */ 324163953Srrs if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 325171440Srrs (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) { 326171440Srrs continue; 327171440Srrs } 328171440Srrs /* 329171440Srrs * JRS 5/14/07 - If the destination is reachable 330171440Srrs * but in PF state, compare the error count of the 331171440Srrs * destination to the minimum error count seen thus 332171440Srrs * far. Store the destination with the lower error 333171440Srrs * count. If the error counts are equal, store the 334171440Srrs * destination that was most recently active. 335171440Srrs */ 336171440Srrs if (mnet->dest_state & SCTP_ADDR_PF) { 337171440Srrs /* 338171440Srrs * JRS 5/14/07 - If the destination under 339171440Srrs * consideration is the current destination, 340171440Srrs * work as if the error count is one higher. 341171440Srrs * The actual error count will not be 342171440Srrs * incremented until later in the t3 343171440Srrs * handler. 
344171440Srrs */ 345171440Srrs if (mnet == net) { 346171440Srrs if (min_errors == -1) { 347171440Srrs min_errors = mnet->error_count + 1; 348171440Srrs min_errors_net = mnet; 349171440Srrs } else if (mnet->error_count + 1 < min_errors) { 350171440Srrs min_errors = mnet->error_count + 1; 351171440Srrs min_errors_net = mnet; 352171440Srrs } else if (mnet->error_count + 1 == min_errors 353171440Srrs && mnet->last_active > min_errors_net->last_active) { 354171440Srrs min_errors_net = mnet; 355171440Srrs min_errors = mnet->error_count + 1; 356171440Srrs } 357171440Srrs continue; 358171440Srrs } else { 359171440Srrs if (min_errors == -1) { 360171440Srrs min_errors = mnet->error_count; 361171440Srrs min_errors_net = mnet; 362171440Srrs } else if (mnet->error_count < min_errors) { 363171440Srrs min_errors = mnet->error_count; 364171440Srrs min_errors_net = mnet; 365171440Srrs } else if (mnet->error_count == min_errors 366171440Srrs && mnet->last_active > min_errors_net->last_active) { 367171440Srrs min_errors_net = mnet; 368171440Srrs min_errors = mnet->error_count; 369171440Srrs } 370171440Srrs continue; 371171440Srrs } 372171440Srrs } 373171440Srrs /* 374171440Srrs * JRS 5/14/07 - If the destination is reachable and 375171440Srrs * not in PF state, compare the cwnd of the 376171440Srrs * destination to the highest cwnd seen thus far. 377171440Srrs * Store the destination with the higher cwnd value. 378171440Srrs * If the cwnd values are equal, randomly choose one 379171440Srrs * of the two destinations. 
380171440Srrs */ 381171440Srrs if (max_cwnd < mnet->cwnd) { 382171440Srrs max_cwnd_net = mnet; 383171440Srrs max_cwnd = mnet->cwnd; 384171440Srrs } else if (max_cwnd == mnet->cwnd) { 385171440Srrs uint32_t rndval; 386171440Srrs uint8_t this_random; 387171440Srrs 388171440Srrs if (stcb->asoc.hb_random_idx > 3) { 389171440Srrs rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 390171440Srrs memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values)); 391171440Srrs this_random = stcb->asoc.hb_random_values[0]; 392171440Srrs stcb->asoc.hb_random_idx++; 393171440Srrs stcb->asoc.hb_ect_randombit = 0; 394171440Srrs } else { 395171440Srrs this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 396171440Srrs stcb->asoc.hb_random_idx++; 397171440Srrs stcb->asoc.hb_ect_randombit = 0; 398171440Srrs } 399171440Srrs if (this_random % 2 == 1) { 400171440Srrs max_cwnd_net = mnet; 401180387Srrs max_cwnd = mnet->cwnd; /* Useless? */ 402171440Srrs } 403171440Srrs } 404171440Srrs } 405171440Srrs /* 406171440Srrs * JRS 5/14/07 - After all destination have been considered 407171440Srrs * as alternates, check to see if there was some active 408171440Srrs * destination (not in PF state). If not, check to see if 409171440Srrs * there was some PF destination with the minimum number of 410171440Srrs * errors. If not, return the original destination. If 411171440Srrs * there is a min_errors_net, remove the PF flag from that 412171440Srrs * destination, set the cwnd to one or two MTUs, and return 413171440Srrs * the destination as an alt. If there was some active 414171440Srrs * destination with a highest cwnd, return the destination 415171440Srrs * as an alt. 
416171440Srrs */ 417171440Srrs if (max_cwnd_net == NULL) { 418171440Srrs if (min_errors_net == NULL) { 419171440Srrs return (net); 420171440Srrs } 421171440Srrs min_errors_net->dest_state &= ~SCTP_ADDR_PF; 422179783Srrs min_errors_net->cwnd = min_errors_net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf); 423171440Srrs if (SCTP_OS_TIMER_PENDING(&min_errors_net->rxt_timer.timer)) { 424171440Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, 425171440Srrs stcb, min_errors_net, 426171440Srrs SCTP_FROM_SCTP_TIMER + SCTP_LOC_2); 427171440Srrs } 428171440Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to active with %d errors.\n", 429171440Srrs min_errors_net, min_errors_net->error_count); 430171440Srrs return (min_errors_net); 431171440Srrs } else { 432171440Srrs return (max_cwnd_net); 433171440Srrs } 434171440Srrs } 435171440Srrs /* 436171440Srrs * JRS 5/14/07 - If mode is set to 1, use the CMT policy for 437171440Srrs * choosing an alternate net. 438171440Srrs */ 439171440Srrs else if (mode == 1) { 440171440Srrs TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { 441171440Srrs if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 442163953Srrs (mnet->dest_state & SCTP_ADDR_UNCONFIRMED) 443163953Srrs ) { 444163953Srrs /* 445163953Srrs * will skip ones that are not-reachable or 446163953Srrs * unconfirmed 447163953Srrs */ 448163953Srrs continue; 449163953Srrs } 450171440Srrs if (max_cwnd < mnet->cwnd) { 451171440Srrs max_cwnd_net = mnet; 452171440Srrs max_cwnd = mnet->cwnd; 453171440Srrs } else if (max_cwnd == mnet->cwnd) { 454163953Srrs uint32_t rndval; 455163953Srrs uint8_t this_random; 456163953Srrs 457163953Srrs if (stcb->asoc.hb_random_idx > 3) { 458163953Srrs rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 459163953Srrs memcpy(stcb->asoc.hb_random_values, &rndval, 460163953Srrs sizeof(stcb->asoc.hb_random_values)); 461163953Srrs this_random = stcb->asoc.hb_random_values[0]; 462163953Srrs stcb->asoc.hb_random_idx = 0; 
463163953Srrs stcb->asoc.hb_ect_randombit = 0; 464163953Srrs } else { 465163953Srrs this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 466163953Srrs stcb->asoc.hb_random_idx++; 467163953Srrs stcb->asoc.hb_ect_randombit = 0; 468163953Srrs } 469163953Srrs if (this_random % 2) { 470171440Srrs max_cwnd_net = mnet; 471171440Srrs max_cwnd = mnet->cwnd; 472163953Srrs } 473163953Srrs } 474163953Srrs } 475171440Srrs if (max_cwnd_net) { 476171440Srrs return (max_cwnd_net); 477163953Srrs } 478163953Srrs } 479163953Srrs mnet = net; 480163953Srrs once = 0; 481163953Srrs 482163953Srrs if (mnet == NULL) { 483163953Srrs mnet = TAILQ_FIRST(&stcb->asoc.nets); 484163953Srrs } 485163953Srrs do { 486163953Srrs alt = TAILQ_NEXT(mnet, sctp_next); 487163953Srrs if (alt == NULL) { 488163953Srrs once++; 489163953Srrs if (once > 1) { 490163953Srrs break; 491163953Srrs } 492163953Srrs alt = TAILQ_FIRST(&stcb->asoc.nets); 493163953Srrs } 494163953Srrs if (alt->ro.ro_rt == NULL) { 495167598Srrs if (alt->ro._s_addr) { 496167598Srrs sctp_free_ifa(alt->ro._s_addr); 497167598Srrs alt->ro._s_addr = NULL; 498167598Srrs } 499163953Srrs alt->src_addr_selected = 0; 500163953Srrs } 501163953Srrs if ( 502163953Srrs ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) && 503163953Srrs (alt->ro.ro_rt != NULL) && 504169655Srrs /* sa_ignore NO_NULL_CHK */ 505163953Srrs (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) 506163953Srrs ) { 507163953Srrs /* Found a reachable address */ 508163953Srrs break; 509163953Srrs } 510163953Srrs mnet = alt; 511163953Srrs } while (alt != NULL); 512163953Srrs 513163953Srrs if (alt == NULL) { 514163953Srrs /* Case where NO insv network exists (dormant state) */ 515163953Srrs /* we rotate destinations */ 516163953Srrs once = 0; 517163953Srrs mnet = net; 518163953Srrs do { 519163953Srrs alt = TAILQ_NEXT(mnet, sctp_next); 520163953Srrs if (alt == NULL) { 521163953Srrs once++; 522163953Srrs if (once > 1) { 523163953Srrs break; 524163953Srrs } 525163953Srrs 
alt = TAILQ_FIRST(&stcb->asoc.nets); 526163953Srrs } 527169655Srrs /* sa_ignore NO_NULL_CHK */ 528163953Srrs if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) && 529163953Srrs (alt != net)) { 530163953Srrs /* Found an alternate address */ 531163953Srrs break; 532163953Srrs } 533163953Srrs mnet = alt; 534163953Srrs } while (alt != NULL); 535163953Srrs } 536163953Srrs if (alt == NULL) { 537163953Srrs return (net); 538163953Srrs } 539163953Srrs return (alt); 540163953Srrs} 541163953Srrs 542171440Srrs 543171440Srrs 544163953Srrsstatic void 545163953Srrssctp_backoff_on_timeout(struct sctp_tcb *stcb, 546163953Srrs struct sctp_nets *net, 547163953Srrs int win_probe, 548163953Srrs int num_marked) 549163953Srrs{ 550170642Srrs if (net->RTO == 0) { 551170642Srrs net->RTO = stcb->asoc.minrto; 552170642Srrs } 553163953Srrs net->RTO <<= 1; 554163953Srrs if (net->RTO > stcb->asoc.maxrto) { 555163953Srrs net->RTO = stcb->asoc.maxrto; 556163953Srrs } 557163953Srrs if ((win_probe == 0) && num_marked) { 558163953Srrs /* We don't apply penalty to window probe scenarios */ 559171440Srrs /* JRS - Use the congestion control given in the CC module */ 560171440Srrs stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net); 561163953Srrs } 562163953Srrs} 563163953Srrs 564184333Srrs#ifndef INVARIANTS 565184333Srrsstatic void 566182367Srrssctp_recover_sent_list(struct sctp_tcb *stcb) 567182367Srrs{ 568182367Srrs struct sctp_tmit_chunk *chk, *tp2; 569182367Srrs struct sctp_association *asoc; 570182367Srrs 571182367Srrs asoc = &stcb->asoc; 572182367Srrs chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 573182367Srrs for (; chk != NULL; chk = tp2) { 574182367Srrs tp2 = TAILQ_NEXT(chk, sctp_next); 575182367Srrs if ((compare_with_wrap(stcb->asoc.last_acked_seq, 576182367Srrs chk->rec.data.TSN_seq, 577182367Srrs MAX_TSN)) || 578182367Srrs (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) { 579182367Srrs 580182367Srrs SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n", 581182367Srrs chk, 
chk->rec.data.TSN_seq, stcb->asoc.last_acked_seq); 582182367Srrs TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next); 583182367Srrs if (chk->pr_sctp_on) { 584182367Srrs if (asoc->pr_sctp_cnt != 0) 585182367Srrs asoc->pr_sctp_cnt--; 586182367Srrs } 587182367Srrs if (chk->data) { 588182367Srrs /* sa_ignore NO_NULL_CHK */ 589182367Srrs sctp_free_bufspace(stcb, asoc, chk, 1); 590182367Srrs sctp_m_freem(chk->data); 591196260Stuexen if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(chk->flags)) { 592182367Srrs asoc->sent_queue_cnt_removeable--; 593182367Srrs } 594182367Srrs } 595182367Srrs chk->data = NULL; 596182367Srrs asoc->sent_queue_cnt--; 597182367Srrs sctp_free_a_chunk(stcb, chk); 598182367Srrs } 599182367Srrs } 600182367Srrs SCTP_PRINTF("after recover order is as follows\n"); 601182367Srrs chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 602182367Srrs for (; chk != NULL; chk = tp2) { 603182367Srrs tp2 = TAILQ_NEXT(chk, sctp_next); 604182367Srrs SCTP_PRINTF("chk:%p TSN:%x\n", chk, chk->rec.data.TSN_seq); 605182367Srrs } 606182367Srrs} 607182367Srrs 608184333Srrs#endif 609184333Srrs 610163953Srrsstatic int 611163953Srrssctp_mark_all_for_resend(struct sctp_tcb *stcb, 612163953Srrs struct sctp_nets *net, 613163953Srrs struct sctp_nets *alt, 614163953Srrs int window_probe, 615163953Srrs int *num_marked) 616163953Srrs{ 617163953Srrs 618163953Srrs /* 619163953Srrs * Mark all chunks (well not all) that were sent to *net for 620163953Srrs * retransmission. Move them to alt for there destination as well... 621163953Srrs * We only mark chunks that have been outstanding long enough to 622163953Srrs * have received feed-back. 
623163953Srrs */ 624163953Srrs struct sctp_tmit_chunk *chk, *tp2, *could_be_sent = NULL; 625163953Srrs struct sctp_nets *lnets; 626163953Srrs struct timeval now, min_wait, tv; 627163953Srrs int cur_rtt; 628168709Srrs int audit_tf, num_mk, fir; 629163953Srrs unsigned int cnt_mk; 630168709Srrs uint32_t orig_flight, orig_tf; 631163953Srrs uint32_t tsnlast, tsnfirst; 632182367Srrs int recovery_cnt = 0; 633163953Srrs 634171440Srrs 635163953Srrs /* none in flight now */ 636163953Srrs audit_tf = 0; 637163953Srrs fir = 0; 638163953Srrs /* 639163953Srrs * figure out how long a data chunk must be pending before we can 640163953Srrs * mark it .. 641163953Srrs */ 642169378Srrs (void)SCTP_GETTIME_TIMEVAL(&now); 643163953Srrs /* get cur rto in micro-seconds */ 644163953Srrs cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1); 645163953Srrs cur_rtt *= 1000; 646179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 647170744Srrs sctp_log_fr(cur_rtt, 648170744Srrs stcb->asoc.peers_rwnd, 649170744Srrs window_probe, 650170744Srrs SCTP_FR_T3_MARK_TIME); 651170744Srrs sctp_log_fr(net->flight_size, 652170744Srrs SCTP_OS_TIMER_PENDING(&net->fr_timer.timer), 653170744Srrs SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer), 654170744Srrs SCTP_FR_CWND_REPORT); 655170744Srrs sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT); 656170744Srrs } 657163953Srrs tv.tv_sec = cur_rtt / 1000000; 658163953Srrs tv.tv_usec = cur_rtt % 1000000; 659163953Srrs min_wait = now; 660163953Srrs timevalsub(&min_wait, &tv); 661163953Srrs if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { 662163953Srrs /* 663163953Srrs * if we hit here, we don't have enough seconds on the clock 664163953Srrs * to account for the RTO. We just let the lower seconds be 665163953Srrs * the bounds and don't worry about it. This may mean we 666163953Srrs * will mark a lot more than we should. 
667163953Srrs */ 668163953Srrs min_wait.tv_sec = min_wait.tv_usec = 0; 669163953Srrs } 670179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 671170744Srrs sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME); 672170744Srrs sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME); 673170744Srrs } 674163953Srrs /* 675163953Srrs * Our rwnd will be incorrect here since we are not adding back the 676163953Srrs * cnt * mbuf but we will fix that down below. 677163953Srrs */ 678163953Srrs orig_flight = net->flight_size; 679168709Srrs orig_tf = stcb->asoc.total_flight; 680168709Srrs 681163953Srrs net->fast_retran_ip = 0; 682163953Srrs /* Now on to each chunk */ 683163953Srrs num_mk = cnt_mk = 0; 684163953Srrs tsnfirst = tsnlast = 0; 685184333Srrs#ifndef INVARIANTS 686182367Srrsstart_again: 687184333Srrs#endif 688163953Srrs chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 689163953Srrs for (; chk != NULL; chk = tp2) { 690163953Srrs tp2 = TAILQ_NEXT(chk, sctp_next); 691163953Srrs if ((compare_with_wrap(stcb->asoc.last_acked_seq, 692163953Srrs chk->rec.data.TSN_seq, 693163953Srrs MAX_TSN)) || 694163953Srrs (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) { 695163953Srrs /* Strange case our list got out of order? */ 696182367Srrs SCTP_PRINTF("Our list is out of order? 
last_acked:%x chk:%x", 697182367Srrs (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.TSN_seq); 698182367Srrs recovery_cnt++; 699182367Srrs#ifdef INVARIANTS 700182367Srrs panic("last acked >= chk on sent-Q"); 701182367Srrs#else 702182367Srrs SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt); 703182367Srrs sctp_recover_sent_list(stcb); 704182367Srrs if (recovery_cnt < 10) { 705182367Srrs goto start_again; 706182367Srrs } else { 707182367Srrs SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt); 708182367Srrs } 709182367Srrs#endif 710163953Srrs } 711163953Srrs if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) { 712163953Srrs /* 713163953Srrs * found one to mark: If it is less than 714163953Srrs * DATAGRAM_ACKED it MUST not be a skipped or marked 715163953Srrs * TSN but instead one that is either already set 716163953Srrs * for retransmission OR one that needs 717163953Srrs * retransmission. 718163953Srrs */ 719163953Srrs 720163953Srrs /* validate its been outstanding long enough */ 721179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 722170744Srrs sctp_log_fr(chk->rec.data.TSN_seq, 723170744Srrs chk->sent_rcv_time.tv_sec, 724170744Srrs chk->sent_rcv_time.tv_usec, 725170744Srrs SCTP_FR_T3_MARK_TIME); 726170744Srrs } 727163953Srrs if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) { 728163953Srrs /* 729163953Srrs * we have reached a chunk that was sent 730163953Srrs * some seconds past our min.. forget it we 731163953Srrs * will find no more to send. 
732163953Srrs */ 733179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 734170744Srrs sctp_log_fr(0, 735170744Srrs chk->sent_rcv_time.tv_sec, 736170744Srrs chk->sent_rcv_time.tv_usec, 737170744Srrs SCTP_FR_T3_STOPPED); 738170744Srrs } 739163953Srrs continue; 740163953Srrs } else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) && 741163953Srrs (window_probe == 0)) { 742163953Srrs /* 743163953Srrs * we must look at the micro seconds to 744163953Srrs * know. 745163953Srrs */ 746163953Srrs if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { 747163953Srrs /* 748163953Srrs * ok it was sent after our boundary 749163953Srrs * time. 750163953Srrs */ 751179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 752170744Srrs sctp_log_fr(0, 753170744Srrs chk->sent_rcv_time.tv_sec, 754170744Srrs chk->sent_rcv_time.tv_usec, 755170744Srrs SCTP_FR_T3_STOPPED); 756170744Srrs } 757163953Srrs continue; 758163953Srrs } 759163953Srrs } 760196260Stuexen if (stcb->asoc.peer_supports_prsctp && PR_SCTP_TTL_ENABLED(chk->flags)) { 761163953Srrs /* Is it expired? */ 762163953Srrs if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) || 763163953Srrs ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) && 764163953Srrs (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) { 765163953Srrs /* Yes so drop it */ 766163953Srrs if (chk->data) { 767169420Srrs (void)sctp_release_pr_sctp_chunk(stcb, 768163953Srrs chk, 769163953Srrs (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 770189790Srrs SCTP_SO_NOT_LOCKED); 771163953Srrs } 772185694Srrs continue; 773163953Srrs } 774163953Srrs } 775196260Stuexen if (stcb->asoc.peer_supports_prsctp && PR_SCTP_RTX_ENABLED(chk->flags)) { 776163953Srrs /* Has it been retransmitted tv_sec times? 
*/ 777163953Srrs if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) { 778163953Srrs if (chk->data) { 779169420Srrs (void)sctp_release_pr_sctp_chunk(stcb, 780163953Srrs chk, 781163953Srrs (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 782189790Srrs SCTP_SO_NOT_LOCKED); 783163953Srrs } 784185694Srrs continue; 785163953Srrs } 786163953Srrs } 787168709Srrs if (chk->sent < SCTP_DATAGRAM_RESEND) { 788163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 789163953Srrs num_mk++; 790163953Srrs if (fir == 0) { 791163953Srrs fir = 1; 792163953Srrs tsnfirst = chk->rec.data.TSN_seq; 793163953Srrs } 794163953Srrs tsnlast = chk->rec.data.TSN_seq; 795179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 796170744Srrs sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, 797170744Srrs 0, SCTP_FR_T3_MARKED); 798170744Srrs } 799168709Srrs if (chk->rec.data.chunk_was_revoked) { 800168709Srrs /* deflate the cwnd */ 801168709Srrs chk->whoTo->cwnd -= chk->book_size; 802168709Srrs chk->rec.data.chunk_was_revoked = 0; 803168709Srrs } 804168709Srrs net->marked_retrans++; 805168709Srrs stcb->asoc.marked_retrans++; 806179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 807170744Srrs sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO, 808170744Srrs chk->whoTo->flight_size, 809170744Srrs chk->book_size, 810170744Srrs (uintptr_t) chk->whoTo, 811170744Srrs chk->rec.data.TSN_seq); 812170744Srrs } 813168709Srrs sctp_flight_size_decrease(chk); 814168709Srrs sctp_total_flight_decrease(stcb, chk); 815168709Srrs stcb->asoc.peers_rwnd += chk->send_size; 816179783Srrs stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh); 817163953Srrs } 818163953Srrs chk->sent = SCTP_DATAGRAM_RESEND; 819163953Srrs SCTP_STAT_INCR(sctps_markedretrans); 820165220Srrs 821163953Srrs /* reset the TSN for striking and other FR stuff */ 822163953Srrs chk->rec.data.doing_fast_retransmit = 0; 823163953Srrs /* Clear any 
time so NO RTT is being done */ 824163953Srrs chk->do_rtt = 0; 825163953Srrs if (alt != net) { 826163953Srrs sctp_free_remote_addr(chk->whoTo); 827163953Srrs chk->no_fr_allowed = 1; 828163953Srrs chk->whoTo = alt; 829163953Srrs atomic_add_int(&alt->ref_count, 1); 830163953Srrs } else { 831163953Srrs chk->no_fr_allowed = 0; 832163953Srrs if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 833163953Srrs chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 834163953Srrs } else { 835163953Srrs chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 836163953Srrs } 837163953Srrs } 838170181Srrs /* 839170181Srrs * CMT: Do not allow FRs on retransmitted TSNs. 840170181Srrs */ 841179783Srrs if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1) { 842163953Srrs chk->no_fr_allowed = 1; 843163953Srrs } 844163953Srrs } else if (chk->sent == SCTP_DATAGRAM_ACKED) { 845163953Srrs /* remember highest acked one */ 846163953Srrs could_be_sent = chk; 847163953Srrs } 848163953Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) { 849163953Srrs cnt_mk++; 850163953Srrs } 851163953Srrs } 852168709Srrs if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) { 853168709Srrs /* we did not subtract the same things? 
*/ 854168709Srrs audit_tf = 1; 855168709Srrs } 856179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) { 857170744Srrs sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT); 858170744Srrs } 859163953Srrs#ifdef SCTP_DEBUG 860169420Srrs if (num_mk) { 861169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", 862169420Srrs tsnlast); 863169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n", 864169420Srrs num_mk, (u_long)stcb->asoc.peers_rwnd); 865169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", 866169420Srrs tsnlast); 867169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%d\n", 868169420Srrs num_mk, 869169420Srrs (int)stcb->asoc.peers_rwnd); 870163953Srrs } 871163953Srrs#endif 872163953Srrs *num_marked = num_mk; 873163953Srrs if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) { 874163953Srrs /* fix it so we retransmit the highest acked anyway */ 875163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 876163953Srrs cnt_mk++; 877163953Srrs could_be_sent->sent = SCTP_DATAGRAM_RESEND; 878163953Srrs } 879163953Srrs if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) { 880165220Srrs#ifdef INVARIANTS 881171477Srrs SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n", 882171477Srrs cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk); 883163953Srrs#endif 884163953Srrs#ifndef SCTP_AUDITING_ENABLED 885163953Srrs stcb->asoc.sent_queue_retran_cnt = cnt_mk; 886163953Srrs#endif 887163953Srrs } 888163953Srrs /* Now check for a ECN Echo that may be stranded */ 889163953Srrs TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 890163953Srrs if ((chk->whoTo == net) && 891163953Srrs (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 892163953Srrs sctp_free_remote_addr(chk->whoTo); 893163953Srrs chk->whoTo = alt; 894163953Srrs if (chk->sent != SCTP_DATAGRAM_RESEND) { 
895163953Srrs chk->sent = SCTP_DATAGRAM_RESEND; 896163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 897163953Srrs } 898163953Srrs atomic_add_int(&alt->ref_count, 1); 899163953Srrs } 900163953Srrs } 901163953Srrs if (audit_tf) { 902169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, 903169420Srrs "Audit total flight due to negative value net:%p\n", 904169420Srrs net); 905163953Srrs stcb->asoc.total_flight = 0; 906163953Srrs stcb->asoc.total_flight_count = 0; 907163953Srrs /* Clear all networks flight size */ 908163953Srrs TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) { 909163953Srrs lnets->flight_size = 0; 910169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, 911169420Srrs "Net:%p c-f cwnd:%d ssthresh:%d\n", 912169420Srrs lnets, lnets->cwnd, lnets->ssthresh); 913163953Srrs } 914163953Srrs TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 915163953Srrs if (chk->sent < SCTP_DATAGRAM_RESEND) { 916179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) { 917170744Srrs sctp_misc_ints(SCTP_FLIGHT_LOG_UP, 918170744Srrs chk->whoTo->flight_size, 919170744Srrs chk->book_size, 920170744Srrs (uintptr_t) chk->whoTo, 921170744Srrs chk->rec.data.TSN_seq); 922170744Srrs } 923168709Srrs sctp_flight_size_increase(chk); 924168709Srrs sctp_total_flight_increase(stcb, chk); 925163953Srrs } 926163953Srrs } 927163953Srrs } 928163953Srrs /* 929163953Srrs * Setup the ecn nonce re-sync point. We do this since 930163953Srrs * retranmissions are NOT setup for ECN. This means that do to 931163953Srrs * Karn's rule, we don't know the total of the peers ecn bits. 
932163953Srrs */ 933163953Srrs chk = TAILQ_FIRST(&stcb->asoc.send_queue); 934163953Srrs if (chk == NULL) { 935163953Srrs stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq; 936163953Srrs } else { 937163953Srrs stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq; 938163953Srrs } 939163953Srrs stcb->asoc.nonce_wait_for_ecne = 0; 940163953Srrs stcb->asoc.nonce_sum_check = 0; 941163953Srrs /* We return 1 if we only have a window probe outstanding */ 942163953Srrs return (0); 943163953Srrs} 944163953Srrs 945163953Srrsstatic void 946163953Srrssctp_move_all_chunks_to_alt(struct sctp_tcb *stcb, 947163953Srrs struct sctp_nets *net, 948163953Srrs struct sctp_nets *alt) 949163953Srrs{ 950163953Srrs struct sctp_association *asoc; 951163953Srrs struct sctp_stream_out *outs; 952163953Srrs struct sctp_tmit_chunk *chk; 953163953Srrs struct sctp_stream_queue_pending *sp; 954163953Srrs 955163953Srrs if (net == alt) 956163953Srrs /* nothing to do */ 957163953Srrs return; 958163953Srrs 959163953Srrs asoc = &stcb->asoc; 960163953Srrs 961163953Srrs /* 962163953Srrs * now through all the streams checking for chunks sent to our bad 963163953Srrs * network. 
964163953Srrs */ 965163953Srrs TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) { 966163953Srrs /* now clean up any chunks here */ 967163953Srrs TAILQ_FOREACH(sp, &outs->outqueue, next) { 968163953Srrs if (sp->net == net) { 969163953Srrs sctp_free_remote_addr(sp->net); 970163953Srrs sp->net = alt; 971163953Srrs atomic_add_int(&alt->ref_count, 1); 972163953Srrs } 973163953Srrs } 974163953Srrs } 975163953Srrs /* Now check the pending queue */ 976163953Srrs TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 977163953Srrs if (chk->whoTo == net) { 978163953Srrs sctp_free_remote_addr(chk->whoTo); 979163953Srrs chk->whoTo = alt; 980163953Srrs atomic_add_int(&alt->ref_count, 1); 981163953Srrs } 982163953Srrs } 983163953Srrs 984163953Srrs} 985163953Srrs 986163953Srrsint 987163953Srrssctp_t3rxt_timer(struct sctp_inpcb *inp, 988163953Srrs struct sctp_tcb *stcb, 989163953Srrs struct sctp_nets *net) 990163953Srrs{ 991163953Srrs struct sctp_nets *alt; 992163953Srrs int win_probe, num_mk; 993163953Srrs 994179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) { 995170744Srrs sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT); 996170744Srrs } 997179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 998163953Srrs struct sctp_nets *lnet; 999163953Srrs 1000163953Srrs TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 1001163953Srrs if (net == lnet) { 1002163953Srrs sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3); 1003163953Srrs } else { 1004163953Srrs sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3); 1005163953Srrs } 1006163953Srrs } 1007163953Srrs } 1008163953Srrs /* Find an alternate and mark those for retransmission */ 1009163953Srrs if ((stcb->asoc.peers_rwnd == 0) && 1010163953Srrs (stcb->asoc.total_flight < net->mtu)) { 1011163953Srrs SCTP_STAT_INCR(sctps_timowindowprobe); 1012163953Srrs win_probe = 1; 1013163953Srrs } else { 1014163953Srrs win_probe = 0; 1015163953Srrs } 1016168709Srrs 1017171440Srrs /* 1018171440Srrs * JRS 
5/14/07 - If CMT PF is on and the destination if not already 1019171440Srrs * in PF state, set the destination to PF state and store the 1020171440Srrs * current time as the time that the destination was last active. In 1021171440Srrs * addition, find an alternate destination with PF-based 1022171440Srrs * find_alt_net(). 1023171440Srrs */ 1024179783Srrs if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) { 1025171440Srrs if ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF) { 1026171440Srrs net->dest_state |= SCTP_ADDR_PF; 1027171477Srrs net->last_active = sctp_get_tick_count(); 1028171440Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from active to PF.\n", 1029171440Srrs net); 1030171440Srrs } 1031171440Srrs alt = sctp_find_alternate_net(stcb, net, 2); 1032179783Srrs } else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) { 1033168709Srrs /* 1034168709Srrs * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being 1035168709Srrs * used, then pick dest with largest ssthresh for any 1036168709Srrs * retransmission. 1037168709Srrs */ 1038168709Srrs alt = net; 1039168709Srrs alt = sctp_find_alternate_net(stcb, alt, 1); 1040168709Srrs /* 1041168709Srrs * CUCv2: If a different dest is picked for the 1042168709Srrs * retransmission, then new (rtx-)pseudo_cumack needs to be 1043168709Srrs * tracked for orig dest. Let CUCv2 track new (rtx-) 1044168709Srrs * pseudo-cumack always. 1045168709Srrs */ 1046168709Srrs net->find_pseudo_cumack = 1; 1047168709Srrs net->find_rtx_pseudo_cumack = 1; 1048168709Srrs } else { /* CMT is OFF */ 1049168709Srrs alt = sctp_find_alternate_net(stcb, net, 0); 1050168709Srrs } 1051168709Srrs 1052169420Srrs (void)sctp_mark_all_for_resend(stcb, net, alt, win_probe, &num_mk); 1053163953Srrs /* FR Loss recovery just ended with the T3. 
*/ 1054163953Srrs stcb->asoc.fast_retran_loss_recovery = 0; 1055163953Srrs 1056163953Srrs /* CMT FR loss recovery ended with the T3 */ 1057163953Srrs net->fast_retran_loss_recovery = 0; 1058163953Srrs 1059163953Srrs /* 1060163953Srrs * setup the sat loss recovery that prevents satellite cwnd advance. 1061163953Srrs */ 1062163953Srrs stcb->asoc.sat_t3_loss_recovery = 1; 1063163953Srrs stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq; 1064163953Srrs 1065163953Srrs /* Backoff the timer and cwnd */ 1066163953Srrs sctp_backoff_on_timeout(stcb, net, win_probe, num_mk); 1067163953Srrs if (win_probe == 0) { 1068163953Srrs /* We don't do normal threshold management on window probes */ 1069163953Srrs if (sctp_threshold_management(inp, stcb, net, 1070163953Srrs stcb->asoc.max_send_times)) { 1071163953Srrs /* Association was destroyed */ 1072163953Srrs return (1); 1073163953Srrs } else { 1074163953Srrs if (net != stcb->asoc.primary_destination) { 1075163953Srrs /* send a immediate HB if our RTO is stale */ 1076163953Srrs struct timeval now; 1077163953Srrs unsigned int ms_goneby; 1078163953Srrs 1079169378Srrs (void)SCTP_GETTIME_TIMEVAL(&now); 1080163953Srrs if (net->last_sent_time.tv_sec) { 1081163953Srrs ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000; 1082163953Srrs } else { 1083163953Srrs ms_goneby = 0; 1084163953Srrs } 1085163953Srrs if ((ms_goneby > net->RTO) || (net->RTO == 0)) { 1086163953Srrs /* 1087163953Srrs * no recent feed back in an RTO or 1088163953Srrs * more, request a RTT update 1089163953Srrs */ 1090171440Srrs if (sctp_send_hb(stcb, 1, net) < 0) 1091185694Srrs /* 1092185694Srrs * Less than 0 means we lost 1093185694Srrs * the assoc 1094185694Srrs */ 1095185694Srrs return (1); 1096163953Srrs } 1097163953Srrs } 1098163953Srrs } 1099163953Srrs } else { 1100163953Srrs /* 1101163953Srrs * For a window probe we don't penalize the net's but only 1102163953Srrs * the association. This may fail it if SACKs are not coming 1103163953Srrs * back. 
If sack's are coming with rwnd locked at 0, we will 1104163953Srrs * continue to hold things waiting for rwnd to raise 1105163953Srrs */ 1106163953Srrs if (sctp_threshold_management(inp, stcb, NULL, 1107163953Srrs stcb->asoc.max_send_times)) { 1108163953Srrs /* Association was destroyed */ 1109163953Srrs return (1); 1110163953Srrs } 1111163953Srrs } 1112163953Srrs if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1113163953Srrs /* Move all pending over too */ 1114163953Srrs sctp_move_all_chunks_to_alt(stcb, net, alt); 1115169352Srrs 1116169352Srrs /* 1117169352Srrs * Get the address that failed, to force a new src address 1118169352Srrs * selecton and a route allocation. 1119169352Srrs */ 1120169352Srrs if (net->ro._s_addr) { 1121169352Srrs sctp_free_ifa(net->ro._s_addr); 1122169352Srrs net->ro._s_addr = NULL; 1123169352Srrs } 1124169352Srrs net->src_addr_selected = 0; 1125169352Srrs 1126169352Srrs /* Force a route allocation too */ 1127169352Srrs if (net->ro.ro_rt) { 1128169352Srrs RTFREE(net->ro.ro_rt); 1129169352Srrs net->ro.ro_rt = NULL; 1130169352Srrs } 1131163953Srrs /* Was it our primary? */ 1132163953Srrs if ((stcb->asoc.primary_destination == net) && (alt != net)) { 1133163953Srrs /* 1134163953Srrs * Yes, note it as such and find an alternate note: 1135163953Srrs * this means HB code must use this to resent the 1136163953Srrs * primary if it goes active AND if someone does a 1137163953Srrs * change-primary then this flag must be cleared 1138163953Srrs * from any net structures. 
1139163953Srrs */ 1140163953Srrs if (sctp_set_primary_addr(stcb, 1141163953Srrs (struct sockaddr *)NULL, 1142163953Srrs alt) == 0) { 1143163953Srrs net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 1144163953Srrs } 1145163953Srrs } 1146179783Srrs } else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf) && (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) { 1147171440Srrs /* 1148171440Srrs * JRS 5/14/07 - If the destination hasn't failed completely 1149171440Srrs * but is in PF state, a PF-heartbeat needs to be sent 1150171440Srrs * manually. 1151171440Srrs */ 1152171440Srrs if (sctp_send_hb(stcb, 1, net) < 0) 1153185694Srrs /* Return less than 0 means we lost the association */ 1154185694Srrs return (1); 1155163953Srrs } 1156163953Srrs /* 1157163953Srrs * Special case for cookie-echo'ed case, we don't do output but must 1158163953Srrs * await the COOKIE-ACK before retransmission 1159163953Srrs */ 1160163953Srrs if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 1161163953Srrs /* 1162163953Srrs * Here we just reset the timer and start again since we 1163163953Srrs * have not established the asoc 1164163953Srrs */ 1165163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 1166163953Srrs return (0); 1167163953Srrs } 1168163953Srrs if (stcb->asoc.peer_supports_prsctp) { 1169163953Srrs struct sctp_tmit_chunk *lchk; 1170163953Srrs 1171163953Srrs lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc); 1172163953Srrs /* C3. See if we need to send a Fwd-TSN */ 1173163953Srrs if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point, 1174163953Srrs stcb->asoc.last_acked_seq, MAX_TSN)) { 1175163953Srrs /* 1176163953Srrs * ISSUE with ECN, see FWD-TSN processing for notes 1177163953Srrs * on issues that will occur when the ECN NONCE 1178163953Srrs * stuff is put into SCTP for cross checking. 
1179163953Srrs */ 1180163953Srrs send_forward_tsn(stcb, &stcb->asoc); 1181163953Srrs if (lchk) { 1182163953Srrs /* Assure a timer is up */ 1183163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo); 1184163953Srrs } 1185163953Srrs } 1186163953Srrs } 1187179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 1188170744Srrs sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX); 1189170744Srrs } 1190163953Srrs return (0); 1191163953Srrs} 1192163953Srrs 1193163953Srrsint 1194163953Srrssctp_t1init_timer(struct sctp_inpcb *inp, 1195163953Srrs struct sctp_tcb *stcb, 1196163953Srrs struct sctp_nets *net) 1197163953Srrs{ 1198163953Srrs /* bump the thresholds */ 1199163953Srrs if (stcb->asoc.delayed_connection) { 1200163953Srrs /* 1201163953Srrs * special hook for delayed connection. The library did NOT 1202163953Srrs * complete the rest of its sends. 1203163953Srrs */ 1204163953Srrs stcb->asoc.delayed_connection = 0; 1205172090Srrs sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); 1206163953Srrs return (0); 1207163953Srrs } 1208163953Srrs if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) { 1209163953Srrs return (0); 1210163953Srrs } 1211163953Srrs if (sctp_threshold_management(inp, stcb, net, 1212163953Srrs stcb->asoc.max_init_times)) { 1213163953Srrs /* Association was destroyed */ 1214163953Srrs return (1); 1215163953Srrs } 1216163953Srrs stcb->asoc.dropped_special_cnt = 0; 1217163953Srrs sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0); 1218163953Srrs if (stcb->asoc.initial_init_rto_max < net->RTO) { 1219163953Srrs net->RTO = stcb->asoc.initial_init_rto_max; 1220163953Srrs } 1221163953Srrs if (stcb->asoc.numnets > 1) { 1222163953Srrs /* If we have more than one addr use it */ 1223163953Srrs struct sctp_nets *alt; 1224163953Srrs 1225163953Srrs alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0); 1226163953Srrs if ((alt != NULL) && (alt != 
stcb->asoc.primary_destination)) { 1227163953Srrs sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt); 1228163953Srrs stcb->asoc.primary_destination = alt; 1229163953Srrs } 1230163953Srrs } 1231163953Srrs /* Send out a new init */ 1232172090Srrs sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED); 1233163953Srrs return (0); 1234163953Srrs} 1235163953Srrs 1236163953Srrs/* 1237163953Srrs * For cookie and asconf we actually need to find and mark for resend, then 1238163953Srrs * increment the resend counter (after all the threshold management stuff of 1239163953Srrs * course). 1240163953Srrs */ 1241163953Srrsint 1242163953Srrssctp_cookie_timer(struct sctp_inpcb *inp, 1243163953Srrs struct sctp_tcb *stcb, 1244163953Srrs struct sctp_nets *net) 1245163953Srrs{ 1246163953Srrs struct sctp_nets *alt; 1247163953Srrs struct sctp_tmit_chunk *cookie; 1248163953Srrs 1249163953Srrs /* first before all else we must find the cookie */ 1250163953Srrs TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) { 1251163953Srrs if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 1252163953Srrs break; 1253163953Srrs } 1254163953Srrs } 1255163953Srrs if (cookie == NULL) { 1256163953Srrs if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 1257163953Srrs /* FOOBAR! 
*/ 1258163953Srrs struct mbuf *oper; 1259163953Srrs 1260163953Srrs oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1261163953Srrs 0, M_DONTWAIT, 1, MT_DATA); 1262163953Srrs if (oper) { 1263163953Srrs struct sctp_paramhdr *ph; 1264163953Srrs uint32_t *ippp; 1265163953Srrs 1266165647Srrs SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 1267163953Srrs sizeof(uint32_t); 1268163953Srrs ph = mtod(oper, struct sctp_paramhdr *); 1269163953Srrs ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1270165647Srrs ph->param_length = htons(SCTP_BUF_LEN(oper)); 1271163953Srrs ippp = (uint32_t *) (ph + 1); 1272171440Srrs *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_3); 1273163953Srrs } 1274171440Srrs inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_4; 1275163953Srrs sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR, 1276172090Srrs oper, SCTP_SO_NOT_LOCKED); 1277163953Srrs } else { 1278165220Srrs#ifdef INVARIANTS 1279163953Srrs panic("Cookie timer expires in wrong state?"); 1280163953Srrs#else 1281169420Srrs SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc)); 1282163953Srrs return (0); 1283163953Srrs#endif 1284163953Srrs } 1285163953Srrs return (0); 1286163953Srrs } 1287163953Srrs /* Ok we found the cookie, threshold management next */ 1288163953Srrs if (sctp_threshold_management(inp, stcb, cookie->whoTo, 1289163953Srrs stcb->asoc.max_init_times)) { 1290163953Srrs /* Assoc is over */ 1291163953Srrs return (1); 1292163953Srrs } 1293163953Srrs /* 1294163953Srrs * cleared theshold management now lets backoff the address & select 1295163953Srrs * an alternate 1296163953Srrs */ 1297163953Srrs stcb->asoc.dropped_special_cnt = 0; 1298163953Srrs sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0); 1299163953Srrs alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0); 1300163953Srrs if (alt != cookie->whoTo) { 1301163953Srrs sctp_free_remote_addr(cookie->whoTo); 1302163953Srrs 
cookie->whoTo = alt; 1303163953Srrs atomic_add_int(&alt->ref_count, 1); 1304163953Srrs } 1305163953Srrs /* Now mark the retran info */ 1306163953Srrs if (cookie->sent != SCTP_DATAGRAM_RESEND) { 1307163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1308163953Srrs } 1309163953Srrs cookie->sent = SCTP_DATAGRAM_RESEND; 1310163953Srrs /* 1311163953Srrs * Now call the output routine to kick out the cookie again, Note we 1312163953Srrs * don't mark any chunks for retran so that FR will need to kick in 1313163953Srrs * to move these (or a send timer). 1314163953Srrs */ 1315163953Srrs return (0); 1316163953Srrs} 1317163953Srrs 1318163953Srrsint 1319163953Srrssctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1320163953Srrs struct sctp_nets *net) 1321163953Srrs{ 1322163953Srrs struct sctp_nets *alt; 1323163953Srrs struct sctp_tmit_chunk *strrst = NULL, *chk = NULL; 1324163953Srrs 1325163953Srrs if (stcb->asoc.stream_reset_outstanding == 0) { 1326163953Srrs return (0); 1327163953Srrs } 1328163953Srrs /* find the existing STRRESET, we use the seq number we sent out on */ 1329169420Srrs (void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst); 1330163953Srrs if (strrst == NULL) { 1331163953Srrs return (0); 1332163953Srrs } 1333163953Srrs /* do threshold management */ 1334163953Srrs if (sctp_threshold_management(inp, stcb, strrst->whoTo, 1335163953Srrs stcb->asoc.max_send_times)) { 1336163953Srrs /* Assoc is over */ 1337163953Srrs return (1); 1338163953Srrs } 1339163953Srrs /* 1340163953Srrs * cleared theshold management now lets backoff the address & select 1341163953Srrs * an alternate 1342163953Srrs */ 1343163953Srrs sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0); 1344163953Srrs alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0); 1345163953Srrs sctp_free_remote_addr(strrst->whoTo); 1346163953Srrs strrst->whoTo = alt; 1347163953Srrs atomic_add_int(&alt->ref_count, 1); 1348163953Srrs 1349163953Srrs /* See if a ECN Echo is 
also stranded */ 1350163953Srrs TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1351163953Srrs if ((chk->whoTo == net) && 1352163953Srrs (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1353163953Srrs sctp_free_remote_addr(chk->whoTo); 1354163953Srrs if (chk->sent != SCTP_DATAGRAM_RESEND) { 1355163953Srrs chk->sent = SCTP_DATAGRAM_RESEND; 1356163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1357163953Srrs } 1358163953Srrs chk->whoTo = alt; 1359163953Srrs atomic_add_int(&alt->ref_count, 1); 1360163953Srrs } 1361163953Srrs } 1362163953Srrs if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1363163953Srrs /* 1364163953Srrs * If the address went un-reachable, we need to move to 1365163953Srrs * alternates for ALL chk's in queue 1366163953Srrs */ 1367163953Srrs sctp_move_all_chunks_to_alt(stcb, net, alt); 1368163953Srrs } 1369163953Srrs /* mark the retran info */ 1370163953Srrs if (strrst->sent != SCTP_DATAGRAM_RESEND) 1371163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1372163953Srrs strrst->sent = SCTP_DATAGRAM_RESEND; 1373163953Srrs 1374163953Srrs /* restart the timer */ 1375163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo); 1376163953Srrs return (0); 1377163953Srrs} 1378163953Srrs 1379163953Srrsint 1380163953Srrssctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1381163953Srrs struct sctp_nets *net) 1382163953Srrs{ 1383163953Srrs struct sctp_nets *alt; 1384179157Srrs struct sctp_tmit_chunk *asconf, *chk, *nchk; 1385163953Srrs 1386171572Srrs /* is this a first send, or a retransmission? 
*/ 1387179157Srrs if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) { 1388163953Srrs /* compose a new ASCONF chunk and send it */ 1389172190Srrs sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED); 1390163953Srrs } else { 1391171572Srrs /* 1392171572Srrs * Retransmission of the existing ASCONF is needed 1393171572Srrs */ 1394163953Srrs 1395163953Srrs /* find the existing ASCONF */ 1396179157Srrs asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue); 1397163953Srrs if (asconf == NULL) { 1398163953Srrs return (0); 1399163953Srrs } 1400163953Srrs /* do threshold management */ 1401163953Srrs if (sctp_threshold_management(inp, stcb, asconf->whoTo, 1402163953Srrs stcb->asoc.max_send_times)) { 1403163953Srrs /* Assoc is over */ 1404163953Srrs return (1); 1405163953Srrs } 1406163953Srrs if (asconf->snd_count > stcb->asoc.max_send_times) { 1407163953Srrs /* 1408171572Srrs * Something is rotten: our peer is not responding 1409171572Srrs * to ASCONFs but apparently is to other chunks. 1410171572Srrs * i.e. it is not properly handling the chunk type 1411171572Srrs * upper bits. Mark this peer as ASCONF incapable 1412171572Srrs * and cleanup. 
1413163953Srrs */ 1414169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n"); 1415163953Srrs sctp_asconf_cleanup(stcb, net); 1416163953Srrs return (0); 1417163953Srrs } 1418163953Srrs /* 1419171572Srrs * cleared threshold management, so now backoff the net and 1420171572Srrs * select an alternate 1421163953Srrs */ 1422163953Srrs sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0); 1423163953Srrs alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0); 1424179157Srrs if (asconf->whoTo != alt) { 1425179157Srrs sctp_free_remote_addr(asconf->whoTo); 1426179157Srrs asconf->whoTo = alt; 1427179157Srrs atomic_add_int(&alt->ref_count, 1); 1428179157Srrs } 1429171572Srrs /* See if an ECN Echo is also stranded */ 1430163953Srrs TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1431163953Srrs if ((chk->whoTo == net) && 1432163953Srrs (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1433163953Srrs sctp_free_remote_addr(chk->whoTo); 1434163953Srrs chk->whoTo = alt; 1435163953Srrs if (chk->sent != SCTP_DATAGRAM_RESEND) { 1436163953Srrs chk->sent = SCTP_DATAGRAM_RESEND; 1437163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1438163953Srrs } 1439163953Srrs atomic_add_int(&alt->ref_count, 1); 1440163953Srrs } 1441163953Srrs } 1442179157Srrs for (chk = asconf; chk; chk = nchk) { 1443179157Srrs nchk = TAILQ_NEXT(chk, sctp_next); 1444179157Srrs if (chk->whoTo != alt) { 1445179157Srrs sctp_free_remote_addr(chk->whoTo); 1446179157Srrs chk->whoTo = alt; 1447179157Srrs atomic_add_int(&alt->ref_count, 1); 1448179157Srrs } 1449179157Srrs if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT) 1450179157Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1451179157Srrs chk->sent = SCTP_DATAGRAM_RESEND; 1452179157Srrs } 1453163953Srrs if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1454163953Srrs /* 1455163953Srrs * If the address went un-reachable, we need to move 1456171572Srrs * to the alternate 
for ALL chunks in queue 1457163953Srrs */ 1458163953Srrs sctp_move_all_chunks_to_alt(stcb, net, alt); 1459179157Srrs net = alt; 1460163953Srrs } 1461163953Srrs /* mark the retran info */ 1462163953Srrs if (asconf->sent != SCTP_DATAGRAM_RESEND) 1463163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1464163953Srrs asconf->sent = SCTP_DATAGRAM_RESEND; 1465179157Srrs 1466179157Srrs /* send another ASCONF if any and we can do */ 1467179157Srrs sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED); 1468163953Srrs } 1469163953Srrs return (0); 1470163953Srrs} 1471163953Srrs 1472172091Srrs/* Mobility adaptation */ 1473172156Srrsvoid 1474172091Srrssctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1475172091Srrs struct sctp_nets *net) 1476172091Srrs{ 1477172091Srrs if (stcb->asoc.deleted_primary == NULL) { 1478172091Srrs SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n"); 1479172091Srrs sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1480172156Srrs return; 1481172091Srrs } 1482172091Srrs SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary "); 1483172091Srrs SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa); 1484172091Srrs sctp_free_remote_addr(stcb->asoc.deleted_primary); 1485172091Srrs stcb->asoc.deleted_primary = NULL; 1486172091Srrs sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED); 1487172156Srrs return; 1488172091Srrs} 1489172091Srrs 1490163953Srrs/* 1491163953Srrs * For the shutdown and shutdown-ack, we do not keep one around on the 1492163953Srrs * control queue. This means we must generate a new one and call the general 1493163953Srrs * chunk output routine, AFTER having done threshold management. 
1494163953Srrs */ 1495163953Srrsint 1496163953Srrssctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1497163953Srrs struct sctp_nets *net) 1498163953Srrs{ 1499163953Srrs struct sctp_nets *alt; 1500163953Srrs 1501163953Srrs /* first threshold managment */ 1502163953Srrs if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1503163953Srrs /* Assoc is over */ 1504163953Srrs return (1); 1505163953Srrs } 1506163953Srrs /* second select an alternative */ 1507163953Srrs alt = sctp_find_alternate_net(stcb, net, 0); 1508163953Srrs 1509163953Srrs /* third generate a shutdown into the queue for out net */ 1510163953Srrs if (alt) { 1511163953Srrs sctp_send_shutdown(stcb, alt); 1512163953Srrs } else { 1513163953Srrs /* 1514163953Srrs * if alt is NULL, there is no dest to send to?? 1515163953Srrs */ 1516163953Srrs return (0); 1517163953Srrs } 1518163953Srrs /* fourth restart timer */ 1519163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt); 1520163953Srrs return (0); 1521163953Srrs} 1522163953Srrs 1523163953Srrsint 1524163953Srrssctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1525163953Srrs struct sctp_nets *net) 1526163953Srrs{ 1527163953Srrs struct sctp_nets *alt; 1528163953Srrs 1529163953Srrs /* first threshold managment */ 1530163953Srrs if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1531163953Srrs /* Assoc is over */ 1532163953Srrs return (1); 1533163953Srrs } 1534163953Srrs /* second select an alternative */ 1535163953Srrs alt = sctp_find_alternate_net(stcb, net, 0); 1536163953Srrs 1537163953Srrs /* third generate a shutdown into the queue for out net */ 1538163953Srrs sctp_send_shutdown_ack(stcb, alt); 1539163953Srrs 1540163953Srrs /* fourth restart timer */ 1541163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt); 1542163953Srrs return (0); 1543163953Srrs} 1544163953Srrs 1545163953Srrsstatic void 
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb)
{
	struct sctp_stream_out *outs;
	struct sctp_stream_queue_pending *sp;
	unsigned int chks_in_queue = 0;
	int being_filled = 0;

	/*
	 * This function is ONLY called when the send/sent queues are empty.
	 * It sanity-checks the stream output wheel against the queued-data
	 * accounting, repairs streams that fell off the wheel, and kicks the
	 * output path if data is actually stuck.
	 */
	if ((stcb == NULL) || (inp == NULL))
		return;

	if (stcb->asoc.sent_queue_retran_cnt) {
		/* Sent queue is empty, so any retran count is stale. */
		SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
		    stcb->asoc.sent_queue_retran_cnt);
		stcb->asoc.sent_queue_retran_cnt = 0;
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
		int i, cnt = 0;

		/* Check to see if a spoke fell off the wheel */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
				/* Re-attach the stream so it can be serviced. */
				sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
				cnt++;
			}
		}
		if (cnt) {
			/* yep, we lost a spoke or two */
			SCTP_PRINTF("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
		} else {
			/* no spokes lost, so the queue-size counter is bogus */
			stcb->asoc.total_output_queue_size = 0;
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
		return;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	/* Check to see if some data queued, if so report it */
	TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
		if (!TAILQ_EMPTY(&outs->outqueue)) {
			TAILQ_FOREACH(sp, &outs->outqueue, next) {
				/* Count complete vs still-being-filled messages. */
				if (sp->msg_is_complete)
					being_filled++;
				chks_in_queue++;
			}
		}
	}
	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
		SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
		    stcb->asoc.stream_queue_cnt, chks_in_queue);
	}
	if (chks_in_queue) {
		/* call the output queue function */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
			/*
			 * Probably should go in and make it go back through
			 * and add fragments allowed
			 */
			if (being_filled == 0) {
				SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
				    chks_in_queue);
			}
		}
	} else {
		/* Wheel had spokes but no chunks: reset the stale counter. */
		SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
		    (u_long)stcb->asoc.total_output_queue_size);
		stcb->asoc.total_output_queue_size = 0;
	}
}

/*
 * Heartbeat timer.  Backs off the RTO on a silent net, audits the stream
 * queues when accounting says data is queued but both send queues are
 * empty, then sends heartbeat(s): a single HB when no address is
 * unconfirmed, otherwise up to sctp_hb_maxburst HBs across the unconfirmed
 * reachable nets.  Returns 1 if sending a HB tore the association down
 * (sctp_send_hb() < 0), else 0.
 */
int
sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int cnt_of_unconf)
{
	int ret;

	if (net) {
		if (net->hb_responded == 0) {
			if (net->ro._s_addr) {
				/*
				 * Invalidate the src address if we did not
				 * get a response last time.
				 */
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
			}
			sctp_backoff_on_timeout(stcb, net, 1, 0);
		}
		/* Zero PBA, if it needs it */
		if (net->partial_bytes_acked) {
			net->partial_bytes_acked = 0;
		}
	}
	if ((stcb->asoc.total_output_queue_size > 0) &&
	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
		/* Accounting says data queued but nothing on either queue. */
		sctp_audit_stream_queues_for_size(inp, stcb);
	}
	/* Send a new HB, this will do threshold management, pick a new dest */
	if (cnt_of_unconf == 0) {
		if (sctp_send_hb(stcb, 0, NULL) < 0) {
			return (1);
		}
	} else {
		/*
		 * this will send out extra hb's up to maxburst if there are
		 * any unconfirmed addresses.
		 */
		uint32_t cnt_sent = 0;

		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    (net->dest_state & SCTP_ADDR_REACHABLE)) {
				cnt_sent++;
				if (net->hb_responded == 0) {
					/* Did we respond last time? */
					if (net->ro._s_addr) {
						/* No response: drop cached source addr. */
						sctp_free_ifa(net->ro._s_addr);
						net->ro._s_addr = NULL;
						net->src_addr_selected = 0;
					}
				}
				ret = sctp_send_hb(stcb, 1, net);
				if (ret < 0)
					return 1;
				else if (ret == 0) {
					break;
				}
				if (cnt_sent >= SCTP_BASE_SYSCTL(sctp_hb_maxburst))
					break;
			}
		}
	}
	return (0);
}

/* Returns 1 if the association's heartbeat timer is pending, else 0. */
int
sctp_is_hb_timer_running(struct sctp_tcb *stcb)
{
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.hb_timer.timer)) {
		/* its running */
		return (1);
	} else {
		/* nope */
		return (0);
	}
}

/* Returns 1 if the association's delayed-SACK timer is pending, else 0. */
int
sctp_is_sack_timer_running(struct sctp_tcb *stcb)
{
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		/* its running */
		return (1);
	} else {
		/* nope */
		return (0);
	}
}

/*
 * Table of well-known link MTU plateaus (ascending) used when probing the
 * path MTU upward; 18 entries, must match SCTP_NUMBER_OF_MTU_SIZES.
 */
#define SCTP_NUMBER_OF_MTU_SIZES 18
static uint32_t mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};


static uint32_t
sctp_getnext_mtu(struct sctp_inpcb *inp, uint32_t cur_mtu)
{
	/* select another MTU that is just bigger than this one */
	int i;

	/* First table entry strictly greater than cur_mtu wins. */
	for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
		if (cur_mtu < mtu_sizes[i]) {
			/* no max_mtu is bigger than this one */
			return (mtu_sizes[i]);
		}
	}
	/* here return the highest allowable */
	return (cur_mtu);
}


/*
 * Path-MTU raise timer: try to step net->mtu up to the next plateau when
 * the route allows it (only for nets not tunneled over UDP, net->port == 0),
 * refreshing the cached source address if it was deleted or never selected.
 * Always re-arms itself at the end.
 */
void
sctp_pathmtu_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t next_mtu, mtu;

	next_mtu = sctp_getnext_mtu(inp, net->mtu);

	if ((next_mtu > net->mtu) && (net->port == 0)) {
		if ((net->src_addr_selected == 0) ||
		    (net->ro._s_addr == NULL) ||
		    (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
			if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
				/* Cached ifa is going away; drop it and retry later. */
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
			} else if (net->ro._s_addr == NULL) {
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;

					/* KAME hack: embed scopeid */
					(void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
				}
#endif

				net->ro._s_addr = sctp_source_address_selection(inp,
				    stcb,
				    (sctp_route_t *) & net->ro,
				    net, 0, stcb->asoc.vrf_id);
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;

					/* Undo the scope-id embedding done above. */
					(void)sa6_recoverscope(sin6);
				}
#endif				/* INET6 */
			}
			if (net->ro._s_addr)
				net->src_addr_selected = 1;
		}
		if (net->ro._s_addr) {
			/*
			 * NOTE(review): second argument is `&net->ro._s_addr.sa`
			 * although _s_addr is a pointer elsewhere in this file
			 * (sctp_free_ifa(net->ro._s_addr)); this only compiles if
			 * the macro ignores that argument — confirm against the
			 * SCTP_GATHER_MTU_FROM_ROUTE definition in sctp_os.h.
			 */
			mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt);
			if (net->port) {
				/*
				 * NOTE(review): unreachable — the enclosing
				 * branch requires net->port == 0.  UDP header
				 * adjustment is kept for symmetry with other
				 * MTU computations; verify intent.
				 */
				mtu -= sizeof(struct udphdr);
			}
			if (mtu > next_mtu) {
				/* Route supports it: step up to the next plateau. */
				net->mtu = next_mtu;
			}
		}
	}
	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
}

/*
 * Autoclose timer: if the association has been idle (no send or receive)
 * for at least sctp_autoclose_ticks, flush output and, once both send
 * queues are clean, initiate SHUTDOWN; otherwise re-arm for the remaining
 * time.
 */
void
sctp_autoclose_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct timeval tn, *tim_touse;
	struct sctp_association *asoc;
	int ticks_gone_by;

	(void)SCTP_GETTIME_TIMEVAL(&tn);
	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		/* Auto close is on */
		asoc = &stcb->asoc;
		/* pick the time to use: most recent of last-recv / last-send */
		if (asoc->time_last_rcvd.tv_sec >
		    asoc->time_last_sent.tv_sec) {
			tim_touse = &asoc->time_last_rcvd;
		} else {
			tim_touse = &asoc->time_last_sent;
		}
		/* Now has long enough transpired to autoclose?
 */
		ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec);
		if ((ticks_gone_by > 0) &&
		    (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
			/*
			 * autoclose time has hit, call the output routine,
			 * which should do nothing just to be SURE we don't
			 * have hanging data. We can then safely check the
			 * queues and know that we are clear to send
			 * shutdown
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
			/* Are we clean? */
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue)) {
				/*
				 * there is nothing queued to send, so I'm
				 * done...
				 */
				if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
					/* only send SHUTDOWN 1st time thru */
					sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						/* Leaving established state: drop gauge. */
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			}
		} else {
			/*
			 * No auto close at this time, reset t-o to check
			 * later
			 */
			int tmp;

			/* fool the timer startup to use the time left */
			tmp = asoc->sctp_autoclose_ticks;
			asoc->sctp_autoclose_ticks -= ticks_gone_by;
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    net);
			/* restore the real tick value */
			asoc->sctp_autoclose_ticks = tmp;
		}
	}
}

/*
 * Iterator timer: walks endpoints (and their associations) applying the
 * iterator's callbacks, bounded to SCTP_ITERATOR_MAX_AT_ONCE associations
 * per invocation; when the bound is hit it re-arms itself and resumes
 * later.  Lock protocol visible here: SCTP_ITERATOR_LOCK is held for the
 * whole pass, endpoints are marked via inp_starting_point_for_iterator
 * under SCTP_INP_WLOCK, associations via stcb_starting_point_for_iterator
 * under SCTP_TCB_LOCK.
 */
void
sctp_iterator_timer(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	/*
	 * only one iterator can run at a time. This is the only way we can
	 * cleanly pull ep's from underneath all the running interators when
	 * a ep is freed.
	 */
	SCTP_ITERATOR_LOCK();
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_WLOCK();
		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
		/* stopping the callout is not needed, in theory */
		SCTP_INP_INFO_WUNLOCK();
		(void)SCTP_OS_TIMER_STOP(&it->tmr.timer);
		if (it->function_atend != NULL) {
			/* completion callback before the iterator is freed */
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}
	if ((it->inp->inp_starting_point_for_iterator != NULL) &&
	    (it->inp->inp_starting_point_for_iterator != it)) {
		/* another iterator is parked on this endpoint; retry later */
		SCTP_PRINTF("Iterator collision, waiting for one at %p\n",
		    it->inp);
		SCTP_INP_WUNLOCK(it->inp);
		goto start_timer_return;
	}
	/* mark the current iterator on the endpoint */
	it->inp->inp_starting_point_for_iterator = it;
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		/* run the per-endpoint callback once per endpoint */
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	SCTP_INP_RUNLOCK(it->inp);
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		goto no_stcb;
	}
	if ((it->stcb) &&
	    (it->stcb->asoc.stcb_starting_point_for_iterator == it)) {
		/* resuming: clear our old resume marker */
		it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* mark the current iterator on the assoc */
		it->stcb->asoc.stcb_starting_point_for_iterator = it;
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
	start_timer_return:
			/* set a timer to continue this later */
			if (it->stcb)
				SCTP_TCB_UNLOCK(it->stcb);
			sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR,
			    (struct sctp_inpcb *)it, NULL, NULL);
			SCTP_ITERATOR_UNLOCK();
			return;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't effect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			/* finished the endpoint's assocs: per-endpoint end hook */
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
no_stcb:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	SCTP_INP_WLOCK(it->inp);
	it->inp->inp_starting_point_for_iterator = NULL;
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}