sctp_timer.c revision 170642
1163953Srrs/*- 2169382Srrs * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved. 3163953Srrs * 4163953Srrs * Redistribution and use in source and binary forms, with or without 5163953Srrs * modification, are permitted provided that the following conditions are met: 6163953Srrs * 7163953Srrs * a) Redistributions of source code must retain the above copyright notice, 8163953Srrs * this list of conditions and the following disclaimer. 9163953Srrs * 10163953Srrs * b) Redistributions in binary form must reproduce the above copyright 11163953Srrs * notice, this list of conditions and the following disclaimer in 12163953Srrs * the documentation and/or other materials provided with the distribution. 13163953Srrs * 14163953Srrs * c) Neither the name of Cisco Systems, Inc. nor the names of its 15163953Srrs * contributors may be used to endorse or promote products derived 16163953Srrs * from this software without specific prior written permission. 17163953Srrs * 18163953Srrs * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19163953Srrs * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 20163953Srrs * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21163953Srrs * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 22163953Srrs * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 23163953Srrs * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 24163953Srrs * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 25163953Srrs * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 26163953Srrs * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 27163953Srrs * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 28163953Srrs * THE POSSIBILITY OF SUCH DAMAGE. 29163953Srrs */ 30163953Srrs 31163953Srrs/* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $ */ 32163953Srrs 33163953Srrs#include <sys/cdefs.h> 34163953Srrs__FBSDID("$FreeBSD: head/sys/netinet/sctp_timer.c 170642 2007-06-13 01:31:53Z rrs $"); 35163953Srrs 36163953Srrs#define _IP_VHL 37166086Srrs#include <netinet/sctp_os.h> 38163953Srrs#include <netinet/sctp_pcb.h> 39163953Srrs#ifdef INET6 40163953Srrs#include <netinet6/sctp6_var.h> 41163953Srrs#endif 42163953Srrs#include <netinet/sctp_var.h> 43167598Srrs#include <netinet/sctp_sysctl.h> 44163953Srrs#include <netinet/sctp_timer.h> 45163953Srrs#include <netinet/sctputil.h> 46163953Srrs#include <netinet/sctp_output.h> 47163953Srrs#include <netinet/sctp_header.h> 48163953Srrs#include <netinet/sctp_indata.h> 49163953Srrs#include <netinet/sctp_asconf.h> 50163953Srrs#include <netinet/sctp_input.h> 51163953Srrs#include <netinet/sctp.h> 52163953Srrs#include <netinet/sctp_uio.h> 53163953Srrs 54163953Srrs 55163953Srrs 56163953Srrsvoid 57163953Srrssctp_early_fr_timer(struct sctp_inpcb *inp, 58163953Srrs struct sctp_tcb *stcb, 59163953Srrs struct sctp_nets *net) 60163953Srrs{ 61163953Srrs struct sctp_tmit_chunk *chk, *tp2; 62163953Srrs struct timeval now, min_wait, tv; 63163953Srrs unsigned int cur_rtt, cnt = 0, cnt_resend = 0; 64163953Srrs 65163953Srrs /* an early FR is occuring. */ 66169378Srrs (void)SCTP_GETTIME_TIMEVAL(&now); 67163953Srrs /* get cur rto in micro-seconds */ 68163953Srrs if (net->lastsa == 0) { 69163953Srrs /* Hmm no rtt estimate yet? */ 70163953Srrs cur_rtt = stcb->asoc.initial_rto >> 2; 71163953Srrs } else { 72163953Srrs 73163953Srrs cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 74163953Srrs } 75163953Srrs if (cur_rtt < sctp_early_fr_msec) { 76163953Srrs cur_rtt = sctp_early_fr_msec; 77163953Srrs } 78163953Srrs cur_rtt *= 1000; 79163953Srrs tv.tv_sec = cur_rtt / 1000000; 80163953Srrs tv.tv_usec = cur_rtt % 1000000; 81163953Srrs min_wait = now; 82163953Srrs timevalsub(&min_wait, &tv); 83163953Srrs if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { 84163953Srrs /* 85163953Srrs * if we hit here, we don't have enough seconds on the clock 86163953Srrs * to account for the RTO. We just let the lower seconds be 87163953Srrs * the bounds and don't worry about it. This may mean we 88163953Srrs * will mark a lot more than we should. 89163953Srrs */ 90163953Srrs min_wait.tv_sec = min_wait.tv_usec = 0; 91163953Srrs } 92163953Srrs chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead); 93163953Srrs for (; chk != NULL; chk = tp2) { 94163953Srrs tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next); 95163953Srrs if (chk->whoTo != net) { 96163953Srrs continue; 97163953Srrs } 98163953Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) 99163953Srrs cnt_resend++; 100163953Srrs else if ((chk->sent > SCTP_DATAGRAM_UNSENT) && 101163953Srrs (chk->sent < SCTP_DATAGRAM_RESEND)) { 102163953Srrs /* pending, may need retran */ 103163953Srrs if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) { 104163953Srrs /* 105163953Srrs * we have reached a chunk that was sent 106163953Srrs * some seconds past our min.. forget it we 107163953Srrs * will find no more to send. 108163953Srrs */ 109163953Srrs continue; 110163953Srrs } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) { 111163953Srrs /* 112163953Srrs * we must look at the micro seconds to 113163953Srrs * know. 114163953Srrs */ 115163953Srrs if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { 116163953Srrs /* 117163953Srrs * ok it was sent after our boundary 118163953Srrs * time. 119163953Srrs */ 120163953Srrs continue; 121163953Srrs } 122163953Srrs } 123163953Srrs#ifdef SCTP_EARLYFR_LOGGING 124163953Srrs sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, 125163953Srrs 4, SCTP_FR_MARKED_EARLY); 126163953Srrs#endif 127163953Srrs SCTP_STAT_INCR(sctps_earlyfrmrkretrans); 128163953Srrs chk->sent = SCTP_DATAGRAM_RESEND; 129163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 130163953Srrs /* double book size since we are doing an early FR */ 131163953Srrs chk->book_size_scale++; 132163953Srrs cnt += chk->send_size; 133163953Srrs if ((cnt + net->flight_size) > net->cwnd) { 134163953Srrs /* Mark all we could possibly resend */ 135163953Srrs break; 136163953Srrs } 137163953Srrs } 138163953Srrs } 139163953Srrs if (cnt) { 140163953Srrs#ifdef SCTP_CWND_MONITOR 141163953Srrs int old_cwnd; 142163953Srrs 143163953Srrs old_cwnd = net->cwnd; 144163953Srrs#endif 145163953Srrs sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR); 146163953Srrs /* 147163953Srrs * make a small adjustment to cwnd and force to CA. 148163953Srrs */ 149163953Srrs 150163953Srrs if (net->cwnd > net->mtu) 151163953Srrs /* drop down one MTU after sending */ 152163953Srrs net->cwnd -= net->mtu; 153163953Srrs if (net->cwnd < net->ssthresh) 154163953Srrs /* still in SS move to CA */ 155163953Srrs net->ssthresh = net->cwnd - 1; 156163953Srrs#ifdef SCTP_CWND_MONITOR 157163953Srrs sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR); 158163953Srrs#endif 159163953Srrs } else if (cnt_resend) { 160163953Srrs sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR); 161163953Srrs } 162163953Srrs /* Restart it? */ 163163953Srrs if (net->flight_size < net->cwnd) { 164163953Srrs SCTP_STAT_INCR(sctps_earlyfrstrtmr); 165163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 166163953Srrs } 167163953Srrs} 168163953Srrs 169163953Srrsvoid 170163953Srrssctp_audit_retranmission_queue(struct sctp_association *asoc) 171163953Srrs{ 172163953Srrs struct sctp_tmit_chunk *chk; 173163953Srrs 174169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n", 175169420Srrs asoc->sent_queue_retran_cnt, 176169420Srrs asoc->sent_queue_cnt); 177163953Srrs asoc->sent_queue_retran_cnt = 0; 178163953Srrs asoc->sent_queue_cnt = 0; 179163953Srrs TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) { 180163953Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) { 181163953Srrs sctp_ucount_incr(asoc->sent_queue_retran_cnt); 182163953Srrs } 183163953Srrs asoc->sent_queue_cnt++; 184163953Srrs } 185163953Srrs TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) { 186163953Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) { 187163953Srrs sctp_ucount_incr(asoc->sent_queue_retran_cnt); 188163953Srrs } 189163953Srrs } 190169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n", 191169420Srrs asoc->sent_queue_retran_cnt, 192169420Srrs asoc->sent_queue_cnt); 193163953Srrs} 194163953Srrs 195163953Srrsint 196163953Srrssctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 197163953Srrs struct sctp_nets *net, uint16_t threshold) 198163953Srrs{ 199163953Srrs if (net) { 200163953Srrs net->error_count++; 201169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n", 202169420Srrs net, net->error_count, 203169420Srrs net->failure_threshold); 204163953Srrs if (net->error_count > net->failure_threshold) { 205163953Srrs /* We had a threshold failure */ 206163953Srrs if (net->dest_state & SCTP_ADDR_REACHABLE) { 207163953Srrs net->dest_state &= ~SCTP_ADDR_REACHABLE; 208163953Srrs net->dest_state |= SCTP_ADDR_NOT_REACHABLE; 209167598Srrs net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY; 210163953Srrs if (net == stcb->asoc.primary_destination) { 211163953Srrs net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 212163953Srrs } 213163953Srrs sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN, 214163953Srrs stcb, 215163953Srrs SCTP_FAILED_THRESHOLD, 216163953Srrs (void *)net); 217163953Srrs } 218163953Srrs } 219163953Srrs /*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE 220163953Srrs *********ROUTING CODE 221163953Srrs */ 222163953Srrs /*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE 223163953Srrs *********ROUTING CODE 224163953Srrs */ 225163953Srrs } 226163953Srrs if (stcb == NULL) 227163953Srrs return (0); 228163953Srrs 229163953Srrs if (net) { 230163953Srrs if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) { 231163953Srrs stcb->asoc.overall_error_count++; 232163953Srrs } 233163953Srrs } else { 234163953Srrs stcb->asoc.overall_error_count++; 235163953Srrs } 236169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n", 237169420Srrs &stcb->asoc, stcb->asoc.overall_error_count, 238169420Srrs (uint32_t) threshold, 239169420Srrs ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state)); 240163953Srrs /* 241163953Srrs * We specifically do not do >= to give the assoc one more change 242163953Srrs * before we fail it. 243163953Srrs */ 244163953Srrs if (stcb->asoc.overall_error_count > threshold) { 245163953Srrs /* Abort notification sends a ULP notify */ 246163953Srrs struct mbuf *oper; 247163953Srrs 248163953Srrs oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 249163953Srrs 0, M_DONTWAIT, 1, MT_DATA); 250163953Srrs if (oper) { 251163953Srrs struct sctp_paramhdr *ph; 252163953Srrs uint32_t *ippp; 253163953Srrs 254165647Srrs SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 255163953Srrs sizeof(uint32_t); 256163953Srrs ph = mtod(oper, struct sctp_paramhdr *); 257163953Srrs ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 258165647Srrs ph->param_length = htons(SCTP_BUF_LEN(oper)); 259163953Srrs ippp = (uint32_t *) (ph + 1); 260165220Srrs *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1); 261163953Srrs } 262165220Srrs inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1; 263163953Srrs sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper); 264163953Srrs return (1); 265163953Srrs } 266163953Srrs return (0); 267163953Srrs} 268163953Srrs 269163953Srrsstruct sctp_nets * 270163953Srrssctp_find_alternate_net(struct sctp_tcb *stcb, 271163953Srrs struct sctp_nets *net, 272163953Srrs int highest_ssthresh) 273163953Srrs{ 274163953Srrs /* Find and return an alternate network if possible */ 275163953Srrs struct sctp_nets *alt, *mnet, *hthresh = NULL; 276163953Srrs int once; 277163953Srrs uint32_t val = 0; 278163953Srrs 279163953Srrs if (stcb->asoc.numnets == 1) { 280163953Srrs /* No others but net */ 281163953Srrs return (TAILQ_FIRST(&stcb->asoc.nets)); 282163953Srrs } 283163953Srrs if (highest_ssthresh) { 284163953Srrs TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) { 285163953Srrs if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) || 286163953Srrs (mnet->dest_state & SCTP_ADDR_UNCONFIRMED) 287163953Srrs ) { 288163953Srrs /* 289163953Srrs * will skip ones that are not-reachable or 290163953Srrs * unconfirmed 291163953Srrs */ 292163953Srrs continue; 293163953Srrs } 294170181Srrs if (val < mnet->ssthresh) { 295163953Srrs hthresh = mnet; 296163953Srrs val = mnet->ssthresh; 297163953Srrs } else if (val == mnet->ssthresh) { 298163953Srrs uint32_t rndval; 299163953Srrs uint8_t this_random; 300163953Srrs 301163953Srrs if (stcb->asoc.hb_random_idx > 3) { 302163953Srrs rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep); 303163953Srrs memcpy(stcb->asoc.hb_random_values, &rndval, 304163953Srrs sizeof(stcb->asoc.hb_random_values)); 305163953Srrs this_random = stcb->asoc.hb_random_values[0]; 306163953Srrs stcb->asoc.hb_random_idx = 0; 307163953Srrs stcb->asoc.hb_ect_randombit = 0; 308163953Srrs } else { 309163953Srrs this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx]; 310163953Srrs stcb->asoc.hb_random_idx++; 311163953Srrs stcb->asoc.hb_ect_randombit = 0; 312163953Srrs } 313163953Srrs if (this_random % 2) { 314163953Srrs hthresh = mnet; 315163953Srrs val = mnet->ssthresh; 316163953Srrs } 317163953Srrs } 318163953Srrs } 319163953Srrs if (hthresh) { 320163953Srrs return (hthresh); 321163953Srrs } 322163953Srrs } 323163953Srrs mnet = net; 324163953Srrs once = 0; 325163953Srrs 326163953Srrs if (mnet == NULL) { 327163953Srrs mnet = TAILQ_FIRST(&stcb->asoc.nets); 328163953Srrs } 329163953Srrs do { 330163953Srrs alt = TAILQ_NEXT(mnet, sctp_next); 331163953Srrs if (alt == NULL) { 332163953Srrs once++; 333163953Srrs if (once > 1) { 334163953Srrs break; 335163953Srrs } 336163953Srrs alt = TAILQ_FIRST(&stcb->asoc.nets); 337163953Srrs } 338163953Srrs if (alt->ro.ro_rt == NULL) { 339167598Srrs if (alt->ro._s_addr) { 340167598Srrs sctp_free_ifa(alt->ro._s_addr); 341167598Srrs alt->ro._s_addr = NULL; 342169352Srrs 343167598Srrs } 344163953Srrs alt->src_addr_selected = 0; 345163953Srrs } 346163953Srrs if ( 347163953Srrs ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) && 348163953Srrs (alt->ro.ro_rt != NULL) && 349169655Srrs /* sa_ignore NO_NULL_CHK */ 350163953Srrs (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) 351163953Srrs ) { 352163953Srrs /* Found a reachable address */ 353163953Srrs break; 354163953Srrs } 355163953Srrs mnet = alt; 356163953Srrs } while (alt != NULL); 357163953Srrs 358163953Srrs if (alt == NULL) { 359163953Srrs /* Case where NO insv network exists (dormant state) */ 360163953Srrs /* we rotate destinations */ 361163953Srrs once = 0; 362163953Srrs mnet = net; 363163953Srrs do { 364163953Srrs alt = TAILQ_NEXT(mnet, sctp_next); 365163953Srrs if (alt == NULL) { 366163953Srrs once++; 367163953Srrs if (once > 1) { 368163953Srrs break; 369163953Srrs } 370163953Srrs alt = TAILQ_FIRST(&stcb->asoc.nets); 371163953Srrs } 372169655Srrs /* sa_ignore NO_NULL_CHK */ 373163953Srrs if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) && 374163953Srrs (alt != net)) { 375163953Srrs /* Found an alternate address */ 376163953Srrs break; 377163953Srrs } 378163953Srrs mnet = alt; 379163953Srrs } while (alt != NULL); 380163953Srrs } 381163953Srrs if (alt == NULL) { 382163953Srrs return (net); 383163953Srrs } 384163953Srrs return (alt); 385163953Srrs} 386163953Srrs 387163953Srrsstatic void 388163953Srrssctp_backoff_on_timeout(struct sctp_tcb *stcb, 389163953Srrs struct sctp_nets *net, 390163953Srrs int win_probe, 391163953Srrs int num_marked) 392163953Srrs{ 393170642Srrs if (net->RTO == 0) { 394170642Srrs net->RTO = stcb->asoc.minrto; 395170642Srrs } 396163953Srrs net->RTO <<= 1; 397163953Srrs if (net->RTO > stcb->asoc.maxrto) { 398163953Srrs net->RTO = stcb->asoc.maxrto; 399163953Srrs } 400163953Srrs if ((win_probe == 0) && num_marked) { 401163953Srrs /* We don't apply penalty to window probe scenarios */ 402163953Srrs#ifdef SCTP_CWND_MONITOR 403163953Srrs int old_cwnd = net->cwnd; 404163953Srrs 405163953Srrs#endif 406163953Srrs net->ssthresh = net->cwnd >> 1; 407163953Srrs if (net->ssthresh < (net->mtu << 1)) { 408163953Srrs net->ssthresh = (net->mtu << 1); 409163953Srrs } 410163953Srrs net->cwnd = net->mtu; 411163953Srrs /* floor of 1 mtu */ 412163953Srrs if (net->cwnd < net->mtu) 413163953Srrs net->cwnd = net->mtu; 414163953Srrs#ifdef SCTP_CWND_MONITOR 415163953Srrs sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX); 416163953Srrs#endif 417163953Srrs 418163953Srrs net->partial_bytes_acked = 0; 419163953Srrs } 420163953Srrs} 421163953Srrs 422163953Srrsstatic int 423163953Srrssctp_mark_all_for_resend(struct sctp_tcb *stcb, 424163953Srrs struct sctp_nets *net, 425163953Srrs struct sctp_nets *alt, 426163953Srrs int window_probe, 427163953Srrs int *num_marked) 428163953Srrs{ 429163953Srrs 430163953Srrs /* 431163953Srrs * Mark all chunks (well not all) that were sent to *net for 432163953Srrs * retransmission. Move them to alt for there destination as well... 433163953Srrs * We only mark chunks that have been outstanding long enough to 434163953Srrs * have received feed-back. 435163953Srrs */ 436163953Srrs struct sctp_tmit_chunk *chk, *tp2, *could_be_sent = NULL; 437163953Srrs struct sctp_nets *lnets; 438163953Srrs struct timeval now, min_wait, tv; 439163953Srrs int cur_rtt; 440168709Srrs int audit_tf, num_mk, fir; 441163953Srrs unsigned int cnt_mk; 442168709Srrs uint32_t orig_flight, orig_tf; 443163953Srrs uint32_t tsnlast, tsnfirst; 444163953Srrs 445163953Srrs /* 446163953Srrs * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being used, 447163953Srrs * then pick dest with largest ssthresh for any retransmission. 448163953Srrs * (iyengar@cis.udel.edu, 2005/08/12) 449163953Srrs */ 450163953Srrs if (sctp_cmt_on_off) { 451163953Srrs alt = sctp_find_alternate_net(stcb, net, 1); 452163953Srrs /* 453163953Srrs * CUCv2: If a different dest is picked for the 454163953Srrs * retransmission, then new (rtx-)pseudo_cumack needs to be 455163953Srrs * tracked for orig dest. Let CUCv2 track new (rtx-) 456163953Srrs * pseudo-cumack always. 457163953Srrs */ 458163953Srrs net->find_pseudo_cumack = 1; 459163953Srrs net->find_rtx_pseudo_cumack = 1; 460163953Srrs } 461163953Srrs /* none in flight now */ 462163953Srrs audit_tf = 0; 463163953Srrs fir = 0; 464163953Srrs /* 465163953Srrs * figure out how long a data chunk must be pending before we can 466163953Srrs * mark it .. 467163953Srrs */ 468169378Srrs (void)SCTP_GETTIME_TIMEVAL(&now); 469163953Srrs /* get cur rto in micro-seconds */ 470163953Srrs cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1); 471163953Srrs cur_rtt *= 1000; 472163953Srrs#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING) 473163953Srrs sctp_log_fr(cur_rtt, 474163953Srrs stcb->asoc.peers_rwnd, 475163953Srrs window_probe, 476163953Srrs SCTP_FR_T3_MARK_TIME); 477163953Srrs sctp_log_fr(net->flight_size, 478165647Srrs SCTP_OS_TIMER_PENDING(&net->fr_timer.timer), 479165647Srrs SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer), 480163953Srrs SCTP_FR_CWND_REPORT); 481163953Srrs sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT); 482163953Srrs#endif 483163953Srrs tv.tv_sec = cur_rtt / 1000000; 484163953Srrs tv.tv_usec = cur_rtt % 1000000; 485163953Srrs min_wait = now; 486163953Srrs timevalsub(&min_wait, &tv); 487163953Srrs if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) { 488163953Srrs /* 489163953Srrs * if we hit here, we don't have enough seconds on the clock 490163953Srrs * to account for the RTO. We just let the lower seconds be 491163953Srrs * the bounds and don't worry about it. This may mean we 492163953Srrs * will mark a lot more than we should. 493163953Srrs */ 494163953Srrs min_wait.tv_sec = min_wait.tv_usec = 0; 495163953Srrs } 496163953Srrs#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING) 497163953Srrs sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME); 498163953Srrs sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME); 499163953Srrs#endif 500163953Srrs /* 501163953Srrs * Our rwnd will be incorrect here since we are not adding back the 502163953Srrs * cnt * mbuf but we will fix that down below. 503163953Srrs */ 504163953Srrs orig_flight = net->flight_size; 505168709Srrs orig_tf = stcb->asoc.total_flight; 506168709Srrs 507163953Srrs net->fast_retran_ip = 0; 508163953Srrs /* Now on to each chunk */ 509163953Srrs num_mk = cnt_mk = 0; 510163953Srrs tsnfirst = tsnlast = 0; 511163953Srrs chk = TAILQ_FIRST(&stcb->asoc.sent_queue); 512163953Srrs for (; chk != NULL; chk = tp2) { 513163953Srrs tp2 = TAILQ_NEXT(chk, sctp_next); 514163953Srrs if ((compare_with_wrap(stcb->asoc.last_acked_seq, 515163953Srrs chk->rec.data.TSN_seq, 516163953Srrs MAX_TSN)) || 517163953Srrs (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) { 518163953Srrs /* Strange case our list got out of order? */ 519169420Srrs SCTP_PRINTF("Our list is out of order?\n"); 520163953Srrs panic("Out of order list"); 521163953Srrs } 522163953Srrs if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) { 523163953Srrs /* 524163953Srrs * found one to mark: If it is less than 525163953Srrs * DATAGRAM_ACKED it MUST not be a skipped or marked 526163953Srrs * TSN but instead one that is either already set 527163953Srrs * for retransmission OR one that needs 528163953Srrs * retransmission. 529163953Srrs */ 530163953Srrs 531163953Srrs /* validate its been outstanding long enough */ 532163953Srrs#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING) 533163953Srrs sctp_log_fr(chk->rec.data.TSN_seq, 534163953Srrs chk->sent_rcv_time.tv_sec, 535163953Srrs chk->sent_rcv_time.tv_usec, 536163953Srrs SCTP_FR_T3_MARK_TIME); 537163953Srrs#endif 538163953Srrs if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) { 539163953Srrs /* 540163953Srrs * we have reached a chunk that was sent 541163953Srrs * some seconds past our min.. forget it we 542163953Srrs * will find no more to send. 543163953Srrs */ 544163953Srrs#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING) 545163953Srrs sctp_log_fr(0, 546163953Srrs chk->sent_rcv_time.tv_sec, 547163953Srrs chk->sent_rcv_time.tv_usec, 548163953Srrs SCTP_FR_T3_STOPPED); 549163953Srrs#endif 550163953Srrs continue; 551163953Srrs } else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) && 552163953Srrs (window_probe == 0)) { 553163953Srrs /* 554163953Srrs * we must look at the micro seconds to 555163953Srrs * know. 556163953Srrs */ 557163953Srrs if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) { 558163953Srrs /* 559163953Srrs * ok it was sent after our boundary 560163953Srrs * time. 561163953Srrs */ 562163953Srrs#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING) 563163953Srrs sctp_log_fr(0, 564163953Srrs chk->sent_rcv_time.tv_sec, 565163953Srrs chk->sent_rcv_time.tv_usec, 566163953Srrs SCTP_FR_T3_STOPPED); 567163953Srrs#endif 568163953Srrs continue; 569163953Srrs } 570163953Srrs } 571163953Srrs if (PR_SCTP_TTL_ENABLED(chk->flags)) { 572163953Srrs /* Is it expired? */ 573163953Srrs if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) || 574163953Srrs ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) && 575163953Srrs (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) { 576163953Srrs /* Yes so drop it */ 577163953Srrs if (chk->data) { 578169420Srrs (void)sctp_release_pr_sctp_chunk(stcb, 579163953Srrs chk, 580163953Srrs (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 581163953Srrs &stcb->asoc.sent_queue); 582163953Srrs } 583163953Srrs } 584163953Srrs continue; 585163953Srrs } 586163953Srrs if (PR_SCTP_RTX_ENABLED(chk->flags)) { 587163953Srrs /* Has it been retransmitted tv_sec times? */ 588163953Srrs if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) { 589163953Srrs if (chk->data) { 590169420Srrs (void)sctp_release_pr_sctp_chunk(stcb, 591163953Srrs chk, 592163953Srrs (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT), 593163953Srrs &stcb->asoc.sent_queue); 594163953Srrs } 595163953Srrs } 596163953Srrs continue; 597163953Srrs } 598168709Srrs if (chk->sent < SCTP_DATAGRAM_RESEND) { 599163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 600163953Srrs num_mk++; 601163953Srrs if (fir == 0) { 602163953Srrs fir = 1; 603163953Srrs tsnfirst = chk->rec.data.TSN_seq; 604163953Srrs } 605163953Srrs tsnlast = chk->rec.data.TSN_seq; 606163953Srrs#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING) 607163953Srrs sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count, 608163953Srrs 0, SCTP_FR_T3_MARKED); 609163953Srrs 610163953Srrs#endif 611168709Srrs if (chk->rec.data.chunk_was_revoked) { 612168709Srrs /* deflate the cwnd */ 613168709Srrs chk->whoTo->cwnd -= chk->book_size; 614168709Srrs chk->rec.data.chunk_was_revoked = 0; 615168709Srrs } 616168709Srrs net->marked_retrans++; 617168709Srrs stcb->asoc.marked_retrans++; 618168709Srrs#ifdef SCTP_FLIGHT_LOGGING 619168709Srrs sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO, 620168709Srrs chk->whoTo->flight_size, 621168709Srrs chk->book_size, 622168709Srrs (uintptr_t) chk->whoTo, 623168709Srrs chk->rec.data.TSN_seq); 624168709Srrs#endif 625168709Srrs sctp_flight_size_decrease(chk); 626168709Srrs sctp_total_flight_decrease(stcb, chk); 627168709Srrs stcb->asoc.peers_rwnd += chk->send_size; 628168709Srrs stcb->asoc.peers_rwnd += sctp_peer_chunk_oh; 629163953Srrs } 630163953Srrs chk->sent = SCTP_DATAGRAM_RESEND; 631163953Srrs SCTP_STAT_INCR(sctps_markedretrans); 632165220Srrs 633163953Srrs /* reset the TSN for striking and other FR stuff */ 634163953Srrs chk->rec.data.doing_fast_retransmit = 0; 635163953Srrs /* Clear any time so NO RTT is being done */ 636163953Srrs chk->do_rtt = 0; 637163953Srrs if (alt != net) { 638163953Srrs sctp_free_remote_addr(chk->whoTo); 639163953Srrs chk->no_fr_allowed = 1; 640163953Srrs chk->whoTo = alt; 641163953Srrs atomic_add_int(&alt->ref_count, 1); 642163953Srrs } else { 643163953Srrs chk->no_fr_allowed = 0; 644163953Srrs if (TAILQ_EMPTY(&stcb->asoc.send_queue)) { 645163953Srrs chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq; 646163953Srrs } else { 647163953Srrs chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq; 648163953Srrs } 649163953Srrs } 650170181Srrs /* 651170181Srrs * CMT: Do not allow FRs on retransmitted TSNs. 652170181Srrs */ 653163953Srrs if (sctp_cmt_on_off == 1) { 654163953Srrs chk->no_fr_allowed = 1; 655163953Srrs } 656163953Srrs } else if (chk->sent == SCTP_DATAGRAM_ACKED) { 657163953Srrs /* remember highest acked one */ 658163953Srrs could_be_sent = chk; 659163953Srrs } 660163953Srrs if (chk->sent == SCTP_DATAGRAM_RESEND) { 661163953Srrs cnt_mk++; 662163953Srrs } 663163953Srrs } 664168709Srrs if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) { 665168709Srrs /* we did not subtract the same things? */ 666168709Srrs audit_tf = 1; 667168709Srrs } 668163953Srrs#if defined(SCTP_FR_LOGGING) || defined(SCTP_EARLYFR_LOGGING) 669163953Srrs sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT); 670163953Srrs#endif 671163953Srrs#ifdef SCTP_DEBUG 672169420Srrs if (num_mk) { 673169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", 674169420Srrs tsnlast); 675169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n", 676169420Srrs num_mk, (u_long)stcb->asoc.peers_rwnd); 677169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n", 678169420Srrs tsnlast); 679169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%d\n", 680169420Srrs num_mk, 681169420Srrs (int)stcb->asoc.peers_rwnd); 682163953Srrs } 683163953Srrs#endif 684163953Srrs *num_marked = num_mk; 685163953Srrs if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) { 686163953Srrs /* fix it so we retransmit the highest acked anyway */ 687163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 688163953Srrs cnt_mk++; 689163953Srrs could_be_sent->sent = SCTP_DATAGRAM_RESEND; 690163953Srrs } 691163953Srrs if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) { 692165220Srrs#ifdef INVARIANTS 693169420Srrs SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d\n", 694163953Srrs cnt_mk, stcb->asoc.sent_queue_retran_cnt); 695163953Srrs#endif 696163953Srrs#ifndef SCTP_AUDITING_ENABLED 697163953Srrs stcb->asoc.sent_queue_retran_cnt = cnt_mk; 698163953Srrs#endif 699163953Srrs } 700163953Srrs /* Now check for a ECN Echo that may be stranded */ 701163953Srrs TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 702163953Srrs if ((chk->whoTo == net) && 703163953Srrs (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 704163953Srrs sctp_free_remote_addr(chk->whoTo); 705163953Srrs chk->whoTo = alt; 706163953Srrs if (chk->sent != SCTP_DATAGRAM_RESEND) { 707163953Srrs chk->sent = SCTP_DATAGRAM_RESEND; 708163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 709163953Srrs } 710163953Srrs atomic_add_int(&alt->ref_count, 1); 711163953Srrs } 712163953Srrs } 713163953Srrs if (audit_tf) { 714169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, 715169420Srrs "Audit total flight due to negative value net:%p\n", 716169420Srrs net); 717163953Srrs stcb->asoc.total_flight = 0; 718163953Srrs stcb->asoc.total_flight_count = 0; 719163953Srrs /* Clear all networks flight size */ 720163953Srrs TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) { 721163953Srrs lnets->flight_size = 0; 722169420Srrs SCTPDBG(SCTP_DEBUG_TIMER4, 723169420Srrs "Net:%p c-f cwnd:%d ssthresh:%d\n", 724169420Srrs lnets, lnets->cwnd, lnets->ssthresh); 725163953Srrs } 726163953Srrs TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) { 727163953Srrs if (chk->sent < SCTP_DATAGRAM_RESEND) { 728165220Srrs#ifdef SCTP_FLIGHT_LOGGING 729165220Srrs sctp_misc_ints(SCTP_FLIGHT_LOG_UP, 730165220Srrs chk->whoTo->flight_size, 731165220Srrs chk->book_size, 732168709Srrs (uintptr_t) chk->whoTo, 733165220Srrs chk->rec.data.TSN_seq); 734165220Srrs#endif 735168709Srrs 736168709Srrs sctp_flight_size_increase(chk); 737168709Srrs sctp_total_flight_increase(stcb, chk); 738163953Srrs } 739163953Srrs } 740163953Srrs } 741163953Srrs /* 742163953Srrs * Setup the ecn nonce re-sync point. We do this since 743163953Srrs * retranmissions are NOT setup for ECN. This means that do to 744163953Srrs * Karn's rule, we don't know the total of the peers ecn bits. 745163953Srrs */ 746163953Srrs chk = TAILQ_FIRST(&stcb->asoc.send_queue); 747163953Srrs if (chk == NULL) { 748163953Srrs stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq; 749163953Srrs } else { 750163953Srrs stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq; 751163953Srrs } 752163953Srrs stcb->asoc.nonce_wait_for_ecne = 0; 753163953Srrs stcb->asoc.nonce_sum_check = 0; 754163953Srrs /* We return 1 if we only have a window probe outstanding */ 755163953Srrs return (0); 756163953Srrs} 757163953Srrs 758163953Srrsstatic void 759163953Srrssctp_move_all_chunks_to_alt(struct sctp_tcb *stcb, 760163953Srrs struct sctp_nets *net, 761163953Srrs struct sctp_nets *alt) 762163953Srrs{ 763163953Srrs struct sctp_association *asoc; 764163953Srrs struct sctp_stream_out *outs; 765163953Srrs struct sctp_tmit_chunk *chk; 766163953Srrs struct sctp_stream_queue_pending *sp; 767163953Srrs 768163953Srrs if (net == alt) 769163953Srrs /* nothing to do */ 770163953Srrs return; 771163953Srrs 772163953Srrs asoc = &stcb->asoc; 773163953Srrs 774163953Srrs /* 775163953Srrs * now through all the streams checking for chunks sent to our bad 776163953Srrs * network. 777163953Srrs */ 778163953Srrs TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) { 779163953Srrs /* now clean up any chunks here */ 780163953Srrs TAILQ_FOREACH(sp, &outs->outqueue, next) { 781163953Srrs if (sp->net == net) { 782163953Srrs sctp_free_remote_addr(sp->net); 783163953Srrs sp->net = alt; 784163953Srrs atomic_add_int(&alt->ref_count, 1); 785163953Srrs } 786163953Srrs } 787163953Srrs } 788163953Srrs /* Now check the pending queue */ 789163953Srrs TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) { 790163953Srrs if (chk->whoTo == net) { 791163953Srrs sctp_free_remote_addr(chk->whoTo); 792163953Srrs chk->whoTo = alt; 793163953Srrs atomic_add_int(&alt->ref_count, 1); 794163953Srrs } 795163953Srrs } 796163953Srrs 797163953Srrs} 798163953Srrs 799163953Srrsint 800163953Srrssctp_t3rxt_timer(struct sctp_inpcb *inp, 801163953Srrs struct sctp_tcb *stcb, 802163953Srrs struct sctp_nets *net) 803163953Srrs{ 804163953Srrs struct sctp_nets *alt; 805163953Srrs int win_probe, num_mk; 806163953Srrs 807163953Srrs#ifdef SCTP_FR_LOGGING 808163964Srrs sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT); 809163953Srrs#ifdef SCTP_CWND_LOGGING 810163953Srrs { 811163953Srrs struct sctp_nets *lnet; 812163953Srrs 813163953Srrs TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) { 814163953Srrs if (net == lnet) { 815163953Srrs sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3); 816163953Srrs } else { 817163953Srrs sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3); 818163953Srrs } 819163953Srrs } 820163953Srrs } 821163953Srrs#endif 822163953Srrs#endif 823163953Srrs /* Find an alternate and mark those for retransmission */ 824163953Srrs if ((stcb->asoc.peers_rwnd == 0) && 825163953Srrs (stcb->asoc.total_flight < net->mtu)) { 826163953Srrs SCTP_STAT_INCR(sctps_timowindowprobe); 827163953Srrs win_probe = 1; 828163953Srrs } else { 829163953Srrs win_probe = 0; 830163953Srrs } 831168709Srrs 832168709Srrs if (sctp_cmt_on_off) { 833168709Srrs /* 834168709Srrs * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being 835168709Srrs * used, then pick dest with largest ssthresh for any 836168709Srrs * retransmission. 837168709Srrs */ 838168709Srrs alt = net; 839168709Srrs alt = sctp_find_alternate_net(stcb, alt, 1); 840168709Srrs /* 841168709Srrs * CUCv2: If a different dest is picked for the 842168709Srrs * retransmission, then new (rtx-)pseudo_cumack needs to be 843168709Srrs * tracked for orig dest. Let CUCv2 track new (rtx-) 844168709Srrs * pseudo-cumack always. 845168709Srrs */ 846168709Srrs net->find_pseudo_cumack = 1; 847168709Srrs net->find_rtx_pseudo_cumack = 1; 848168709Srrs 849168709Srrs } else { /* CMT is OFF */ 850168709Srrs 851168709Srrs alt = sctp_find_alternate_net(stcb, net, 0); 852168709Srrs } 853168709Srrs 854169420Srrs (void)sctp_mark_all_for_resend(stcb, net, alt, win_probe, &num_mk); 855163953Srrs /* FR Loss recovery just ended with the T3. */ 856163953Srrs stcb->asoc.fast_retran_loss_recovery = 0; 857163953Srrs 858163953Srrs /* CMT FR loss recovery ended with the T3 */ 859163953Srrs net->fast_retran_loss_recovery = 0; 860163953Srrs 861163953Srrs /* 862163953Srrs * setup the sat loss recovery that prevents satellite cwnd advance. 863163953Srrs */ 864163953Srrs stcb->asoc.sat_t3_loss_recovery = 1; 865163953Srrs stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq; 866163953Srrs 867163953Srrs /* Backoff the timer and cwnd */ 868163953Srrs sctp_backoff_on_timeout(stcb, net, win_probe, num_mk); 869163953Srrs if (win_probe == 0) { 870163953Srrs /* We don't do normal threshold management on window probes */ 871163953Srrs if (sctp_threshold_management(inp, stcb, net, 872163953Srrs stcb->asoc.max_send_times)) { 873163953Srrs /* Association was destroyed */ 874163953Srrs return (1); 875163953Srrs } else { 876163953Srrs if (net != stcb->asoc.primary_destination) { 877163953Srrs /* send a immediate HB if our RTO is stale */ 878163953Srrs struct timeval now; 879163953Srrs unsigned int ms_goneby; 880163953Srrs 881169378Srrs (void)SCTP_GETTIME_TIMEVAL(&now); 882163953Srrs if (net->last_sent_time.tv_sec) { 883163953Srrs ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000; 884163953Srrs } else { 885163953Srrs ms_goneby = 0; 886163953Srrs } 887163953Srrs if ((ms_goneby > net->RTO) || (net->RTO == 0)) { 888163953Srrs /* 889163953Srrs * no recent feed back in an RTO or 890163953Srrs * more, request a RTT update 891163953Srrs */ 892169378Srrs (void)sctp_send_hb(stcb, 1, net); 893163953Srrs } 894163953Srrs } 895163953Srrs } 896163953Srrs } else { 897163953Srrs /* 898163953Srrs * For a window probe we don't penalize the net's but only 899163953Srrs * the association. This may fail it if SACKs are not coming 900163953Srrs * back. If sack's are coming with rwnd locked at 0, we will 901163953Srrs * continue to hold things waiting for rwnd to raise 902163953Srrs */ 903163953Srrs if (sctp_threshold_management(inp, stcb, NULL, 904163953Srrs stcb->asoc.max_send_times)) { 905163953Srrs /* Association was destroyed */ 906163953Srrs return (1); 907163953Srrs } 908163953Srrs } 909163953Srrs if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 910163953Srrs /* Move all pending over too */ 911163953Srrs sctp_move_all_chunks_to_alt(stcb, net, alt); 912169352Srrs 913169352Srrs /* 914169352Srrs * Get the address that failed, to force a new src address 915169352Srrs * selecton and a route allocation. 916169352Srrs */ 917169352Srrs if (net->ro._s_addr) { 918169352Srrs sctp_free_ifa(net->ro._s_addr); 919169352Srrs net->ro._s_addr = NULL; 920169352Srrs } 921169352Srrs net->src_addr_selected = 0; 922169352Srrs 923169352Srrs /* Force a route allocation too */ 924169352Srrs if (net->ro.ro_rt) { 925169352Srrs RTFREE(net->ro.ro_rt); 926169352Srrs net->ro.ro_rt = NULL; 927169352Srrs } 928163953Srrs /* Was it our primary? */ 929163953Srrs if ((stcb->asoc.primary_destination == net) && (alt != net)) { 930163953Srrs /* 931163953Srrs * Yes, note it as such and find an alternate note: 932163953Srrs * this means HB code must use this to resent the 933163953Srrs * primary if it goes active AND if someone does a 934163953Srrs * change-primary then this flag must be cleared 935163953Srrs * from any net structures. 936163953Srrs */ 937163953Srrs if (sctp_set_primary_addr(stcb, 938163953Srrs (struct sockaddr *)NULL, 939163953Srrs alt) == 0) { 940163953Srrs net->dest_state |= SCTP_ADDR_WAS_PRIMARY; 941163953Srrs } 942163953Srrs } 943163953Srrs } 944163953Srrs /* 945163953Srrs * Special case for cookie-echo'ed case, we don't do output but must 946163953Srrs * await the COOKIE-ACK before retransmission 947163953Srrs */ 948163953Srrs if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 949163953Srrs /* 950163953Srrs * Here we just reset the timer and start again since we 951163953Srrs * have not established the asoc 952163953Srrs */ 953163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net); 954163953Srrs return (0); 955163953Srrs } 956163953Srrs if (stcb->asoc.peer_supports_prsctp) { 957163953Srrs struct sctp_tmit_chunk *lchk; 958163953Srrs 959163953Srrs lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc); 960163953Srrs /* C3. See if we need to send a Fwd-TSN */ 961163953Srrs if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point, 962163953Srrs stcb->asoc.last_acked_seq, MAX_TSN)) { 963163953Srrs /* 964163953Srrs * ISSUE with ECN, see FWD-TSN processing for notes 965163953Srrs * on issues that will occur when the ECN NONCE 966163953Srrs * stuff is put into SCTP for cross checking. 967163953Srrs */ 968163953Srrs send_forward_tsn(stcb, &stcb->asoc); 969163953Srrs if (lchk) { 970163953Srrs /* Assure a timer is up */ 971163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo); 972163953Srrs } 973163953Srrs } 974163953Srrs } 975163953Srrs#ifdef SCTP_CWND_MONITOR 976163953Srrs sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX); 977163953Srrs#endif 978163953Srrs return (0); 979163953Srrs} 980163953Srrs 981163953Srrsint 982163953Srrssctp_t1init_timer(struct sctp_inpcb *inp, 983163953Srrs struct sctp_tcb *stcb, 984163953Srrs struct sctp_nets *net) 985163953Srrs{ 986163953Srrs /* bump the thresholds */ 987163953Srrs if (stcb->asoc.delayed_connection) { 988163953Srrs /* 989163953Srrs * special hook for delayed connection. The library did NOT 990163953Srrs * complete the rest of its sends. 991163953Srrs */ 992163953Srrs stcb->asoc.delayed_connection = 0; 993163953Srrs sctp_send_initiate(inp, stcb); 994163953Srrs return (0); 995163953Srrs } 996163953Srrs if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) { 997163953Srrs return (0); 998163953Srrs } 999163953Srrs if (sctp_threshold_management(inp, stcb, net, 1000163953Srrs stcb->asoc.max_init_times)) { 1001163953Srrs /* Association was destroyed */ 1002163953Srrs return (1); 1003163953Srrs } 1004163953Srrs stcb->asoc.dropped_special_cnt = 0; 1005163953Srrs sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0); 1006163953Srrs if (stcb->asoc.initial_init_rto_max < net->RTO) { 1007163953Srrs net->RTO = stcb->asoc.initial_init_rto_max; 1008163953Srrs } 1009163953Srrs if (stcb->asoc.numnets > 1) { 1010163953Srrs /* If we have more than one addr use it */ 1011163953Srrs struct sctp_nets *alt; 1012163953Srrs 1013163953Srrs alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0); 1014163953Srrs if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) { 1015163953Srrs sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt); 1016163953Srrs stcb->asoc.primary_destination = alt; 1017163953Srrs } 1018163953Srrs } 1019163953Srrs /* Send out a new init */ 1020163953Srrs sctp_send_initiate(inp, stcb); 1021163953Srrs return (0); 1022163953Srrs} 1023163953Srrs 1024163953Srrs/* 1025163953Srrs * For cookie and asconf we actually need to find and mark for resend, then 1026163953Srrs * increment the resend counter (after all the threshold management stuff of 1027163953Srrs * course). 1028163953Srrs */ 1029163953Srrsint 1030163953Srrssctp_cookie_timer(struct sctp_inpcb *inp, 1031163953Srrs struct sctp_tcb *stcb, 1032163953Srrs struct sctp_nets *net) 1033163953Srrs{ 1034163953Srrs struct sctp_nets *alt; 1035163953Srrs struct sctp_tmit_chunk *cookie; 1036163953Srrs 1037163953Srrs /* first before all else we must find the cookie */ 1038163953Srrs TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) { 1039163953Srrs if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) { 1040163953Srrs break; 1041163953Srrs } 1042163953Srrs } 1043163953Srrs if (cookie == NULL) { 1044163953Srrs if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) { 1045163953Srrs /* FOOBAR! */ 1046163953Srrs struct mbuf *oper; 1047163953Srrs 1048163953Srrs oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)), 1049163953Srrs 0, M_DONTWAIT, 1, MT_DATA); 1050163953Srrs if (oper) { 1051163953Srrs struct sctp_paramhdr *ph; 1052163953Srrs uint32_t *ippp; 1053163953Srrs 1054165647Srrs SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) + 1055163953Srrs sizeof(uint32_t); 1056163953Srrs ph = mtod(oper, struct sctp_paramhdr *); 1057163953Srrs ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION); 1058165647Srrs ph->param_length = htons(SCTP_BUF_LEN(oper)); 1059163953Srrs ippp = (uint32_t *) (ph + 1); 1060165220Srrs *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_2); 1061163953Srrs } 1062165220Srrs inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_3; 1063163953Srrs sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR, 1064163953Srrs oper); 1065163953Srrs } else { 1066165220Srrs#ifdef INVARIANTS 1067163953Srrs panic("Cookie timer expires in wrong state?"); 1068163953Srrs#else 1069169420Srrs SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc)); 1070163953Srrs return (0); 1071163953Srrs#endif 1072163953Srrs } 1073163953Srrs return (0); 1074163953Srrs } 1075163953Srrs /* Ok we found the cookie, threshold management next */ 1076163953Srrs if (sctp_threshold_management(inp, stcb, cookie->whoTo, 1077163953Srrs stcb->asoc.max_init_times)) { 1078163953Srrs /* Assoc is over */ 1079163953Srrs return (1); 1080163953Srrs } 1081163953Srrs /* 1082163953Srrs * cleared theshold management now lets backoff the address & select 1083163953Srrs * an alternate 1084163953Srrs */ 1085163953Srrs stcb->asoc.dropped_special_cnt = 0; 1086163953Srrs sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0); 1087163953Srrs alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0); 1088163953Srrs if (alt != cookie->whoTo) { 1089163953Srrs sctp_free_remote_addr(cookie->whoTo); 1090163953Srrs cookie->whoTo = alt; 1091163953Srrs atomic_add_int(&alt->ref_count, 1); 1092163953Srrs } 1093163953Srrs /* Now mark the retran info */ 1094163953Srrs if (cookie->sent != SCTP_DATAGRAM_RESEND) { 1095163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1096163953Srrs } 1097163953Srrs cookie->sent = SCTP_DATAGRAM_RESEND; 1098163953Srrs /* 1099163953Srrs * Now call the output routine to kick out the cookie again, Note we 1100163953Srrs * don't mark any chunks for retran so that FR will need to kick in 1101163953Srrs * to move these (or a send timer). 1102163953Srrs */ 1103163953Srrs return (0); 1104163953Srrs} 1105163953Srrs 1106163953Srrsint 1107163953Srrssctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1108163953Srrs struct sctp_nets *net) 1109163953Srrs{ 1110163953Srrs struct sctp_nets *alt; 1111163953Srrs struct sctp_tmit_chunk *strrst = NULL, *chk = NULL; 1112163953Srrs 1113163953Srrs if (stcb->asoc.stream_reset_outstanding == 0) { 1114163953Srrs return (0); 1115163953Srrs } 1116163953Srrs /* find the existing STRRESET, we use the seq number we sent out on */ 1117169420Srrs (void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst); 1118163953Srrs if (strrst == NULL) { 1119163953Srrs return (0); 1120163953Srrs } 1121163953Srrs /* do threshold management */ 1122163953Srrs if (sctp_threshold_management(inp, stcb, strrst->whoTo, 1123163953Srrs stcb->asoc.max_send_times)) { 1124163953Srrs /* Assoc is over */ 1125163953Srrs return (1); 1126163953Srrs } 1127163953Srrs /* 1128163953Srrs * cleared theshold management now lets backoff the address & select 1129163953Srrs * an alternate 1130163953Srrs */ 1131163953Srrs sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0); 1132163953Srrs alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0); 1133163953Srrs sctp_free_remote_addr(strrst->whoTo); 1134163953Srrs strrst->whoTo = alt; 1135163953Srrs atomic_add_int(&alt->ref_count, 1); 1136163953Srrs 1137163953Srrs /* See if a ECN Echo is also stranded */ 1138163953Srrs TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1139163953Srrs if ((chk->whoTo == net) && 1140163953Srrs (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1141163953Srrs sctp_free_remote_addr(chk->whoTo); 1142163953Srrs if (chk->sent != SCTP_DATAGRAM_RESEND) { 1143163953Srrs chk->sent = SCTP_DATAGRAM_RESEND; 1144163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1145163953Srrs } 1146163953Srrs chk->whoTo = alt; 1147163953Srrs atomic_add_int(&alt->ref_count, 1); 1148163953Srrs } 1149163953Srrs } 1150163953Srrs if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1151163953Srrs /* 1152163953Srrs * If the address went un-reachable, we need to move to 1153163953Srrs * alternates for ALL chk's in queue 1154163953Srrs */ 1155163953Srrs sctp_move_all_chunks_to_alt(stcb, net, alt); 1156163953Srrs } 1157163953Srrs /* mark the retran info */ 1158163953Srrs if (strrst->sent != SCTP_DATAGRAM_RESEND) 1159163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1160163953Srrs strrst->sent = SCTP_DATAGRAM_RESEND; 1161163953Srrs 1162163953Srrs /* restart the timer */ 1163163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo); 1164163953Srrs return (0); 1165163953Srrs} 1166163953Srrs 1167163953Srrsint 1168163953Srrssctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1169163953Srrs struct sctp_nets *net) 1170163953Srrs{ 1171163953Srrs struct sctp_nets *alt; 1172163953Srrs struct sctp_tmit_chunk *asconf, *chk; 1173163953Srrs 1174163953Srrs /* is this the first send, or a retransmission? */ 1175163953Srrs if (stcb->asoc.asconf_sent == 0) { 1176163953Srrs /* compose a new ASCONF chunk and send it */ 1177163953Srrs sctp_send_asconf(stcb, net); 1178163953Srrs } else { 1179163953Srrs /* Retransmission of the existing ASCONF needed... */ 1180163953Srrs 1181163953Srrs /* find the existing ASCONF */ 1182163953Srrs TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue, 1183163953Srrs sctp_next) { 1184163953Srrs if (asconf->rec.chunk_id.id == SCTP_ASCONF) { 1185163953Srrs break; 1186163953Srrs } 1187163953Srrs } 1188163953Srrs if (asconf == NULL) { 1189163953Srrs return (0); 1190163953Srrs } 1191163953Srrs /* do threshold management */ 1192163953Srrs if (sctp_threshold_management(inp, stcb, asconf->whoTo, 1193163953Srrs stcb->asoc.max_send_times)) { 1194163953Srrs /* Assoc is over */ 1195163953Srrs return (1); 1196163953Srrs } 1197163953Srrs /* 1198163953Srrs * PETER? FIX? How will the following code ever run? If the 1199163953Srrs * max_send_times is hit, threshold managment will blow away 1200163953Srrs * the association? 1201163953Srrs */ 1202163953Srrs if (asconf->snd_count > stcb->asoc.max_send_times) { 1203163953Srrs /* 1204163953Srrs * Something is rotten, peer is not responding to 1205163953Srrs * ASCONFs but maybe is to data etc. e.g. it is not 1206163953Srrs * properly handling the chunk type upper bits Mark 1207163953Srrs * this peer as ASCONF incapable and cleanup 1208163953Srrs */ 1209169420Srrs SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n"); 1210163953Srrs sctp_asconf_cleanup(stcb, net); 1211163953Srrs return (0); 1212163953Srrs } 1213163953Srrs /* 1214163953Srrs * cleared theshold management now lets backoff the address 1215163953Srrs * & select an alternate 1216163953Srrs */ 1217163953Srrs sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0); 1218163953Srrs alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0); 1219163953Srrs sctp_free_remote_addr(asconf->whoTo); 1220163953Srrs asconf->whoTo = alt; 1221163953Srrs atomic_add_int(&alt->ref_count, 1); 1222163953Srrs 1223163953Srrs /* See if a ECN Echo is also stranded */ 1224163953Srrs TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) { 1225163953Srrs if ((chk->whoTo == net) && 1226163953Srrs (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) { 1227163953Srrs sctp_free_remote_addr(chk->whoTo); 1228163953Srrs chk->whoTo = alt; 1229163953Srrs if (chk->sent != SCTP_DATAGRAM_RESEND) { 1230163953Srrs chk->sent = SCTP_DATAGRAM_RESEND; 1231163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1232163953Srrs } 1233163953Srrs atomic_add_int(&alt->ref_count, 1); 1234163953Srrs } 1235163953Srrs } 1236163953Srrs if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) { 1237163953Srrs /* 1238163953Srrs * If the address went un-reachable, we need to move 1239163953Srrs * to alternates for ALL chk's in queue 1240163953Srrs */ 1241163953Srrs sctp_move_all_chunks_to_alt(stcb, net, alt); 1242163953Srrs } 1243163953Srrs /* mark the retran info */ 1244163953Srrs if (asconf->sent != SCTP_DATAGRAM_RESEND) 1245163953Srrs sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt); 1246163953Srrs asconf->sent = SCTP_DATAGRAM_RESEND; 1247163953Srrs } 1248163953Srrs return (0); 1249163953Srrs} 1250163953Srrs 1251163953Srrs/* 1252163953Srrs * For the shutdown and shutdown-ack, we do not keep one around on the 1253163953Srrs * control queue. This means we must generate a new one and call the general 1254163953Srrs * chunk output routine, AFTER having done threshold management. 1255163953Srrs */ 1256163953Srrsint 1257163953Srrssctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1258163953Srrs struct sctp_nets *net) 1259163953Srrs{ 1260163953Srrs struct sctp_nets *alt; 1261163953Srrs 1262163953Srrs /* first threshold managment */ 1263163953Srrs if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1264163953Srrs /* Assoc is over */ 1265163953Srrs return (1); 1266163953Srrs } 1267163953Srrs /* second select an alternative */ 1268163953Srrs alt = sctp_find_alternate_net(stcb, net, 0); 1269163953Srrs 1270163953Srrs /* third generate a shutdown into the queue for out net */ 1271163953Srrs if (alt) { 1272163953Srrs sctp_send_shutdown(stcb, alt); 1273163953Srrs } else { 1274163953Srrs /* 1275163953Srrs * if alt is NULL, there is no dest to send to?? 1276163953Srrs */ 1277163953Srrs return (0); 1278163953Srrs } 1279163953Srrs /* fourth restart timer */ 1280163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt); 1281163953Srrs return (0); 1282163953Srrs} 1283163953Srrs 1284163953Srrsint 1285163953Srrssctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1286163953Srrs struct sctp_nets *net) 1287163953Srrs{ 1288163953Srrs struct sctp_nets *alt; 1289163953Srrs 1290163953Srrs /* first threshold managment */ 1291163953Srrs if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) { 1292163953Srrs /* Assoc is over */ 1293163953Srrs return (1); 1294163953Srrs } 1295163953Srrs /* second select an alternative */ 1296163953Srrs alt = sctp_find_alternate_net(stcb, net, 0); 1297163953Srrs 1298163953Srrs /* third generate a shutdown into the queue for out net */ 1299163953Srrs sctp_send_shutdown_ack(stcb, alt); 1300163953Srrs 1301163953Srrs /* fourth restart timer */ 1302163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt); 1303163953Srrs return (0); 1304163953Srrs} 1305163953Srrs 1306163953Srrsstatic void 1307163953Srrssctp_audit_stream_queues_for_size(struct sctp_inpcb *inp, 1308163953Srrs struct sctp_tcb *stcb) 1309163953Srrs{ 1310163953Srrs struct sctp_stream_out *outs; 1311163953Srrs struct sctp_stream_queue_pending *sp; 1312163953Srrs unsigned int chks_in_queue = 0; 1313163953Srrs int being_filled = 0; 1314163953Srrs 1315163953Srrs /* 1316163953Srrs * This function is ONLY called when the send/sent queues are empty. 1317163953Srrs */ 1318163953Srrs if ((stcb == NULL) || (inp == NULL)) 1319163953Srrs return; 1320163953Srrs 1321163953Srrs if (stcb->asoc.sent_queue_retran_cnt) { 1322169420Srrs SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n", 1323163953Srrs stcb->asoc.sent_queue_retran_cnt); 1324163953Srrs stcb->asoc.sent_queue_retran_cnt = 0; 1325163953Srrs } 1326163953Srrs SCTP_TCB_SEND_LOCK(stcb); 1327163953Srrs if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) { 1328163953Srrs int i, cnt = 0; 1329163953Srrs 1330163953Srrs /* Check to see if a spoke fell off the wheel */ 1331163953Srrs for (i = 0; i < stcb->asoc.streamoutcnt; i++) { 1332163953Srrs if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) { 1333163953Srrs sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1); 1334163953Srrs cnt++; 1335163953Srrs } 1336163953Srrs } 1337163953Srrs if (cnt) { 1338163953Srrs /* yep, we lost a spoke or two */ 1339169420Srrs SCTP_PRINTF("Found an additional %d streams NOT on outwheel, corrected\n", cnt); 1340163953Srrs } else { 1341163953Srrs /* no spokes lost, */ 1342163953Srrs stcb->asoc.total_output_queue_size = 0; 1343163953Srrs } 1344163953Srrs SCTP_TCB_SEND_UNLOCK(stcb); 1345163953Srrs return; 1346163953Srrs } 1347163953Srrs SCTP_TCB_SEND_UNLOCK(stcb); 1348163953Srrs /* Check to see if some data queued, if so report it */ 1349163953Srrs TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) { 1350163953Srrs if (!TAILQ_EMPTY(&outs->outqueue)) { 1351163953Srrs TAILQ_FOREACH(sp, &outs->outqueue, next) { 1352163953Srrs if (sp->msg_is_complete) 1353163953Srrs being_filled++; 1354163953Srrs chks_in_queue++; 1355163953Srrs } 1356163953Srrs } 1357163953Srrs } 1358163953Srrs if (chks_in_queue != stcb->asoc.stream_queue_cnt) { 1359169420Srrs SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n", 1360163953Srrs stcb->asoc.stream_queue_cnt, chks_in_queue); 1361163953Srrs } 1362163953Srrs if (chks_in_queue) { 1363163953Srrs /* call the output queue function */ 1364163953Srrs sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3); 1365163953Srrs if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) && 1366163953Srrs (TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 1367163953Srrs /* 1368163953Srrs * Probably should go in and make it go back through 1369163953Srrs * and add fragments allowed 1370163953Srrs */ 1371163953Srrs if (being_filled == 0) { 1372169420Srrs SCTP_PRINTF("Still nothing moved %d chunks are stuck\n", 1373163953Srrs chks_in_queue); 1374163953Srrs } 1375163953Srrs } 1376163953Srrs } else { 1377169420Srrs SCTP_PRINTF("Found no chunks on any queue tot:%lu\n", 1378163953Srrs (u_long)stcb->asoc.total_output_queue_size); 1379163953Srrs stcb->asoc.total_output_queue_size = 0; 1380163953Srrs } 1381163953Srrs} 1382163953Srrs 1383163953Srrsint 1384163953Srrssctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb, 1385163953Srrs struct sctp_nets *net, int cnt_of_unconf) 1386163953Srrs{ 1387163953Srrs if (net) { 1388163953Srrs if (net->hb_responded == 0) { 1389167598Srrs if (net->ro._s_addr) { 1390167598Srrs /* 1391167598Srrs * Invalidate the src address if we did not 1392167598Srrs * get a response last time. 1393167598Srrs */ 1394167598Srrs sctp_free_ifa(net->ro._s_addr); 1395167598Srrs net->ro._s_addr = NULL; 1396167598Srrs net->src_addr_selected = 0; 1397167598Srrs } 1398163953Srrs sctp_backoff_on_timeout(stcb, net, 1, 0); 1399163953Srrs } 1400163953Srrs /* Zero PBA, if it needs it */ 1401163953Srrs if (net->partial_bytes_acked) { 1402163953Srrs net->partial_bytes_acked = 0; 1403163953Srrs } 1404163953Srrs } 1405163953Srrs if ((stcb->asoc.total_output_queue_size > 0) && 1406163953Srrs (TAILQ_EMPTY(&stcb->asoc.send_queue)) && 1407163953Srrs (TAILQ_EMPTY(&stcb->asoc.sent_queue))) { 1408163953Srrs sctp_audit_stream_queues_for_size(inp, stcb); 1409163953Srrs } 1410163953Srrs /* Send a new HB, this will do threshold managment, pick a new dest */ 1411163953Srrs if (cnt_of_unconf == 0) { 1412163953Srrs if (sctp_send_hb(stcb, 0, NULL) < 0) { 1413163953Srrs return (1); 1414163953Srrs } 1415163953Srrs } else { 1416163953Srrs /* 1417163953Srrs * this will send out extra hb's up to maxburst if there are 1418163953Srrs * any unconfirmed addresses. 1419163953Srrs */ 1420170056Srrs uint32_t cnt_sent = 0; 1421163953Srrs 1422163953Srrs TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { 1423163953Srrs if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) && 1424163953Srrs (net->dest_state & SCTP_ADDR_REACHABLE)) { 1425163953Srrs cnt_sent++; 1426167598Srrs if (net->hb_responded == 0) { 1427167598Srrs /* Did we respond last time? */ 1428167598Srrs if (net->ro._s_addr) { 1429167598Srrs sctp_free_ifa(net->ro._s_addr); 1430167598Srrs net->ro._s_addr = NULL; 1431167598Srrs net->src_addr_selected = 0; 1432167598Srrs } 1433167598Srrs } 1434163953Srrs if (sctp_send_hb(stcb, 1, net) == 0) { 1435163953Srrs break; 1436163953Srrs } 1437167598Srrs if (cnt_sent >= sctp_hb_maxburst) 1438163953Srrs break; 1439163953Srrs } 1440163953Srrs } 1441163953Srrs } 1442163953Srrs return (0); 1443163953Srrs} 1444163953Srrs 1445163953Srrsint 1446163953Srrssctp_is_hb_timer_running(struct sctp_tcb *stcb) 1447163953Srrs{ 1448165647Srrs if (SCTP_OS_TIMER_PENDING(&stcb->asoc.hb_timer.timer)) { 1449163953Srrs /* its running */ 1450163953Srrs return (1); 1451163953Srrs } else { 1452163953Srrs /* nope */ 1453163953Srrs return (0); 1454163953Srrs } 1455163953Srrs} 1456163953Srrs 1457163953Srrsint 1458163953Srrssctp_is_sack_timer_running(struct sctp_tcb *stcb) 1459163953Srrs{ 1460165647Srrs if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) { 1461163953Srrs /* its running */ 1462163953Srrs return (1); 1463163953Srrs } else { 1464163953Srrs /* nope */ 1465163953Srrs return (0); 1466163953Srrs } 1467163953Srrs} 1468163953Srrs 1469163953Srrs#define SCTP_NUMBER_OF_MTU_SIZES 18 1470163953Srrsstatic uint32_t mtu_sizes[] = { 1471163953Srrs 68, 1472163953Srrs 296, 1473163953Srrs 508, 1474163953Srrs 512, 1475163953Srrs 544, 1476163953Srrs 576, 1477163953Srrs 1006, 1478163953Srrs 1492, 1479163953Srrs 1500, 1480163953Srrs 1536, 1481163953Srrs 2002, 1482163953Srrs 2048, 1483163953Srrs 4352, 1484163953Srrs 4464, 1485163953Srrs 8166, 1486163953Srrs 17914, 1487163953Srrs 32000, 1488163953Srrs 65535 1489163953Srrs}; 1490163953Srrs 1491163953Srrs 1492163953Srrsstatic uint32_t 1493163953Srrssctp_getnext_mtu(struct sctp_inpcb *inp, uint32_t cur_mtu) 1494163953Srrs{ 1495163953Srrs /* select another MTU that is just bigger than this one */ 1496163953Srrs int i; 1497163953Srrs 1498163953Srrs for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) { 1499163953Srrs if (cur_mtu < mtu_sizes[i]) { 1500163953Srrs /* no max_mtu is bigger than this one */ 1501163953Srrs return (mtu_sizes[i]); 1502163953Srrs } 1503163953Srrs } 1504163953Srrs /* here return the highest allowable */ 1505163953Srrs return (cur_mtu); 1506163953Srrs} 1507163953Srrs 1508163953Srrs 1509163953Srrsvoid 1510163953Srrssctp_pathmtu_timer(struct sctp_inpcb *inp, 1511163953Srrs struct sctp_tcb *stcb, 1512163953Srrs struct sctp_nets *net) 1513163953Srrs{ 1514163953Srrs uint32_t next_mtu; 1515163953Srrs 1516163953Srrs /* restart the timer in any case */ 1517163953Srrs next_mtu = sctp_getnext_mtu(inp, net->mtu); 1518163953Srrs if (next_mtu <= net->mtu) { 1519163953Srrs /* nothing to do */ 1520163953Srrs return; 1521169352Srrs } { 1522169352Srrs uint32_t mtu; 1523169352Srrs 1524169352Srrs if ((net->src_addr_selected == 0) || 1525169352Srrs (net->ro._s_addr == NULL) || 1526169352Srrs (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { 1527169420Srrs if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) { 1528169352Srrs sctp_free_ifa(net->ro._s_addr); 1529169352Srrs net->ro._s_addr = NULL; 1530169352Srrs net->src_addr_selected = 0; 1531169420Srrs } else if (net->ro._s_addr == NULL) { 1532169420Srrs net->ro._s_addr = sctp_source_address_selection(inp, 1533169420Srrs stcb, 1534169420Srrs (sctp_route_t *) & net->ro, 1535169420Srrs net, 0, stcb->asoc.vrf_id); 1536169352Srrs } 1537169352Srrs if (net->ro._s_addr) 1538169352Srrs net->src_addr_selected = 1; 1539169352Srrs } 1540169352Srrs if (net->ro._s_addr) { 1541169352Srrs mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt); 1542169352Srrs if (mtu > next_mtu) { 1543163953Srrs net->mtu = next_mtu; 1544163953Srrs } 1545163953Srrs } 1546163953Srrs } 1547163953Srrs /* restart the timer */ 1548163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net); 1549163953Srrs} 1550163953Srrs 1551163953Srrsvoid 1552163953Srrssctp_autoclose_timer(struct sctp_inpcb *inp, 1553163953Srrs struct sctp_tcb *stcb, 1554163953Srrs struct sctp_nets *net) 1555163953Srrs{ 1556163953Srrs struct timeval tn, *tim_touse; 1557163953Srrs struct sctp_association *asoc; 1558163953Srrs int ticks_gone_by; 1559163953Srrs 1560169378Srrs (void)SCTP_GETTIME_TIMEVAL(&tn); 1561163953Srrs if (stcb->asoc.sctp_autoclose_ticks && 1562163953Srrs sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) { 1563163953Srrs /* Auto close is on */ 1564163953Srrs asoc = &stcb->asoc; 1565163953Srrs /* pick the time to use */ 1566163953Srrs if (asoc->time_last_rcvd.tv_sec > 1567163953Srrs asoc->time_last_sent.tv_sec) { 1568163953Srrs tim_touse = &asoc->time_last_rcvd; 1569163953Srrs } else { 1570163953Srrs tim_touse = &asoc->time_last_sent; 1571163953Srrs } 1572163953Srrs /* Now has long enough transpired to autoclose? */ 1573163953Srrs ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec); 1574163953Srrs if ((ticks_gone_by > 0) && 1575163953Srrs (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) { 1576163953Srrs /* 1577163953Srrs * autoclose time has hit, call the output routine, 1578163953Srrs * which should do nothing just to be SURE we don't 1579163953Srrs * have hanging data. We can then safely check the 1580163953Srrs * queues and know that we are clear to send 1581163953Srrs * shutdown 1582163953Srrs */ 1583163953Srrs sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR); 1584163953Srrs /* Are we clean? */ 1585163953Srrs if (TAILQ_EMPTY(&asoc->send_queue) && 1586163953Srrs TAILQ_EMPTY(&asoc->sent_queue)) { 1587163953Srrs /* 1588163953Srrs * there is nothing queued to send, so I'm 1589163953Srrs * done... 1590163953Srrs */ 1591166675Srrs if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) { 1592163953Srrs /* only send SHUTDOWN 1st time thru */ 1593163953Srrs sctp_send_shutdown(stcb, stcb->asoc.primary_destination); 1594166675Srrs if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) || 1595166675Srrs (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) { 1596166675Srrs SCTP_STAT_DECR_GAUGE32(sctps_currestab); 1597166675Srrs } 1598163953Srrs asoc->state = SCTP_STATE_SHUTDOWN_SENT; 1599163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, 1600163953Srrs stcb->sctp_ep, stcb, 1601163953Srrs asoc->primary_destination); 1602163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD, 1603163953Srrs stcb->sctp_ep, stcb, 1604163953Srrs asoc->primary_destination); 1605163953Srrs } 1606163953Srrs } 1607163953Srrs } else { 1608163953Srrs /* 1609163953Srrs * No auto close at this time, reset t-o to check 1610163953Srrs * later 1611163953Srrs */ 1612163953Srrs int tmp; 1613163953Srrs 1614163953Srrs /* fool the timer startup to use the time left */ 1615163953Srrs tmp = asoc->sctp_autoclose_ticks; 1616163953Srrs asoc->sctp_autoclose_ticks -= ticks_gone_by; 1617163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb, 1618163953Srrs net); 1619163953Srrs /* restore the real tick value */ 1620163953Srrs asoc->sctp_autoclose_ticks = tmp; 1621163953Srrs } 1622163953Srrs } 1623163953Srrs} 1624163953Srrs 1625163953Srrsvoid 1626163953Srrssctp_iterator_timer(struct sctp_iterator *it) 1627163953Srrs{ 1628163953Srrs int iteration_count = 0; 1629167598Srrs int inp_skip = 0; 1630163953Srrs 1631163953Srrs /* 1632163953Srrs * only one iterator can run at a time. This is the only way we can 1633163953Srrs * cleanly pull ep's from underneath all the running interators when 1634163953Srrs * a ep is freed. 1635163953Srrs */ 1636163953Srrs SCTP_ITERATOR_LOCK(); 1637163953Srrs if (it->inp == NULL) { 1638163953Srrs /* iterator is complete */ 1639163953Srrsdone_with_iterator: 1640163953Srrs SCTP_ITERATOR_UNLOCK(); 1641163953Srrs SCTP_INP_INFO_WLOCK(); 1642167598Srrs TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr); 1643163953Srrs /* stopping the callout is not needed, in theory */ 1644163953Srrs SCTP_INP_INFO_WUNLOCK(); 1645169378Srrs (void)SCTP_OS_TIMER_STOP(&it->tmr.timer); 1646163953Srrs if (it->function_atend != NULL) { 1647163953Srrs (*it->function_atend) (it->pointer, it->val); 1648163953Srrs } 1649170091Srrs SCTP_FREE(it, SCTP_M_ITER); 1650163953Srrs return; 1651163953Srrs } 1652163953Srrsselect_a_new_ep: 1653163953Srrs SCTP_INP_WLOCK(it->inp); 1654163953Srrs while (((it->pcb_flags) && 1655163953Srrs ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) || 1656163953Srrs ((it->pcb_features) && 1657163953Srrs ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) { 1658163953Srrs /* endpoint flags or features don't match, so keep looking */ 1659163953Srrs if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1660163953Srrs SCTP_INP_WUNLOCK(it->inp); 1661163953Srrs goto done_with_iterator; 1662163953Srrs } 1663163953Srrs SCTP_INP_WUNLOCK(it->inp); 1664163953Srrs it->inp = LIST_NEXT(it->inp, sctp_list); 1665163953Srrs if (it->inp == NULL) { 1666163953Srrs goto done_with_iterator; 1667163953Srrs } 1668163953Srrs SCTP_INP_WLOCK(it->inp); 1669163953Srrs } 1670163953Srrs if ((it->inp->inp_starting_point_for_iterator != NULL) && 1671163953Srrs (it->inp->inp_starting_point_for_iterator != it)) { 1672169420Srrs SCTP_PRINTF("Iterator collision, waiting for one at %p\n", 1673163979Sru it->inp); 1674163953Srrs SCTP_INP_WUNLOCK(it->inp); 1675163953Srrs goto start_timer_return; 1676163953Srrs } 1677163953Srrs /* mark the current iterator on the endpoint */ 1678163953Srrs it->inp->inp_starting_point_for_iterator = it; 1679163953Srrs SCTP_INP_WUNLOCK(it->inp); 1680163953Srrs SCTP_INP_RLOCK(it->inp); 1681163953Srrs /* now go through each assoc which is in the desired state */ 1682167598Srrs if (it->done_current_ep == 0) { 1683167598Srrs if (it->function_inp != NULL) 1684167598Srrs inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val); 1685167598Srrs it->done_current_ep = 1; 1686167598Srrs } 1687163953Srrs if (it->stcb == NULL) { 1688163953Srrs /* run the per instance function */ 1689163953Srrs it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list); 1690163953Srrs } 1691163953Srrs SCTP_INP_RUNLOCK(it->inp); 1692167598Srrs if ((inp_skip) || it->stcb == NULL) { 1693167598Srrs if (it->function_inp_end != NULL) { 1694167598Srrs inp_skip = (*it->function_inp_end) (it->inp, 1695167598Srrs it->pointer, 1696167598Srrs it->val); 1697167598Srrs } 1698167598Srrs goto no_stcb; 1699167598Srrs } 1700163953Srrs if ((it->stcb) && 1701163953Srrs (it->stcb->asoc.stcb_starting_point_for_iterator == it)) { 1702163953Srrs it->stcb->asoc.stcb_starting_point_for_iterator = NULL; 1703163953Srrs } 1704163953Srrs while (it->stcb) { 1705163953Srrs SCTP_TCB_LOCK(it->stcb); 1706163953Srrs if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) { 1707163953Srrs /* not in the right state... keep looking */ 1708163953Srrs SCTP_TCB_UNLOCK(it->stcb); 1709163953Srrs goto next_assoc; 1710163953Srrs } 1711163953Srrs /* mark the current iterator on the assoc */ 1712163953Srrs it->stcb->asoc.stcb_starting_point_for_iterator = it; 1713163953Srrs /* see if we have limited out the iterator loop */ 1714163953Srrs iteration_count++; 1715163953Srrs if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) { 1716163953Srrs start_timer_return: 1717163953Srrs /* set a timer to continue this later */ 1718163953Srrs SCTP_TCB_UNLOCK(it->stcb); 1719163953Srrs sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR, 1720163953Srrs (struct sctp_inpcb *)it, NULL, NULL); 1721163953Srrs SCTP_ITERATOR_UNLOCK(); 1722163953Srrs return; 1723163953Srrs } 1724163953Srrs /* run function on this one */ 1725163953Srrs (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val); 1726163953Srrs 1727163953Srrs /* 1728163953Srrs * we lie here, it really needs to have its own type but 1729163953Srrs * first I must verify that this won't effect things :-0 1730163953Srrs */ 1731163953Srrs if (it->no_chunk_output == 0) 1732163953Srrs sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3); 1733163953Srrs 1734163953Srrs SCTP_TCB_UNLOCK(it->stcb); 1735163953Srrsnext_assoc: 1736163953Srrs it->stcb = LIST_NEXT(it->stcb, sctp_tcblist); 1737167598Srrs if (it->stcb == NULL) { 1738167598Srrs if (it->function_inp_end != NULL) { 1739167598Srrs inp_skip = (*it->function_inp_end) (it->inp, 1740167598Srrs it->pointer, 1741167598Srrs it->val); 1742167598Srrs } 1743167598Srrs } 1744163953Srrs } 1745167598Srrsno_stcb: 1746163953Srrs /* done with all assocs on this endpoint, move on to next endpoint */ 1747167598Srrs it->done_current_ep = 0; 1748163953Srrs SCTP_INP_WLOCK(it->inp); 1749163953Srrs it->inp->inp_starting_point_for_iterator = NULL; 1750163953Srrs SCTP_INP_WUNLOCK(it->inp); 1751163953Srrs if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) { 1752163953Srrs it->inp = NULL; 1753163953Srrs } else { 1754163953Srrs SCTP_INP_INFO_RLOCK(); 1755163953Srrs it->inp = LIST_NEXT(it->inp, sctp_list); 1756163953Srrs SCTP_INP_INFO_RUNLOCK(); 1757163953Srrs } 1758163953Srrs if (it->inp == NULL) { 1759163953Srrs goto done_with_iterator; 1760163953Srrs } 1761163953Srrs goto select_a_new_ep; 1762163953Srrs} 1763