/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
29171440Srrs */ 30171440Srrs 31171440Srrs#include <netinet/sctp_os.h> 32171440Srrs#include <netinet/sctp_var.h> 33171440Srrs#include <netinet/sctp_sysctl.h> 34171440Srrs#include <netinet/sctp_pcb.h> 35171440Srrs#include <netinet/sctp_header.h> 36171440Srrs#include <netinet/sctputil.h> 37171440Srrs#include <netinet/sctp_output.h> 38171440Srrs#include <netinet/sctp_input.h> 39171440Srrs#include <netinet/sctp_indata.h> 40171440Srrs#include <netinet/sctp_uio.h> 41171440Srrs#include <netinet/sctp_timer.h> 42171440Srrs#include <netinet/sctp_auth.h> 43171440Srrs#include <netinet/sctp_asconf.h> 44171440Srrs#include <netinet/sctp_cc_functions.h> 45171440Srrs#include <sys/cdefs.h> 46171440Srrs__FBSDID("$FreeBSD: head/sys/netinet/sctp_cc_functions.c 198522 2009-10-27 18:17:07Z tuexen $"); 47171440Srrsvoid 48171440Srrssctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net) 49171440Srrs{ 50171440Srrs /* 51171440Srrs * We take the max of the burst limit times a MTU or the 52179157Srrs * INITIAL_CWND. We then limit this to 4 MTU's of sending. cwnd must 53179157Srrs * be at least 2 MTU. 54171440Srrs */ 55171440Srrs net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND)); 56171440Srrs net->ssthresh = stcb->asoc.peers_rwnd; 57171440Srrs 58179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) { 59171440Srrs sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION); 60171440Srrs } 61171440Srrs} 62171440Srrs 63171440Srrsvoid 64171440Srrssctp_cwnd_update_after_fr(struct sctp_tcb *stcb, 65171440Srrs struct sctp_association *asoc) 66171440Srrs{ 67171440Srrs struct sctp_nets *net; 68171440Srrs 69171440Srrs /*- 70171440Srrs * CMT fast recovery code. Need to debug. 
((sctp_cmt_on_off == 1) && 71171440Srrs * (net->fast_retran_loss_recovery == 0))) 72171440Srrs */ 73171440Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 74179783Srrs if ((asoc->fast_retran_loss_recovery == 0) || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1)) { 75171440Srrs /* out of a RFC2582 Fast recovery window? */ 76171440Srrs if (net->net_ack > 0) { 77171440Srrs /* 78171440Srrs * per section 7.2.3, are there any 79171440Srrs * destinations that had a fast retransmit 80171440Srrs * to them. If so what we need to do is 81171440Srrs * adjust ssthresh and cwnd. 82171440Srrs */ 83171440Srrs struct sctp_tmit_chunk *lchk; 84171440Srrs int old_cwnd = net->cwnd; 85171440Srrs 86171440Srrs net->ssthresh = net->cwnd / 2; 87171440Srrs if (net->ssthresh < (net->mtu * 2)) { 88171440Srrs net->ssthresh = 2 * net->mtu; 89171440Srrs } 90171440Srrs net->cwnd = net->ssthresh; 91179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 92171440Srrs sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), 93171440Srrs SCTP_CWND_LOG_FROM_FR); 94171440Srrs } 95171440Srrs lchk = TAILQ_FIRST(&asoc->send_queue); 96171440Srrs 97171440Srrs net->partial_bytes_acked = 0; 98171440Srrs /* Turn on fast recovery window */ 99171440Srrs asoc->fast_retran_loss_recovery = 1; 100171440Srrs if (lchk == NULL) { 101171440Srrs /* Mark end of the window */ 102171440Srrs asoc->fast_recovery_tsn = asoc->sending_seq - 1; 103171440Srrs } else { 104171440Srrs asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 105171440Srrs } 106171440Srrs 107171440Srrs /* 108171440Srrs * CMT fast recovery -- per destination 109171440Srrs * recovery variable. 
110171440Srrs */ 111171440Srrs net->fast_retran_loss_recovery = 1; 112171440Srrs 113171440Srrs if (lchk == NULL) { 114171440Srrs /* Mark end of the window */ 115171440Srrs net->fast_recovery_tsn = asoc->sending_seq - 1; 116171440Srrs } else { 117171440Srrs net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 118171440Srrs } 119171440Srrs 120171440Srrs /* 121171440Srrs * Disable Nonce Sum Checking and store the 122171440Srrs * resync tsn 123171440Srrs */ 124171440Srrs asoc->nonce_sum_check = 0; 125171440Srrs asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1; 126171440Srrs 127171440Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, 128171440Srrs stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 129171440Srrs sctp_timer_start(SCTP_TIMER_TYPE_SEND, 130171440Srrs stcb->sctp_ep, stcb, net); 131171440Srrs } 132171440Srrs } else if (net->net_ack > 0) { 133171440Srrs /* 134171440Srrs * Mark a peg that we WOULD have done a cwnd 135171440Srrs * reduction but RFC2582 prevented this action. 136171440Srrs */ 137171440Srrs SCTP_STAT_INCR(sctps_fastretransinrtt); 138171440Srrs } 139171440Srrs } 140171440Srrs} 141171440Srrs 142171440Srrsvoid 143171440Srrssctp_cwnd_update_after_sack(struct sctp_tcb *stcb, 144171440Srrs struct sctp_association *asoc, 145171440Srrs int accum_moved, int reneged_all, int will_exit) 146171440Srrs{ 147171440Srrs struct sctp_nets *net; 148171440Srrs 149171440Srrs /******************************/ 150171440Srrs /* update cwnd and Early FR */ 151171440Srrs /******************************/ 152171440Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 153171440Srrs 154171440Srrs#ifdef JANA_CMT_FAST_RECOVERY 155171440Srrs /* 156171440Srrs * CMT fast recovery code. Need to debug. 
157171440Srrs */ 158171440Srrs if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) { 159171440Srrs if (compare_with_wrap(asoc->last_acked_seq, 160171440Srrs net->fast_recovery_tsn, MAX_TSN) || 161171440Srrs (asoc->last_acked_seq == net->fast_recovery_tsn) || 162171440Srrs compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) || 163171440Srrs (net->pseudo_cumack == net->fast_recovery_tsn)) { 164171440Srrs net->will_exit_fast_recovery = 1; 165171440Srrs } 166171440Srrs } 167171440Srrs#endif 168179783Srrs if (SCTP_BASE_SYSCTL(sctp_early_fr)) { 169171440Srrs /* 170171440Srrs * So, first of all do we need to have a Early FR 171171440Srrs * timer running? 172171440Srrs */ 173171440Srrs if (((TAILQ_FIRST(&asoc->sent_queue)) && 174171440Srrs (net->ref_count > 1) && 175171440Srrs (net->flight_size < net->cwnd)) || 176171440Srrs (reneged_all)) { 177171440Srrs /* 178171440Srrs * yes, so in this case stop it if its 179171440Srrs * running, and then restart it. Reneging 180171440Srrs * all is a special case where we want to 181171440Srrs * run the Early FR timer and then force the 182171440Srrs * last few unacked to be sent, causing us 183171440Srrs * to illicit a sack with gaps to force out 184171440Srrs * the others. 
185171440Srrs */ 186171440Srrs if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 187171440Srrs SCTP_STAT_INCR(sctps_earlyfrstpidsck2); 188171440Srrs sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 189171440Srrs SCTP_FROM_SCTP_INDATA + SCTP_LOC_20); 190171440Srrs } 191171440Srrs SCTP_STAT_INCR(sctps_earlyfrstrid); 192171440Srrs sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net); 193171440Srrs } else { 194171440Srrs /* No, stop it if its running */ 195171440Srrs if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) { 196171440Srrs SCTP_STAT_INCR(sctps_earlyfrstpidsck3); 197171440Srrs sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net, 198171440Srrs SCTP_FROM_SCTP_INDATA + SCTP_LOC_21); 199171440Srrs } 200171440Srrs } 201171440Srrs } 202171440Srrs /* if nothing was acked on this destination skip it */ 203171440Srrs if (net->net_ack == 0) { 204179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 205171440Srrs sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK); 206171440Srrs } 207171440Srrs continue; 208171440Srrs } 209171440Srrs if (net->net_ack2 > 0) { 210171440Srrs /* 211171440Srrs * Karn's rule applies to clearing error count, this 212171440Srrs * is optional. 213171440Srrs */ 214171440Srrs net->error_count = 0; 215171440Srrs if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) == 216171440Srrs SCTP_ADDR_NOT_REACHABLE) { 217171440Srrs /* addr came good */ 218171440Srrs net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE; 219171440Srrs net->dest_state |= SCTP_ADDR_REACHABLE; 220171440Srrs sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb, 221172090Srrs SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED); 222171440Srrs /* now was it the primary? 
if so restore */ 223171440Srrs if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) { 224171440Srrs (void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net); 225171440Srrs } 226171440Srrs } 227171440Srrs /* 228171440Srrs * JRS 5/14/07 - If CMT PF is on and the destination 229171440Srrs * is in PF state, set the destination to active 230171440Srrs * state and set the cwnd to one or two MTU's based 231171440Srrs * on whether PF1 or PF2 is being used. 232171440Srrs * 233171440Srrs * Should we stop any running T3 timer here? 234171440Srrs */ 235179783Srrs if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && 236179783Srrs SCTP_BASE_SYSCTL(sctp_cmt_pf) && 237179783Srrs ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) { 238171440Srrs net->dest_state &= ~SCTP_ADDR_PF; 239179783Srrs net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf); 240171440Srrs SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n", 241171440Srrs net, net->cwnd); 242171440Srrs /* 243171440Srrs * Since the cwnd value is explicitly set, 244171440Srrs * skip the code that updates the cwnd 245171440Srrs * value. 
246171440Srrs */ 247171440Srrs goto skip_cwnd_update; 248171440Srrs } 249171440Srrs } 250171440Srrs#ifdef JANA_CMT_FAST_RECOVERY 251171440Srrs /* 252171440Srrs * CMT fast recovery code 253171440Srrs */ 254171440Srrs /* 255171440Srrs * if (sctp_cmt_on_off == 1 && 256171440Srrs * net->fast_retran_loss_recovery && 257171440Srrs * net->will_exit_fast_recovery == 0) { @@@ Do something } 258171440Srrs * else if (sctp_cmt_on_off == 0 && 259171440Srrs * asoc->fast_retran_loss_recovery && will_exit == 0) { 260171440Srrs */ 261171440Srrs#endif 262171440Srrs 263179783Srrs if (asoc->fast_retran_loss_recovery && will_exit == 0 && SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) { 264171440Srrs /* 265171440Srrs * If we are in loss recovery we skip any cwnd 266171440Srrs * update 267171440Srrs */ 268171440Srrs goto skip_cwnd_update; 269171440Srrs } 270171440Srrs /* 271171440Srrs * CMT: CUC algorithm. Update cwnd if pseudo-cumack has 272171440Srrs * moved. 273171440Srrs */ 274179783Srrs if (accum_moved || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && net->new_pseudo_cumack)) { 275171440Srrs /* If the cumulative ack moved we can proceed */ 276171440Srrs if (net->cwnd <= net->ssthresh) { 277171440Srrs /* We are in slow start */ 278179157Srrs if (net->flight_size + net->net_ack >= net->cwnd) { 279179783Srrs if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) { 280179783Srrs net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)); 281179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 282171440Srrs sctp_log_cwnd(stcb, net, net->mtu, 283171440Srrs SCTP_CWND_LOG_FROM_SS); 284171440Srrs } 285171440Srrs } else { 286171440Srrs net->cwnd += net->net_ack; 287179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 288171440Srrs sctp_log_cwnd(stcb, net, net->net_ack, 289171440Srrs SCTP_CWND_LOG_FROM_SS); 290171440Srrs } 291171440Srrs } 292171440Srrs } else { 293179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & 
SCTP_CWND_LOGGING_ENABLE) { 294171440Srrs sctp_log_cwnd(stcb, net, net->net_ack, 295171440Srrs SCTP_CWND_LOG_NOADV_SS); 296171440Srrs } 297171440Srrs } 298171440Srrs } else { 299171440Srrs /* We are in congestion avoidance */ 300179141Srrs /* 301179141Srrs * Add to pba 302179141Srrs */ 303179157Srrs net->partial_bytes_acked += net->net_ack; 304171440Srrs 305179141Srrs if ((net->flight_size + net->net_ack >= net->cwnd) && 306179141Srrs (net->partial_bytes_acked >= net->cwnd)) { 307179141Srrs net->partial_bytes_acked -= net->cwnd; 308179141Srrs net->cwnd += net->mtu; 309179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 310179141Srrs sctp_log_cwnd(stcb, net, net->mtu, 311179141Srrs SCTP_CWND_LOG_FROM_CA); 312171440Srrs } 313171440Srrs } else { 314179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 315171440Srrs sctp_log_cwnd(stcb, net, net->net_ack, 316171440Srrs SCTP_CWND_LOG_NOADV_CA); 317171440Srrs } 318171440Srrs } 319171440Srrs } 320171440Srrs } else { 321179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) { 322171440Srrs sctp_log_cwnd(stcb, net, net->mtu, 323171440Srrs SCTP_CWND_LOG_NO_CUMACK); 324171440Srrs } 325171440Srrs } 326171440Srrsskip_cwnd_update: 327171440Srrs /* 328171440Srrs * NOW, according to Karn's rule do we need to restore the 329171440Srrs * RTO timer back? Check our net_ack2. If not set then we 330171440Srrs * have a ambiguity.. i.e. all data ack'd was sent to more 331171440Srrs * than one place. 
332171440Srrs */ 333171440Srrs if (net->net_ack2) { 334171440Srrs /* restore any doubled timers */ 335171440Srrs net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1; 336171440Srrs if (net->RTO < stcb->asoc.minrto) { 337171440Srrs net->RTO = stcb->asoc.minrto; 338171440Srrs } 339171440Srrs if (net->RTO > stcb->asoc.maxrto) { 340171440Srrs net->RTO = stcb->asoc.maxrto; 341171440Srrs } 342171440Srrs } 343171440Srrs } 344171440Srrs} 345171440Srrs 346171440Srrsvoid 347179157Srrssctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net) 348171440Srrs{ 349171440Srrs int old_cwnd = net->cwnd; 350171440Srrs 351198522Stuexen net->ssthresh = max(net->cwnd / 2, 4 * net->mtu); 352171440Srrs net->cwnd = net->mtu; 353179157Srrs net->partial_bytes_acked = 0; 354179157Srrs 355179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 356171440Srrs sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX); 357171440Srrs } 358171440Srrs} 359171440Srrs 360179157Srrsvoid 361179157Srrssctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net) 362179157Srrs{ 363179157Srrs int old_cwnd = net->cwnd; 364179157Srrs 365179157Srrs SCTP_STAT_INCR(sctps_ecnereducedcwnd); 366179157Srrs net->ssthresh = net->cwnd / 2; 367179157Srrs if (net->ssthresh < net->mtu) { 368179157Srrs net->ssthresh = net->mtu; 369179157Srrs /* here back off the timer as well, to slow us down */ 370179157Srrs net->RTO <<= 1; 371179157Srrs } 372179157Srrs net->cwnd = net->ssthresh; 373179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 374179157Srrs sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT); 375179157Srrs } 376179157Srrs} 377179157Srrs 378179157Srrsvoid 379179157Srrssctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb, 380179157Srrs struct sctp_nets *net, struct sctp_pktdrop_chunk *cp, 381179157Srrs uint32_t * bottle_bw, uint32_t * on_queue) 382179157Srrs{ 383179157Srrs uint32_t 
bw_avail; 384179157Srrs int rtt, incr; 385179157Srrs int old_cwnd = net->cwnd; 386179157Srrs 387179157Srrs /* need real RTT for this calc */ 388179157Srrs rtt = ((net->lastsa >> 2) + net->lastsv) >> 1; 389179157Srrs /* get bottle neck bw */ 390179157Srrs *bottle_bw = ntohl(cp->bottle_bw); 391179157Srrs /* and whats on queue */ 392179157Srrs *on_queue = ntohl(cp->current_onq); 393179157Srrs /* 394179157Srrs * adjust the on-queue if our flight is more it could be that the 395179157Srrs * router has not yet gotten data "in-flight" to it 396179157Srrs */ 397179157Srrs if (*on_queue < net->flight_size) 398179157Srrs *on_queue = net->flight_size; 399179157Srrs /* calculate the available space */ 400179157Srrs bw_avail = (*bottle_bw * rtt) / 1000; 401179157Srrs if (bw_avail > *bottle_bw) { 402179157Srrs /* 403179157Srrs * Cap the growth to no more than the bottle neck. This can 404179157Srrs * happen as RTT slides up due to queues. It also means if 405179157Srrs * you have more than a 1 second RTT with a empty queue you 406179157Srrs * will be limited to the bottle_bw per second no matter if 407179157Srrs * other points have 1/2 the RTT and you could get more 408179157Srrs * out... 409179157Srrs */ 410179157Srrs bw_avail = *bottle_bw; 411179157Srrs } 412179157Srrs if (*on_queue > bw_avail) { 413179157Srrs /* 414179157Srrs * No room for anything else don't allow anything else to be 415179157Srrs * "added to the fire". 416179157Srrs */ 417179157Srrs int seg_inflight, seg_onqueue, my_portion; 418179157Srrs 419179157Srrs net->partial_bytes_acked = 0; 420179157Srrs 421179157Srrs /* how much are we over queue size? */ 422179157Srrs incr = *on_queue - bw_avail; 423179157Srrs if (stcb->asoc.seen_a_sack_this_pkt) { 424179157Srrs /* 425179157Srrs * undo any cwnd adjustment that the sack might have 426179157Srrs * made 427179157Srrs */ 428179157Srrs net->cwnd = net->prev_cwnd; 429179157Srrs } 430179157Srrs /* Now how much of that is mine? 
*/ 431179157Srrs seg_inflight = net->flight_size / net->mtu; 432179157Srrs seg_onqueue = *on_queue / net->mtu; 433179157Srrs my_portion = (incr * seg_inflight) / seg_onqueue; 434179157Srrs 435179157Srrs /* Have I made an adjustment already */ 436179157Srrs if (net->cwnd > net->flight_size) { 437179157Srrs /* 438179157Srrs * for this flight I made an adjustment we need to 439179157Srrs * decrease the portion by a share our previous 440179157Srrs * adjustment. 441179157Srrs */ 442179157Srrs int diff_adj; 443179157Srrs 444179157Srrs diff_adj = net->cwnd - net->flight_size; 445179157Srrs if (diff_adj > my_portion) 446179157Srrs my_portion = 0; 447179157Srrs else 448179157Srrs my_portion -= diff_adj; 449179157Srrs } 450179157Srrs /* 451179157Srrs * back down to the previous cwnd (assume we have had a sack 452179157Srrs * before this packet). minus what ever portion of the 453179157Srrs * overage is my fault. 454179157Srrs */ 455179157Srrs net->cwnd -= my_portion; 456179157Srrs 457179157Srrs /* we will NOT back down more than 1 MTU */ 458179157Srrs if (net->cwnd <= net->mtu) { 459179157Srrs net->cwnd = net->mtu; 460179157Srrs } 461179157Srrs /* force into CA */ 462179157Srrs net->ssthresh = net->cwnd - 1; 463179157Srrs } else { 464179157Srrs /* 465179157Srrs * Take 1/4 of the space left or max burst up .. whichever 466179157Srrs * is less. 
467179157Srrs */ 468179157Srrs incr = min((bw_avail - *on_queue) >> 2, 469179157Srrs stcb->asoc.max_burst * net->mtu); 470179157Srrs net->cwnd += incr; 471179157Srrs } 472179157Srrs if (net->cwnd > bw_avail) { 473179157Srrs /* We can't exceed the pipe size */ 474179157Srrs net->cwnd = bw_avail; 475179157Srrs } 476179157Srrs if (net->cwnd < net->mtu) { 477179157Srrs /* We always have 1 MTU */ 478179157Srrs net->cwnd = net->mtu; 479179157Srrs } 480179157Srrs if (net->cwnd - old_cwnd != 0) { 481179157Srrs /* log only changes */ 482179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 483179157Srrs sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), 484179157Srrs SCTP_CWND_LOG_FROM_SAT); 485179157Srrs } 486179157Srrs } 487179157Srrs} 488179157Srrs 489179157Srrsvoid 490179157Srrssctp_cwnd_update_after_output(struct sctp_tcb *stcb, 491179157Srrs struct sctp_nets *net, int burst_limit) 492179157Srrs{ 493179157Srrs int old_cwnd = net->cwnd; 494179157Srrs 495179157Srrs if (net->ssthresh < net->cwnd) 496179157Srrs net->ssthresh = net->cwnd; 497179157Srrs net->cwnd = (net->flight_size + (burst_limit * net->mtu)); 498179157Srrs 499179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 500179157Srrs sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST); 501179157Srrs } 502179157Srrs} 503179157Srrs 504179157Srrsvoid 505179157Srrssctp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp, 506179157Srrs struct sctp_tcb *stcb, struct sctp_nets *net) 507179157Srrs{ 508179157Srrs int old_cwnd = net->cwnd; 509179157Srrs 510179157Srrs sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED); 511179157Srrs /* 512179157Srrs * make a small adjustment to cwnd and force to CA. 
513179157Srrs */ 514179157Srrs if (net->cwnd > net->mtu) 515179157Srrs /* drop down one MTU after sending */ 516179157Srrs net->cwnd -= net->mtu; 517179157Srrs if (net->cwnd < net->ssthresh) 518179157Srrs /* still in SS move to CA */ 519179157Srrs net->ssthresh = net->cwnd - 1; 520179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 521179157Srrs sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR); 522179157Srrs } 523179157Srrs} 524179157Srrs 525171440Srrsstruct sctp_hs_raise_drop { 526171440Srrs int32_t cwnd; 527171440Srrs int32_t increase; 528171440Srrs int32_t drop_percent; 529171440Srrs}; 530171440Srrs 531171440Srrs#define SCTP_HS_TABLE_SIZE 73 532171440Srrs 533171440Srrsstruct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = { 534171440Srrs {38, 1, 50}, /* 0 */ 535171440Srrs {118, 2, 44}, /* 1 */ 536171440Srrs {221, 3, 41}, /* 2 */ 537171440Srrs {347, 4, 38}, /* 3 */ 538171440Srrs {495, 5, 37}, /* 4 */ 539171440Srrs {663, 6, 35}, /* 5 */ 540171440Srrs {851, 7, 34}, /* 6 */ 541171440Srrs {1058, 8, 33}, /* 7 */ 542171440Srrs {1284, 9, 32}, /* 8 */ 543171440Srrs {1529, 10, 31}, /* 9 */ 544171440Srrs {1793, 11, 30}, /* 10 */ 545171440Srrs {2076, 12, 29}, /* 11 */ 546171440Srrs {2378, 13, 28}, /* 12 */ 547171440Srrs {2699, 14, 28}, /* 13 */ 548171440Srrs {3039, 15, 27}, /* 14 */ 549171440Srrs {3399, 16, 27}, /* 15 */ 550171440Srrs {3778, 17, 26}, /* 16 */ 551171440Srrs {4177, 18, 26}, /* 17 */ 552171440Srrs {4596, 19, 25}, /* 18 */ 553171440Srrs {5036, 20, 25}, /* 19 */ 554171440Srrs {5497, 21, 24}, /* 20 */ 555171440Srrs {5979, 22, 24}, /* 21 */ 556171440Srrs {6483, 23, 23}, /* 22 */ 557171440Srrs {7009, 24, 23}, /* 23 */ 558171440Srrs {7558, 25, 22}, /* 24 */ 559171440Srrs {8130, 26, 22}, /* 25 */ 560171440Srrs {8726, 27, 22}, /* 26 */ 561171440Srrs {9346, 28, 21}, /* 27 */ 562171440Srrs {9991, 29, 21}, /* 28 */ 563171440Srrs {10661, 30, 21}, /* 29 */ 564171440Srrs {11358, 31, 20}, /* 30 */ 565171440Srrs 
{12082, 32, 20}, /* 31 */ 566171440Srrs {12834, 33, 20}, /* 32 */ 567171440Srrs {13614, 34, 19}, /* 33 */ 568171440Srrs {14424, 35, 19}, /* 34 */ 569171440Srrs {15265, 36, 19}, /* 35 */ 570171440Srrs {16137, 37, 19}, /* 36 */ 571171440Srrs {17042, 38, 18}, /* 37 */ 572171440Srrs {17981, 39, 18}, /* 38 */ 573171440Srrs {18955, 40, 18}, /* 39 */ 574171440Srrs {19965, 41, 17}, /* 40 */ 575171440Srrs {21013, 42, 17}, /* 41 */ 576171440Srrs {22101, 43, 17}, /* 42 */ 577171440Srrs {23230, 44, 17}, /* 43 */ 578171440Srrs {24402, 45, 16}, /* 44 */ 579171440Srrs {25618, 46, 16}, /* 45 */ 580171440Srrs {26881, 47, 16}, /* 46 */ 581171440Srrs {28193, 48, 16}, /* 47 */ 582171440Srrs {29557, 49, 15}, /* 48 */ 583171440Srrs {30975, 50, 15}, /* 49 */ 584171440Srrs {32450, 51, 15}, /* 50 */ 585171440Srrs {33986, 52, 15}, /* 51 */ 586171440Srrs {35586, 53, 14}, /* 52 */ 587171440Srrs {37253, 54, 14}, /* 53 */ 588171440Srrs {38992, 55, 14}, /* 54 */ 589171440Srrs {40808, 56, 14}, /* 55 */ 590171440Srrs {42707, 57, 13}, /* 56 */ 591171440Srrs {44694, 58, 13}, /* 57 */ 592171440Srrs {46776, 59, 13}, /* 58 */ 593171440Srrs {48961, 60, 13}, /* 59 */ 594171440Srrs {51258, 61, 13}, /* 60 */ 595171440Srrs {53677, 62, 12}, /* 61 */ 596171440Srrs {56230, 63, 12}, /* 62 */ 597171440Srrs {58932, 64, 12}, /* 63 */ 598171440Srrs {61799, 65, 12}, /* 64 */ 599171440Srrs {64851, 66, 11}, /* 65 */ 600171440Srrs {68113, 67, 11}, /* 66 */ 601171440Srrs {71617, 68, 11}, /* 67 */ 602171440Srrs {75401, 69, 10}, /* 68 */ 603171440Srrs {79517, 70, 10}, /* 69 */ 604171440Srrs {84035, 71, 10}, /* 70 */ 605171440Srrs {89053, 72, 10}, /* 71 */ 606171440Srrs {94717, 73, 9} /* 72 */ 607171440Srrs}; 608171440Srrs 609171440Srrsstatic void 610171440Srrssctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net) 611171440Srrs{ 612171440Srrs int cur_val, i, indx, incr; 613171440Srrs 614171440Srrs cur_val = net->cwnd >> 10; 615171440Srrs indx = SCTP_HS_TABLE_SIZE - 1; 616171440Srrs#ifdef SCTP_DEBUG 
617171440Srrs printf("HS CC CAlled.\n"); 618171440Srrs#endif 619171440Srrs if (cur_val < sctp_cwnd_adjust[0].cwnd) { 620171440Srrs /* normal mode */ 621171440Srrs if (net->net_ack > net->mtu) { 622171440Srrs net->cwnd += net->mtu; 623179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 624171440Srrs sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS); 625171440Srrs } 626171440Srrs } else { 627171440Srrs net->cwnd += net->net_ack; 628179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 629171440Srrs sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS); 630171440Srrs } 631171440Srrs } 632171440Srrs } else { 633171440Srrs for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) { 634171440Srrs if (cur_val < sctp_cwnd_adjust[i].cwnd) { 635171440Srrs indx = i; 636171440Srrs break; 637171440Srrs } 638171440Srrs } 639171440Srrs net->last_hs_used = indx; 640171440Srrs incr = ((sctp_cwnd_adjust[indx].increase) << 10); 641171440Srrs net->cwnd += incr; 642179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 643171440Srrs sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS); 644171440Srrs } 645171440Srrs } 646171440Srrs} 647171440Srrs 648171440Srrsstatic void 649171440Srrssctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net) 650171440Srrs{ 651171440Srrs int cur_val, i, indx; 652171440Srrs int old_cwnd = net->cwnd; 653171440Srrs 654171440Srrs cur_val = net->cwnd >> 10; 655171440Srrs indx = net->last_hs_used; 656171440Srrs if (cur_val < sctp_cwnd_adjust[0].cwnd) { 657171440Srrs /* normal mode */ 658171440Srrs net->ssthresh = net->cwnd / 2; 659171440Srrs if (net->ssthresh < (net->mtu * 2)) { 660171440Srrs net->ssthresh = 2 * net->mtu; 661171440Srrs } 662171440Srrs net->cwnd = net->ssthresh; 663171440Srrs } else { 664171440Srrs /* drop by the proper amount */ 665171440Srrs net->ssthresh = net->cwnd - (int)((net->cwnd / 100) * 666171440Srrs 
sctp_cwnd_adjust[net->last_hs_used].drop_percent); 667171440Srrs net->cwnd = net->ssthresh; 668171440Srrs /* now where are we */ 669171440Srrs indx = net->last_hs_used; 670171440Srrs cur_val = net->cwnd >> 10; 671171440Srrs /* reset where we are in the table */ 672171440Srrs if (cur_val < sctp_cwnd_adjust[0].cwnd) { 673171440Srrs /* feel out of hs */ 674171440Srrs net->last_hs_used = 0; 675171440Srrs } else { 676171440Srrs for (i = indx; i >= 1; i--) { 677171440Srrs if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) { 678171440Srrs break; 679171440Srrs } 680171440Srrs } 681171440Srrs net->last_hs_used = indx; 682171440Srrs } 683171440Srrs } 684179783Srrs if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) { 685171440Srrs sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR); 686171440Srrs } 687171440Srrs} 688171440Srrs 689171440Srrsvoid 690171440Srrssctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb, 691171440Srrs struct sctp_association *asoc) 692171440Srrs{ 693171440Srrs struct sctp_nets *net; 694171440Srrs 695171440Srrs /* 696171440Srrs * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) && 697171440Srrs * (net->fast_retran_loss_recovery == 0))) 698171440Srrs */ 699171440Srrs TAILQ_FOREACH(net, &asoc->nets, sctp_next) { 700179783Srrs if ((asoc->fast_retran_loss_recovery == 0) || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1)) { 701171440Srrs /* out of a RFC2582 Fast recovery window? */ 702171440Srrs if (net->net_ack > 0) { 703171440Srrs /* 704171440Srrs * per section 7.2.3, are there any 705171440Srrs * destinations that had a fast retransmit 706171440Srrs * to them. If so what we need to do is 707171440Srrs * adjust ssthresh and cwnd. 
708171440Srrs */ 709171440Srrs struct sctp_tmit_chunk *lchk; 710171440Srrs 711171440Srrs sctp_hs_cwnd_decrease(stcb, net); 712171440Srrs 713171440Srrs lchk = TAILQ_FIRST(&asoc->send_queue); 714171440Srrs 715171440Srrs net->partial_bytes_acked = 0; 716171440Srrs /* Turn on fast recovery window */ 717171440Srrs asoc->fast_retran_loss_recovery = 1; 718171440Srrs if (lchk == NULL) { 719171440Srrs /* Mark end of the window */ 720171440Srrs asoc->fast_recovery_tsn = asoc->sending_seq - 1; 721171440Srrs } else { 722171440Srrs asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 723171440Srrs } 724171440Srrs 725171440Srrs /* 726171440Srrs * CMT fast recovery -- per destination 727171440Srrs * recovery variable. 728171440Srrs */ 729171440Srrs net->fast_retran_loss_recovery = 1; 730171440Srrs 731171440Srrs if (lchk == NULL) { 732171440Srrs /* Mark end of the window */ 733171440Srrs net->fast_recovery_tsn = asoc->sending_seq - 1; 734171440Srrs } else { 735171440Srrs net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1; 736171440Srrs } 737171440Srrs 738171440Srrs /* 739171440Srrs * Disable Nonce Sum Checking and store the 740171440Srrs * resync tsn 741171440Srrs */ 742171440Srrs asoc->nonce_sum_check = 0; 743171440Srrs asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1; 744171440Srrs 745171440Srrs sctp_timer_stop(SCTP_TIMER_TYPE_SEND, 746171440Srrs stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32); 747171440Srrs sctp_timer_start(SCTP_TIMER_TYPE_SEND, 748171440Srrs stcb->sctp_ep, stcb, net); 749171440Srrs } 750171440Srrs } else if (net->net_ack > 0) { 751171440Srrs /* 752171440Srrs * Mark a peg that we WOULD have done a cwnd 753171440Srrs * reduction but RFC2582 prevented this action. 
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}

/*
 * Per-SACK congestion-window update for the HS-TCP (HighSpeed) variant.
 * For every destination address (net) of the association:
 *   - manage the Early Fast Retransmit timer (start/restart while data is
 *     outstanding and cwnd is not yet full, stop it otherwise),
 *   - skip destinations on which nothing was newly acked (net_ack == 0),
 *   - on unambiguous acks (net_ack2 > 0) clear the error count (Karn's
 *     rule), restore reachability/primary status, and handle CMT-PF
 *     transitions out of the PF state,
 *   - grow cwnd: in slow start via the HS-TCP table (sctp_hs_cwnd_increase),
 *     in congestion avoidance linearly by one MTU per cwnd of acked bytes,
 *   - finally undo any RTO doubling per Karn's rule when net_ack2 is set.
 */
void
sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {

#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (compare_with_wrap(asoc->last_acked_seq,
			    net->fast_recovery_tsn, MAX_TSN) ||
			    (asoc->last_acked_seq == net->fast_recovery_tsn) ||
			    compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
			    (net->pseudo_cumack == net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
			/*
			 * So, first of all do we need to have a Early FR
			 * timer running?
			 */
			if (((TAILQ_FIRST(&asoc->sent_queue)) &&
			    (net->ref_count > 1) &&
			    (net->flight_size < net->cwnd)) ||
			    (reneged_all)) {
				/*
				 * yes, so in this case stop it if its
				 * running, and then restart it. Reneging
				 * all is a special case where we want to
				 * run the Early FR timer and then force the
				 * last few unacked to be sent, causing us
				 * to illicit a sack with gaps to force out
				 * the others.
				 */
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
				}
				SCTP_STAT_INCR(sctps_earlyfrstrid);
				sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
			} else {
				/* No, stop it if its running */
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
				}
			}
		}
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
		if (net->net_ack2 > 0) {
			/*
			 * Karn's rule applies to clearing error count, this
			 * is optional.
			 */
			net->error_count = 0;
			if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
			    SCTP_ADDR_NOT_REACHABLE) {
				/* addr came good */
				net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
				net->dest_state |= SCTP_ADDR_REACHABLE;
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
				    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
				/* now was it the primary? if so restore */
				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
					(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
				}
			}
			/*
			 * JRS 5/14/07 - If CMT PF is on and the destination
			 * is in PF state, set the destination to active
			 * state and set the cwnd to one or two MTU's based
			 * on whether PF1 or PF2 is being used.
			 *
			 * Should we stop any running T3 timer here?
			 */
			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
			    SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
			    ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
				net->dest_state &= ~SCTP_ADDR_PF;
				net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
				SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
				    net, net->cwnd);
				/*
				 * Since the cwnd value is explicitly set,
				 * skip the code that updates the cwnd
				 * value.
				 */
				goto skip_cwnd_update;
			}
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off == 1 &&
		 * net->fast_retran_loss_recovery &&
		 * net->will_exit_fast_recovery == 0) { @@@ Do something }
		 * else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif

		if (asoc->fast_retran_loss_recovery && will_exit == 0 && SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			goto skip_cwnd_update;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && net->new_pseudo_cumack)) {
			/* If the cumulative ack moved we can proceed */
			if (net->cwnd <= net->ssthresh) {
				/* We are in slow start */
				if (net->flight_size + net->net_ack >= net->cwnd) {

					/* HS-TCP table-driven increase */
					sctp_hs_cwnd_increase(stcb, net);

				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
						    SCTP_CWND_LOG_NOADV_SS);
					}
				}
			} else {
				/* We are in congestion avoidance */
				net->partial_bytes_acked += net->net_ack;
				if ((net->flight_size + net->net_ack >= net->cwnd) &&
				    (net->partial_bytes_acked >= net->cwnd)) {
					/* one MTU per cwnd worth of acked data */
					net->partial_bytes_acked -= net->cwnd;
					net->cwnd += net->mtu;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
						sctp_log_cwnd(stcb, net, net->mtu,
						    SCTP_CWND_LOG_FROM_CA);
					}
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
						    SCTP_CWND_LOG_NOADV_CA);
					}
				}
			}
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_NO_CUMACK);
			}
		}
skip_cwnd_update:
		/*
		 * NOW, according to Karn's rule do we need to restore the
		 * RTO timer back? Check our net_ack2. If not set then we
		 * have a ambiguity.. i.e. all data ack'd was sent to more
		 * than one place.
		 */
		if (net->net_ack2) {
			/* restore any doubled timers */
			net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
			if (net->RTO < stcb->asoc.minrto) {
				net->RTO = stcb->asoc.minrto;
			}
			if (net->RTO > stcb->asoc.maxrto) {
				net->RTO = stcb->asoc.maxrto;
			}
		}
	}
}


/*
 * H-TCP congestion control. The algorithm is detailed in:
 * R.N.Shorten, D.J.Leith:
 *   "H-TCP: TCP for high-speed and long-distance networks"
 *   Proc. PFLDnet, Argonne, 2004.
 * http://www.hamilton.ie/net/htcp3.pdf
 */


/* Compile-time knobs for the H-TCP port; both enabled as in the paper. */
static int use_rtt_scaling = 1;
static int use_bandwidth_switch = 1;

/*
 * True when seq3 lies in the half-open window starting at seq2 that also
 * contains seq1 (unsigned wrap-around safe "is seq3 at least as far from
 * seq2 as seq1 is").
 */
static inline int
between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

/* Ticks elapsed since the last congestion event on this path. */
static inline uint32_t
htcp_cong_time(struct htcp *ca)
{
	return sctp_get_tick_count() - ca->last_cong;
}

/*
 * Congestion-free time expressed in units of minRTT.
 * NOTE(review): divides by ca->minRTT with no zero check — callers appear
 * to rely on minRTT being non-zero by the time this is used; confirm.
 */
static inline uint32_t
htcp_ccount(struct htcp *ca)
{
	return htcp_cong_time(ca) / ca->minRTT;
}

/*
 * Record a congestion event: save undo state and restart the
 * congestion-epoch clock.
 */
static inline void
htcp_reset(struct htcp *ca)
{
	ca->undo_last_cong = ca->last_cong;
	ca->undo_maxRTT = ca->maxRTT;
	ca->undo_old_maxB = ca->old_maxB;
	ca->last_cong = sctp_get_tick_count();
}

#ifdef SCTP_NOT_USED

/*
 * Roll back the last htcp_reset() and return the cwnd to resume with
 * (currently compiled out).
 */
static uint32_t
htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	net->htcp_ca.last_cong = net->htcp_ca.undo_last_cong;
	net->htcp_ca.maxRTT = net->htcp_ca.undo_maxRTT;
	net->htcp_ca.old_maxB = net->htcp_ca.undo_old_maxB;
	return max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->htcp_ca.beta) * net->mtu);
}

#endif

/*
 * Track smoothed-RTT extremes for this path. minRTT is the smallest SRTT
 * ever seen; maxRTT only adapts outside fast retransmit, while ssthresh is
 * below 64K, and after >3 congestion-free RTTs, and may only creep upward
 * by at most 20 ms per sample.
 */
static inline void
measure_rtt(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	uint32_t srtt = net->lastsa >> 3;

	/* keep track of minimum RTT seen so far, minRTT is zero at first */
	if (net->htcp_ca.minRTT > srtt || !net->htcp_ca.minRTT)
		net->htcp_ca.minRTT = srtt;

	/* max RTT */
	if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->htcp_ca) > 3) {
		if (net->htcp_ca.maxRTT < net->htcp_ca.minRTT)
			net->htcp_ca.maxRTT = net->htcp_ca.minRTT;
		if (net->htcp_ca.maxRTT < srtt && srtt <= net->htcp_ca.maxRTT + MSEC_TO_TICKS(20))
			net->htcp_ca.maxRTT = srtt;
	}
}

/*
 * Sample the achieved throughput (Bi, in MTUs/sec) once at least ~one cwnd
 * of data has been acked and at least minRTT ticks have passed. Feeds the
 * bandwidth-switch heuristic in htcp_beta_update(); a no-op beyond
 * bytes_acked bookkeeping when use_bandwidth_switch is off.
 */
static void
measure_achieved_throughput(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	uint32_t now = sctp_get_tick_count();

	if (net->fast_retran_ip == 0)
		net->htcp_ca.bytes_acked = net->net_ack;

	if (!use_bandwidth_switch)
		return;

	/* achieved throughput calculations */
	/* JRS - not 100% sure of this statement */
	if (net->fast_retran_ip == 1) {
		/* in fast retransmit: restart the measurement interval */
		net->htcp_ca.bytecount = 0;
		net->htcp_ca.lasttime = now;
		return;
	}
	net->htcp_ca.bytecount += net->net_ack;

	/* "?:" with empty middle operand is a GCC extension: x ?: y == x ? x : y */
	if (net->htcp_ca.bytecount >= net->cwnd - ((net->htcp_ca.alpha >> 7 ? : 1) * net->mtu)
	    && now - net->htcp_ca.lasttime >= net->htcp_ca.minRTT
	    && net->htcp_ca.minRTT > 0) {
		uint32_t cur_Bi = net->htcp_ca.bytecount / net->mtu * hz / (now - net->htcp_ca.lasttime);

		if (htcp_ccount(&net->htcp_ca) <= 3) {
			/* just after backoff */
			net->htcp_ca.minB = net->htcp_ca.maxB = net->htcp_ca.Bi = cur_Bi;
		} else {
			/* EWMA: Bi = 3/4 old + 1/4 new */
			net->htcp_ca.Bi = (3 * net->htcp_ca.Bi + cur_Bi) / 4;
			if (net->htcp_ca.Bi > net->htcp_ca.maxB)
				net->htcp_ca.maxB = net->htcp_ca.Bi;
			if (net->htcp_ca.minB > net->htcp_ca.maxB)
				net->htcp_ca.minB = net->htcp_ca.maxB;
		}
		net->htcp_ca.bytecount = 0;
		net->htcp_ca.lasttime = now;
	}
}

/*
 * Recompute the multiplicative-decrease factor beta (fixed point, <<7).
 * If the bandwidth switch detects a throughput change of more than ~20%
 * (5*maxB outside [4*old_maxB, 6*old_maxB]), fall back to BETA_MIN;
 * otherwise adapt beta = minRTT/maxRTT clamped to [BETA_MIN, BETA_MAX].
 */
static inline void
htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
{
	if (use_bandwidth_switch) {
		uint32_t maxB = ca->maxB;
		uint32_t old_maxB = ca->old_maxB;

		ca->old_maxB = ca->maxB;

		if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
			ca->beta = BETA_MIN;
			ca->modeswitch = 0;
			return;
		}
	}
	if (ca->modeswitch && minRTT > (uint32_t) MSEC_TO_TICKS(10) && maxRTT) {
		ca->beta = (minRTT << 7) / maxRTT;
		if (ca->beta < BETA_MIN)
			ca->beta = BETA_MIN;
		else if (ca->beta > BETA_MAX)
			ca->beta = BETA_MAX;
	} else {
		ca->beta = BETA_MIN;
		ca->modeswitch = 1;
	}
}

/*
 * Recompute the additive-increase factor alpha (fixed point, <<7) from the
 * time since the last congestion event (H-TCP's quadratic ramp after 1 s),
 * optionally scaled by minRTT so different-RTT flows share fairly.
 */
static inline void
htcp_alpha_update(struct htcp *ca)
{
	uint32_t minRTT = ca->minRTT;
	uint32_t factor = 1;
	uint32_t diff = htcp_cong_time(ca);

	if (diff > (uint32_t) hz) {
		diff -= hz;
		/* 1 + 10*t + (t/2)^2, t in seconds since diff > 1 sec */
		factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
	}
	if (use_rtt_scaling && minRTT) {
		uint32_t scale = (hz << 3) / (10 * minRTT);

		scale = min(max(scale, 1U << 2), 10U << 3);	/* clamping ratio to
								 * interval [0.5,10]<<3 */
		factor = (factor << 3) / scale;
		if (!factor)
			factor = 1;
	}
	ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
	if (!ca->alpha)
		ca->alpha = ALPHA_BASE;
}

/* After we have the rtt data to calculate beta, we'd still prefer to wait one
 * rtt before we adjust our beta to ensure we are working from a consistent
 * data.
 *
 * This function should be called when we hit a congestion event since only at
 * that point do we really have a real sense of maxRTT (the queues en route
 * were getting just too full now).
 */
static void
htcp_param_update(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	uint32_t minRTT = net->htcp_ca.minRTT;
	uint32_t maxRTT = net->htcp_ca.maxRTT;

	htcp_beta_update(&net->htcp_ca, minRTT, maxRTT);
	htcp_alpha_update(&net->htcp_ca);

	/*
	 * add slowly fading memory for maxRTT to accommodate routing
	 * changes etc
	 */
	if (minRTT > 0 && maxRTT > minRTT)
		net->htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
}

/*
 * New slow-start threshold after a congestion event: cwnd scaled by beta
 * (fixed point <<7), rounded to whole MTUs, never below 2 MTUs. Also
 * refreshes alpha/beta via htcp_param_update().
 */
static uint32_t
htcp_recalc_ssthresh(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	htcp_param_update(stcb, net);
	return max(((net->cwnd / net->mtu * net->htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu);
}

/*
 * Per-ack cwnd growth for H-TCP. In slow start, grow by acked bytes capped
 * at L * MTU (ABC, sysctl sctp_L2_abc_variable); in congestion avoidance,
 * grow one MTU once alpha * partial_bytes_acked reaches a full cwnd
 * (the alpha/cwnd increase of the paper, done in whole-MTU steps).
 */
static void
htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*-
	 * How to handle these functions?
	 *	if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
	 *		return;
	 */
	if (net->cwnd <= net->ssthresh) {
		/* We are in slow start */
		if (net->flight_size + net->net_ack >= net->cwnd) {
			if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
				net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, net->mtu,
					    SCTP_CWND_LOG_FROM_SS);
				}
			} else {
				net->cwnd += net->net_ack;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, net->net_ack,
					    SCTP_CWND_LOG_FROM_SS);
				}
			}
		} else {
			/* not cwnd-limited: log but do not advance */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack,
				    SCTP_CWND_LOG_NOADV_SS);
			}
		}
	} else {
		measure_rtt(stcb, net);

		/*
		 * In dangerous area, increase slowly. In theory this is
		 * net->cwnd += alpha / net->cwnd
		 */
		/* What is snd_cwnd_cnt?? */
		if (((net->partial_bytes_acked / net->mtu * net->htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
			/*-
			 * Does SCTP have a cwnd clamp?
			 * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
			 */
			net->cwnd += net->mtu;
			net->partial_bytes_acked = 0;
			htcp_alpha_update(&net->htcp_ca);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_FROM_CA);
			}
		} else {
			net->partial_bytes_acked += net->net_ack;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack,
				    SCTP_CWND_LOG_NOADV_CA);
			}
		}

		net->htcp_ca.bytes_acked = net->mtu;
	}
}

#ifdef SCTP_NOT_USED
/* Lower bound on congestion window. */
static uint32_t
htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	return net->ssthresh;
}

#endif

/* Zero the per-path H-TCP state and seed alpha/beta and the epoch clock. */
static void
htcp_init(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	memset(&net->htcp_ca, 0, sizeof(struct htcp));
	net->htcp_ca.alpha = ALPHA_BASE;
	net->htcp_ca.beta = BETA_MIN;
	net->htcp_ca.bytes_acked = net->mtu;
	net->htcp_ca.last_cong = sctp_get_tick_count();
}

/*
 * H-TCP variant of the initial-cwnd hook: same initial cwnd/ssthresh as the
 * RFC 4960 module, plus H-TCP per-path state initialization.
 */
void
sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * We take the max of the burst limit times a MTU or the
	 * INITIAL_CWND. We then limit this to 4 MTU's of sending.
	 */
	net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
	net->ssthresh = stcb->asoc.peers_rwnd;
	htcp_init(stcb, net);

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
	}
}

/*
 * Per-SACK congestion-window update for the H-TCP variant. Mirrors the
 * HS-TCP version above (Early-FR timer management, Karn's-rule error-count
 * clearing, CMT-PF handling, RTO restore), but delegates the actual cwnd
 * growth to htcp_cong_avoid() and feeds the throughput estimator via
 * measure_achieved_throughput().
 */
void
sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {

#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (compare_with_wrap(asoc->last_acked_seq,
			    net->fast_recovery_tsn, MAX_TSN) ||
			    (asoc->last_acked_seq == net->fast_recovery_tsn) ||
			    compare_with_wrap(net->pseudo_cumack, net->fast_recovery_tsn, MAX_TSN) ||
			    (net->pseudo_cumack == net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
			/*
			 * So, first of all do we need to have a Early FR
			 * timer running?
			 */
			if (((TAILQ_FIRST(&asoc->sent_queue)) &&
			    (net->ref_count > 1) &&
			    (net->flight_size < net->cwnd)) ||
			    (reneged_all)) {
				/*
				 * yes, so in this case stop it if its
				 * running, and then restart it. Reneging
				 * all is a special case where we want to
				 * run the Early FR timer and then force the
				 * last few unacked to be sent, causing us
				 * to illicit a sack with gaps to force out
				 * the others.
				 */
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
				}
				SCTP_STAT_INCR(sctps_earlyfrstrid);
				sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
			} else {
				/* No, stop it if its running */
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
				}
			}
		}
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
		if (net->net_ack2 > 0) {
			/*
			 * Karn's rule applies to clearing error count, this
			 * is optional.
			 */
			net->error_count = 0;
			if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
			    SCTP_ADDR_NOT_REACHABLE) {
				/* addr came good */
				net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
				net->dest_state |= SCTP_ADDR_REACHABLE;
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
				    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
				/* now was it the primary? if so restore */
				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
					(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
				}
			}
			/*
			 * JRS 5/14/07 - If CMT PF is on and the destination
			 * is in PF state, set the destination to active
			 * state and set the cwnd to one or two MTU's based
			 * on whether PF1 or PF2 is being used.
			 *
			 * Should we stop any running T3 timer here?
			 */
			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) &&
			    SCTP_BASE_SYSCTL(sctp_cmt_pf) &&
			    ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
				net->dest_state &= ~SCTP_ADDR_PF;
				net->cwnd = net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
				SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
				    net, net->cwnd);
				/*
				 * Since the cwnd value is explicitly set,
				 * skip the code that updates the cwnd
				 * value.
				 */
				goto skip_cwnd_update;
			}
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off == 1 &&
		 * net->fast_retran_loss_recovery &&
		 * net->will_exit_fast_recovery == 0) { @@@ Do something }
		 * else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif

		if (asoc->fast_retran_loss_recovery && will_exit == 0 && SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 0) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			goto skip_cwnd_update;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && net->new_pseudo_cumack)) {
			/* H-TCP does the slow-start/CA split internally */
			htcp_cong_avoid(stcb, net);
			measure_achieved_throughput(stcb, net);
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_NO_CUMACK);
			}
		}
skip_cwnd_update:
		/*
		 * NOW, according to Karn's rule do we need to restore the
		 * RTO timer back? Check our net_ack2. If not set then we
		 * have a ambiguity.. i.e. all data ack'd was sent to more
		 * than one place.
		 */
		if (net->net_ack2) {
			/* restore any doubled timers */
			net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
			if (net->RTO < stcb->asoc.minrto) {
				net->RTO = stcb->asoc.minrto;
			}
			if (net->RTO > stcb->asoc.maxrto) {
				net->RTO = stcb->asoc.maxrto;
			}
		}
	}
}

/*
 * Fast-retransmit hook for H-TCP: for each destination with newly acked
 * data that is not already inside an RFC 2582-style recovery window,
 * reset the H-TCP epoch, cut ssthresh/cwnd, open a fast-recovery window
 * ending at the highest outstanding TSN, and restart the T3 timer.
 */
void
sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_nets *net;

	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off == 1) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) || (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1)) {
			/* out of a RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * per section 7.2.3, are there any
				 * destinations that had a fast retransmit
				 * to them. If so what we need to do is
				 * adjust ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;
				int old_cwnd = net->cwnd;

				/* JRS - reset as if state were changed */
				htcp_reset(&net->htcp_ca);
				net->ssthresh = htcp_recalc_ssthresh(stcb, net);
				net->cwnd = net->ssthresh;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
					    SCTP_CWND_LOG_FROM_FR);
				}
				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * Disable Nonce Sum Checking and store the
				 * resync tsn
				 */
				asoc->nonce_sum_check = 0;
				asoc->nonce_resync_tsn = asoc->fast_recovery_tsn + 1;

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}

/*
 * T3 retransmission-timeout hook for H-TCP: treat it as a congestion
 * event (htcp_reset), recompute ssthresh, and collapse cwnd to one MTU
 * as RFC 4960 section 7.2.3 requires.
 */
void
sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int old_cwnd = net->cwnd;

	/* JRS - reset as if the state were being changed to timeout */
	htcp_reset(&net->htcp_ca);
	net->ssthresh = htcp_recalc_ssthresh(stcb, net);
	net->cwnd = net->mtu;
	net->partial_bytes_acked = 0;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
}

/*
 * Early-FR timer expiry hook: push out pending data, restart the H-TCP
 * congestion-epoch clock, then shave one MTU off cwnd and, if we were
 * still in slow start, drop ssthresh below cwnd to force congestion
 * avoidance.
 */
void
sctp_htcp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int old_cwnd;

	old_cwnd = net->cwnd;

	sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
	net->htcp_ca.last_cong = sctp_get_tick_count();
	/*
	 * make a small adjustment to cwnd and force to CA.
	 */
	if (net->cwnd > net->mtu)
		/* drop down one MTU after sending */
		net->cwnd -= net->mtu;
	if (net->cwnd < net->ssthresh)
		/* still in SS move to CA */
		net->ssthresh = net->cwnd - 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
	}
}

/*
 * ECN-Echo hook for H-TCP: treat the echo as a congestion event, cut
 * ssthresh (never below one MTU — and in that floor case also double the
 * RTO to further slow down), then set cwnd to the new ssthresh.
 */
void
sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int old_cwnd;

	old_cwnd = net->cwnd;

	/* JRS - reset hctp as if state changed */
	htcp_reset(&net->htcp_ca);
	SCTP_STAT_INCR(sctps_ecnereducedcwnd);
	net->ssthresh = htcp_recalc_ssthresh(stcb, net);
	if (net->ssthresh < net->mtu) {
		net->ssthresh = net->mtu;
		/* here back off the timer as well, to slow us down */
		net->RTO <<= 1;
	}
	net->cwnd = net->ssthresh;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
	}
}