/*
 * Copyright (c) 2010-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/protosw.h>
#include <sys/mcache.h>
#include <sys/sysctl.h>

#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#if INET6
#include <netinet/ip6.h>
#endif
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_cc.h>

#include <libkern/OSAtomic.h>

/* This file implements an alternate TCP congestion control algorithm
 * for background transport developed by LEDBAT working group at IETF and
 * described in draft: draft-ietf-ledbat-congestion-02
 * (later published as RFC 6817).
 */

int tcp_ledbat_init(struct tcpcb *tp);
int tcp_ledbat_cleanup(struct tcpcb *tp);
void tcp_ledbat_cwnd_init(struct tcpcb *tp);
void tcp_ledbat_inseq_ack_rcvd(struct tcpcb *tp, struct tcphdr *th);
void tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th);
void tcp_ledbat_pre_fr(struct tcpcb *tp);
void tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th);
void tcp_ledbat_after_idle(struct tcpcb *tp);
void tcp_ledbat_after_timeout(struct tcpcb *tp);
int tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th);
void tcp_ledbat_switch_cc(struct tcpcb *tp, uint16_t old_cc_index);

/* Callback vector registering LEDBAT with the TCP congestion-control
 * framework; the framework dispatches through these entry points.
 */
struct tcp_cc_algo tcp_cc_ledbat = {
	.name = "ledbat",
	.init = tcp_ledbat_init,
	.cleanup = tcp_ledbat_cleanup,
	.cwnd_init = tcp_ledbat_cwnd_init,
	.inseq_ack_rcvd = tcp_ledbat_inseq_ack_rcvd,
	.ack_rcvd = tcp_ledbat_ack_rcvd,
	.pre_fr = tcp_ledbat_pre_fr,
	.post_fr = tcp_ledbat_post_fr,
	.after_idle = tcp_ledbat_after_idle,
	.after_timeout = tcp_ledbat_after_timeout,
	.delay_ack = tcp_ledbat_delay_ack,
	.switch_to = tcp_ledbat_switch_cc
};

extern int tcp_do_rfc3465;
extern int tcp_do_rfc3465_lim2;
extern uint32_t get_base_rtt(struct tcpcb *tp);

/* Target queuing delay in milliseconds. This includes the processing
 * and scheduling delay on both of the end-hosts. A LEDBAT sender tries
 * to keep queuing delay below this limit. When the queuing delay
 * goes above this limit, a LEDBAT sender will start reducing the
 * congestion window.
 *
 * The LEDBAT draft says that target queue delay MUST be 100 ms for
 * inter-operability.
 */
int target_qdelay = 100;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_target_qdelay, CTLFLAG_RW | CTLFLAG_LOCKED,
	&target_qdelay , 100, "Target queuing delay");

/* Allowed increase and tether are used to place an upper bound on
 * congestion window based on the amount of data that is outstanding.
 * This will limit the congestion window when the amount of data in
 * flight is little because the application is writing to the socket
 * intermittently and is preventing the connection from becoming idle.
 *
 * max_allowed_cwnd = allowed_increase + (tether * flight_size)
 * cwnd = min(cwnd, max_allowed_cwnd)
 *
 * 'Allowed_increase' parameter is set to 8. If the flight size is zero, then
 * we want the congestion window to be at least 8 packets to reduce the
 * delay induced by delayed ack. This helps when the receiver is acking
 * more than 2 packets at a time (stretching acks for better performance).
 *
 * 'Tether' is also set to 2. We do not want this to limit the growth of cwnd
 * during slow-start.
 */
int allowed_increase = 8;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_allowed_increase, CTLFLAG_RW | CTLFLAG_LOCKED,
	&allowed_increase, 1, "Additive constant used to calculate max allowed congestion window");

/* Left shift for cwnd to get tether value of 2 */
int tether_shift = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_tether_shift, CTLFLAG_RW | CTLFLAG_LOCKED,
	&tether_shift, 1, "Tether shift for max allowed congestion window");

/* Start with an initial window of 2. This will help to get more accurate
 * minimum RTT measurement in the beginning. It will help to probe
 * the path slowly and will not add to the existing delay if the path is
 * already congested. Using 2 packets will reduce the delay induced by delayed-ack.
 */
uint32_t bg_ss_fltsz = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, bg_ss_fltsz, CTLFLAG_RW | CTLFLAG_LOCKED,
	&bg_ss_fltsz, 2, "Initial congestion window for background transport");

/* Number of RTT samples collected per base-RTT history slot; defined by
 * the RTT-history code elsewhere in the stack.
 */
extern int rtt_samples_per_slot;

/* Core LEDBAT window update: grow cwnd by 'incr' while measured queuing
 * delay (t_rttcur - base_rtt) stays at or below target_qdelay, otherwise
 * shrink cwnd by 1/8. In all cases, clamp cwnd to
 * (allowed_increase * MSS) + (flight_size << tether_shift) so that an
 * intermittently-writing application cannot inflate cwnd far beyond what
 * it actually keeps in flight.
 *
 * NOTE(review): qdelay is computed with unsigned arithmetic; this assumes
 * t_rttcur >= base_rtt (base_rtt being the minimum over the RTT history)
 * — confirm get_base_rtt() guarantees this.
 */
static void update_cwnd(struct tcpcb *tp, uint32_t incr) {
	uint32_t max_allowed_cwnd = 0, flight_size = 0;
	uint32_t qdelay, base_rtt;
	int32_t off_target;

	base_rtt = get_base_rtt(tp);

	/* If we do not have a good RTT measurement yet, increment
	 * congestion window by the default value.
	 */
	if (base_rtt == 0 || tp->t_rttcur == 0) {
		tp->snd_cwnd += incr;
		goto check_max;
	}

	/* Estimated queuing delay = current RTT minus the smallest RTT
	 * seen on this path; off_target > 0 means we are under the
	 * 100 ms LEDBAT target and may grow the window.
	 */
	qdelay = tp->t_rttcur - base_rtt;
	off_target = (int32_t)(target_qdelay - qdelay);

	if (off_target >= 0) {
		/* Delay decreased or remained the same, we can increase
		 * the congestion window according to RFC 3465.
		 *
		 * Move background slow-start threshold to current
		 * congestion window so that the next time (after some idle
		 * period), we can attempt to do slow-start till here if there
		 * is no increase in rtt
		 */
		if (tp->bg_ssthresh < tp->snd_cwnd)
			tp->bg_ssthresh = tp->snd_cwnd;
		tp->snd_cwnd += incr;

	} else {
		/* In response to an increase in rtt, reduce the congestion
		 * window by one-eighth. This will help to yield immediately
		 * to a competing stream.
		 */
		uint32_t redwin;

		redwin = tp->snd_cwnd >> 3;
		tp->snd_cwnd -= redwin;
		/* Never shrink below the background initial window. */
		if (tp->snd_cwnd < bg_ss_fltsz * tp->t_maxseg)
			tp->snd_cwnd = bg_ss_fltsz * tp->t_maxseg;

		/* Lower background slow-start threshold so that the connection
		 * will go into congestion avoidance phase
		 */
		if (tp->bg_ssthresh > tp->snd_cwnd)
			tp->bg_ssthresh = tp->snd_cwnd;
	}
check_max:
	/* Calculate the outstanding flight size and restrict the
	 * congestion window to a factor of flight size.
	 */
	flight_size = tp->snd_max - tp->snd_una;

	max_allowed_cwnd = (allowed_increase * tp->t_maxseg)
		+ (flight_size << tether_shift);
	tp->snd_cwnd = min(tp->snd_cwnd, max_allowed_cwnd);
	return;
}

/* Framework init hook: no per-connection state to set up; just count the
 * socket against this algorithm. Returns 0 (success).
 */
int tcp_ledbat_init(struct tcpcb *tp) {
#pragma unused(tp)
	OSIncrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets);
	return 0;
}

/* Framework cleanup hook: drop the per-algorithm socket count.
 * Returns 0 (success).
 */
int tcp_ledbat_cleanup(struct tcpcb *tp) {
#pragma unused(tp)
	OSDecrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets);
	return 0;
}

/* Initialize the congestion window for a connection
 *
 * Uses the small background initial window (bg_ss_fltsz segments) and
 * seeds the background slow-start threshold from the regular one.
 */

void
tcp_ledbat_cwnd_init(struct tcpcb *tp) {
	tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz;
	tp->bg_ssthresh = tp->snd_ssthresh;
}

/* Function to handle an in-sequence ack which is fast-path processing
 * of an in sequence ack in tcp_input function (called as header prediction).
 * This gets called only during congestion avoidance phase.
 */
void
tcp_ledbat_inseq_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) {
	int acked = 0;
	u_int32_t incr = 0;

	/* Appropriate Byte Counting: accumulate acked bytes and grow by
	 * one MSS only once a full cwnd's worth has been acknowledged.
	 */
	acked = th->th_ack - tp->snd_una;
	tp->t_bytes_acked += acked;
	if (tp->t_bytes_acked > tp->snd_cwnd) {
		tp->t_bytes_acked -= tp->snd_cwnd;
		incr = tp->t_maxseg;
	}

	/* Only grow while the receiver window, not cwnd, is the limit. */
	if (tp->snd_cwnd < tp->snd_wnd && incr > 0) {
		update_cwnd(tp, incr);
	}
}
/* Function to process an ack.
 */
void
tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th) {
	/*
	 * RFC 3465 - Appropriate Byte Counting.
	 *
	 * If the window is currently less than ssthresh,
	 * open the window by the number of bytes ACKed by
	 * the last ACK, however clamp the window increase
	 * to an upper limit "L".
	 *
	 * In congestion avoidance phase, open the window by
	 * one segment each time "bytes_acked" grows to be
	 * greater than or equal to the congestion window.
	 */

	register u_int cw = tp->snd_cwnd;
	register u_int incr = tp->t_maxseg;
	int acked = 0;

	acked = th->th_ack - tp->snd_una;
	tp->t_bytes_acked += acked;
	if (cw >= tp->bg_ssthresh) {
		/* congestion-avoidance */
		if (tp->t_bytes_acked < cw) {
			/* No need to increase yet. */
			incr = 0;
		}
	} else {
		/*
		 * If the user explicitly enables RFC3465
		 * use 2*SMSS for the "L" param. Otherwise
		 * use the more conservative 1*SMSS.
		 *
		 * (See RFC 3465 2.3 Choosing the Limit)
		 */
		u_int abc_lim;

		abc_lim = (tcp_do_rfc3465_lim2 &&
			tp->snd_nxt == tp->snd_max) ? incr * 2 : incr;

		incr = lmin(acked, abc_lim);
	}
	if (tp->t_bytes_acked >= cw)
		tp->t_bytes_acked -= cw;
	if (incr > 0)
		update_cwnd(tp, incr);
}

/* Entering fast recovery: halve the effective window (minimum 2 segments)
 * for ssthresh, pull the background threshold down with it, and let the
 * framework resize the send buffer.
 */
void
tcp_ledbat_pre_fr(struct tcpcb *tp) {
	uint32_t win;

	win = min(tp->snd_wnd, tp->snd_cwnd) /
		2 / tp->t_maxseg;
	if ( win < 2 )
		win = 2;
	tp->snd_ssthresh = win * tp->t_maxseg;
	if (tp->bg_ssthresh > tp->snd_ssthresh)
		tp->bg_ssthresh = tp->snd_ssthresh;

	tcp_cc_resize_sndbuf(tp);
}

/* Exiting fast recovery: deflate cwnd back toward ssthresh based on the
 * data still outstanding at the acknowledging segment 'th'.
 */
void
tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th) {
	int32_t ss;

	/* Bytes still unacknowledged past this ack. */
	ss = tp->snd_max - th->th_ack;

	/*
	 * Complete ack. Inflate the congestion window to
	 * ssthresh and exit fast recovery.
	 *
	 * Window inflation should have left us with approx.
	 * snd_ssthresh outstanding data. But in case we
	 * would be inclined to send a burst, better to do
	 * it via the slow start mechanism.
	 */
	if (ss < (int32_t)tp->snd_ssthresh)
		tp->snd_cwnd = ss + tp->t_maxseg;
	else
		tp->snd_cwnd = tp->snd_ssthresh;
	tp->t_bytes_acked = 0;
}

/*
 * Function to handle connections that have been idle for
 * some time. Slow start to get ack "clock" running again.
 * Clear base history after idle time.
 */
void
tcp_ledbat_after_idle(struct tcpcb *tp) {
	int32_t n = N_RTT_BASE, i = (N_RTT_BASE - 1);

	/* Decide how many base history entries have to be cleared
	 * based on how long the connection has been idle.
	 */

	if (tp->t_rttcur > 0) {
		int32_t nrtt, idle_time;

		/* Idle duration expressed in RTTs, then converted to the
		 * number of history slots that have aged out. If RTT is
		 * unknown, the defaults above clear the whole history.
		 */
		idle_time = tcp_now - tp->t_rcvtime;
		nrtt = idle_time / tp->t_rttcur;
		n = nrtt / rtt_samples_per_slot;
		if (n > N_RTT_BASE)
			n = N_RTT_BASE;
	}
	/* Zero the n stalest slots at the top of the history... */
	for (i = (N_RTT_BASE - 1); n > 0; --i, --n) {
		tp->rtt_hist[i] = 0;
	}
	/* ...then slide the surviving entries up to the top, zeroing the
	 * vacated low slots. After both loops, i has walked down past the
	 * last surviving entry.
	 */
	for (n = (N_RTT_BASE - 1); i >= 0; --i, --n) {
		tp->rtt_hist[n] = tp->rtt_hist[i];
		tp->rtt_hist[i] = 0;
	}

	/* Reset the congestion window */
	tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz;
}

/* Function to change the congestion window when the retransmit
 * timer fires. The behavior is the same as that for best-effort
 * TCP, reduce congestion window to one segment and start probing
 * the link using "slow start". The slow start threshold is set
 * to half of the current window. Lower the background slow start
 * threshold also.
 */
void
tcp_ledbat_after_timeout(struct tcpcb *tp) {
	if (tp->t_state >= TCPS_ESTABLISHED) {
		u_int win = min(tp->snd_wnd, tp->snd_cwnd) / 2 / tp->t_maxseg;
		if (win < 2)
			win = 2;
		tp->snd_cwnd = tp->t_maxseg;
		tp->snd_ssthresh = win * tp->t_maxseg;
		tp->t_bytes_acked = 0;
		tp->t_dupacks = 0;

		if (tp->bg_ssthresh > tp->snd_ssthresh)
			tp->bg_ssthresh = tp->snd_ssthresh;

		tcp_cc_resize_sndbuf(tp);
	}
}

/*
 * Indicate whether this ack should be delayed.
 * We can delay the ack if:
 * - our last ack wasn't a 0-sized window.
 * - the peer hasn't sent us a TH_PUSH data packet: if they did, take this
 *   as a clue that we need to ACK without any delay. This helps higher
 *   level protocols who won't send us more data even if the window is
 *   open because their last "segment" hasn't been ACKed.
 * Otherwise the receiver will ack every other full-sized segment or when the
 * delayed ack timer fires. This will help to generate better rtt estimates for
 * the other end if it is a ledbat sender.
 *
 * Returns 1 if the ack may be delayed, 0 if it should be sent now.
 */

int
tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th) {
	if ((tp->t_flags & TF_RXWIN0SENT) == 0 &&
		(th->th_flags & TH_PUSH) == 0 &&
		(tp->t_unacksegs == 1))
		return(1);
	return(0);
}

/* Change a connection to use ledbat. First, lower bg_ssthresh value
 * if it needs to be.
 *
 * The current window is rescaled: kept (in segments) if we are above the
 * background threshold, halved otherwise, and floored at bg_ss_fltsz.
 *
 * NOTE(review): num_sockets is incremented here as well as in init();
 * presumably the cc framework calls switch_to instead of init when a live
 * connection changes algorithm — confirm against the tcp_cc dispatch code.
 */
void
tcp_ledbat_switch_cc(struct tcpcb *tp, uint16_t old_cc_index) {
#pragma unused(old_cc_index)
	uint32_t cwnd;

	if (tp->bg_ssthresh == 0 || tp->bg_ssthresh > tp->snd_ssthresh)
		tp->bg_ssthresh = tp->snd_ssthresh;

	cwnd = min(tp->snd_wnd, tp->snd_cwnd);

	if (tp->snd_cwnd > tp->bg_ssthresh)
		cwnd = cwnd / tp->t_maxseg;
	else
		cwnd = cwnd / 2 / tp->t_maxseg;

	if (cwnd < bg_ss_fltsz)
		cwnd = bg_ss_fltsz;

	tp->snd_cwnd = cwnd * tp->t_maxseg;
	tp->t_bytes_acked = 0;

	OSIncrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets);
}