/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.
 * Copyright (c) 2007-2008,2010,2014
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart, James
 * Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This software was first released in 2007 by James Healy and Lawrence Stewart
 * whilst working on the NewTCP research project at Swinburne University of
 * Technology's Centre for Advanced Internet Architectures, Melbourne,
 * Australia, which was made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 * More details are available at:
 *   http://caia.swin.edu.au/urp/newtcp/
 *
 * Dec 2014 garmitage@swin.edu.au
 * Borrowed code fragments from cc_cdg.c to add modifiable beta
 * via sysctls.
 *
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/vnet.h>

#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_var.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_module.h>
#include <netinet/cc/cc_newreno.h>

static MALLOC_DEFINE(M_NEWRENO, "newreno data",
    "newreno beta values");

static void	newreno_cb_destroy(struct cc_var *ccv);
static void	newreno_ack_received(struct cc_var *ccv, uint16_t type);
static void	newreno_after_idle(struct cc_var *ccv);
static void	newreno_cong_signal(struct cc_var *ccv, uint32_t type);
static void	newreno_post_recovery(struct cc_var *ccv);
static int	newreno_ctl_output(struct cc_var *ccv, struct sockopt *sopt, void *buf);

VNET_DEFINE_STATIC(uint32_t, newreno_beta) = 50;
VNET_DEFINE_STATIC(uint32_t, newreno_beta_ecn) = 80;
#define V_newreno_beta VNET(newreno_beta)
#define V_newreno_beta_ecn VNET(newreno_beta_ecn)

struct cc_algo newreno_cc_algo = {
	.name = "newreno",
	.cb_destroy = newreno_cb_destroy,
	.ack_received = newreno_ack_received,
	.after_idle = newreno_after_idle,
	.cong_signal = newreno_cong_signal,
	.post_recovery = newreno_post_recovery,
	.ctl_output = newreno_ctl_output,
};

/* Per-connection NewReno state: loss and ECN backoff factors, in percent. */
struct newreno {
	uint32_t beta;
	uint32_t beta_ecn;
};

/*
 * Allocate per-connection state and seed it from the VNET-wide defaults.
 * Returns NULL and leaves ccv->cc_data untouched if the M_NOWAIT
 * allocation fails.
 */
static inline struct newreno *
newreno_malloc(struct cc_var *ccv)
{
	struct newreno *nreno;

	nreno = malloc(sizeof(struct newreno), M_NEWRENO, M_NOWAIT);
	if (nreno != NULL) {
		/* NB: nreno is not zeroed, so initialise all fields. */
		nreno->beta = V_newreno_beta;
		nreno->beta_ecn = V_newreno_beta_ecn;
		ccv->cc_data = nreno;
	}

	return (nreno);
}

static void
newreno_cb_destroy(struct cc_var *ccv)
{
	free(ccv->cc_data, M_NEWRENO);
}

static void
newreno_ack_received(struct cc_var *ccv, uint16_t type)
{
	if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
	    (ccv->flags & CCF_CWND_LIMITED)) {
		u_int cw = CCV(ccv, snd_cwnd);
		u_int incr = CCV(ccv, t_maxseg);

		/*
		 * Regular in-order ACK, open the congestion window.
		 * Method depends on which congestion control state we're
		 * in (slow start or cong avoid) and if ABC (RFC 3465) is
		 * enabled.
		 *
		 * slow start: cwnd <= ssthresh
		 * cong avoid: cwnd > ssthresh
		 *
		 * slow start and ABC (RFC 3465):
		 *   Grow cwnd exponentially by the amount of data
		 *   ACKed, capping the max increment per ACK to
		 *   (abc_l_var * maxseg) bytes.
		 *
		 * slow start without ABC (RFC 5681):
		 *   Grow cwnd exponentially by maxseg per ACK.
		 *
		 * cong avoid and ABC (RFC 3465):
		 *   Grow cwnd linearly by maxseg per RTT for each
		 *   cwnd worth of ACKed data.
		 *
		 * cong avoid without ABC (RFC 5681):
		 *   Grow cwnd linearly by approximately maxseg per RTT using
		 *   maxseg^2 / cwnd per ACK as the increment.
		 *   If cwnd > maxseg^2, fix the cwnd increment at 1 byte to
		 *   avoid capping cwnd.
		 */
		if (cw > CCV(ccv, snd_ssthresh)) {
			if (V_tcp_do_rfc3465) {
				if (ccv->flags & CCF_ABC_SENTAWND)
					ccv->flags &= ~CCF_ABC_SENTAWND;
				else
					incr = 0;
			} else
				incr = max((incr * incr / cw), 1);
		} else if (V_tcp_do_rfc3465) {
			/*
			 * In slow-start with ABC enabled and no RTO in sight?
			 * (Must not use abc_l_var > 1 if slow starting after
			 * an RTO. On RTO, snd_nxt = snd_una, so the
			 * snd_nxt == snd_max check is sufficient to
			 * handle this).
			 *
			 * XXXLAS: Find a way to signal SS after RTO that
			 * doesn't rely on tcpcb vars.
			 */
			if (CCV(ccv, snd_nxt) == CCV(ccv, snd_max))
				incr = min(ccv->bytes_this_ack,
				    ccv->nsegs * V_tcp_abc_l_var *
				    CCV(ccv, t_maxseg));
			else
				incr = min(ccv->bytes_this_ack, CCV(ccv, t_maxseg));
		}
		/* ABC is on by default, so incr equals 0 frequently. */
		if (incr > 0)
			CCV(ccv, snd_cwnd) = min(cw + incr,
			    TCP_MAXWIN << CCV(ccv, snd_scale));
	}
}
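/*
 * Worked example for the RFC 5681 congestion-avoidance increment above
 * (figures chosen purely for illustration, not taken from the original
 * source): with maxseg = 1460 and cwnd = 14600, each ACK adds
 * 1460 * 1460 / 14600 = 146 bytes.  Ten such ACKs, i.e. roughly one
 * cwnd's worth of ACKed data and hence about one RTT, grow cwnd by one
 * maxseg, which is the linear per-RTT growth the comment describes.
 */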
static void
newreno_after_idle(struct cc_var *ccv)
{
	int rw;

	/*
	 * If we've been idle for more than one retransmit timeout the old
	 * congestion window is no longer current and we have to reduce it to
	 * the restart window before we can transmit again.
	 *
	 * The restart window is the initial window or the last CWND, whichever
	 * is smaller.
	 *
	 * This is done to prevent us from flooding the path with a full CWND at
	 * wirespeed, overloading router and switch buffers along the way.
	 *
	 * See RFC5681 Section 4.1. "Restarting Idle Connections".
	 *
	 * In addition, per RFC2861 Section 2, the ssthresh is set to the
	 * maximum of the former ssthresh or 3/4 of the old cwnd, to
	 * not exit slow-start prematurely.
	 */
	if (V_tcp_do_rfc3390)
		rw = min(4 * CCV(ccv, t_maxseg),
		    max(2 * CCV(ccv, t_maxseg), 4380));
	else
		rw = CCV(ccv, t_maxseg) * 2;

	CCV(ccv, snd_ssthresh) = max(CCV(ccv, snd_ssthresh),
	    CCV(ccv, snd_cwnd) - (CCV(ccv, snd_cwnd) >> 2));

	CCV(ccv, snd_cwnd) = min(rw, CCV(ccv, snd_cwnd));
}

/*
 * Perform any necessary tasks before we enter congestion recovery.
 */
static void
newreno_cong_signal(struct cc_var *ccv, uint32_t type)
{
	struct newreno *nreno;
	uint32_t beta, beta_ecn, cwin, factor;
	u_int mss;

	cwin = CCV(ccv, snd_cwnd);
	mss = tcp_maxseg(ccv->ccvc.tcp);
	/*
	 * Other TCP congestion controls use newreno_cong_signal(), but
	 * with their own private cc_data. Make sure the cc_data is used
	 * correctly.
	 */
	nreno = (CC_ALGO(ccv->ccvc.tcp) == &newreno_cc_algo) ? ccv->cc_data : NULL;
	beta = (nreno == NULL) ? V_newreno_beta : nreno->beta;
	beta_ecn = (nreno == NULL) ? V_newreno_beta_ecn : nreno->beta_ecn;
	if (V_cc_do_abe && type == CC_ECN)
		factor = beta_ecn;
	else
		factor = beta;

	/* Catch algos which mistakenly leak private signal types. */
	KASSERT((type & CC_SIGPRIVMASK) == 0,
	    ("%s: congestion signal type 0x%08x is private\n", __func__, type));

	cwin = max(((uint64_t)cwin * (uint64_t)factor) / (100ULL * (uint64_t)mss),
	    2) * mss;

	switch (type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
			if (IN_CONGRECOVERY(CCV(ccv, t_flags)) &&
			    V_cc_do_abe && V_cc_abe_frlossreduce) {
				CCV(ccv, snd_ssthresh) =
				    ((uint64_t)CCV(ccv, snd_ssthresh) *
				    (uint64_t)beta) /
				    (100ULL * (uint64_t)beta_ecn);
			}
			if (!IN_CONGRECOVERY(CCV(ccv, t_flags)))
				CCV(ccv, snd_ssthresh) = cwin;
			ENTER_RECOVERY(CCV(ccv, t_flags));
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
			CCV(ccv, snd_ssthresh) = cwin;
			CCV(ccv, snd_cwnd) = cwin;
			ENTER_CONGRECOVERY(CCV(ccv, t_flags));
		}
		break;
	case CC_RTO:
		CCV(ccv, snd_ssthresh) = max(min(CCV(ccv, snd_wnd),
		    CCV(ccv, snd_cwnd)) / 2 / mss,
		    2) * mss;
		CCV(ccv, snd_cwnd) = mss;
		break;
	}
}
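/*
 * Worked example for the cwin calculation in newreno_cong_signal()
 * (figures chosen purely for illustration): the window is scaled by
 * beta (loss) or beta_ecn (ECN with ABE) percent and rounded down to a
 * whole number of segments, with a floor of 2 * mss.  With cwnd = 14600
 * and mss = 1460, the default beta = 50 gives 14600 * 50 / (100 * 1460)
 * = 5 segments, i.e. cwin = 7300 (cwnd halved), while the default
 * beta_ecn = 80 gives 8 segments, i.e. cwin = 11680 (a 20% reduction).
 */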
/*
 * Perform any necessary tasks before we exit congestion recovery.
 */
static void
newreno_post_recovery(struct cc_var *ccv)
{
	int pipe;

	if (IN_FASTRECOVERY(CCV(ccv, t_flags))) {
		/*
		 * Fast recovery will conclude after returning from this
		 * function. Window inflation should have left us with
		 * approximately snd_ssthresh outstanding data. But in case we
		 * would be inclined to send a burst, better to do it via the
		 * slow start mechanism.
		 *
		 * XXXLAS: Find a way to do this without needing curack
		 */
		if (V_tcp_do_rfc6675_pipe)
			pipe = tcp_compute_pipe(ccv->ccvc.tcp);
		else
			pipe = CCV(ccv, snd_max) - ccv->curack;

		if (pipe < CCV(ccv, snd_ssthresh))
			/*
			 * Ensure that cwnd does not collapse to 1 MSS under
			 * adverse conditions. Implements RFC6582
			 */
			CCV(ccv, snd_cwnd) = max(pipe, CCV(ccv, t_maxseg)) +
			    CCV(ccv, t_maxseg);
		else
			CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
	}
}

/*
 * Handle SOPT_GET/SOPT_SET requests for the per-connection beta and
 * beta_ecn values.
 */
static int
newreno_ctl_output(struct cc_var *ccv, struct sockopt *sopt, void *buf)
{
	struct newreno *nreno;
	struct cc_newreno_opts *opt;

	if (sopt->sopt_valsize != sizeof(struct cc_newreno_opts))
		return (EMSGSIZE);

	if (CC_ALGO(ccv->ccvc.tcp) != &newreno_cc_algo)
		return (ENOPROTOOPT);

	nreno = ccv->cc_data;
	opt = buf;

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		/* We cannot set without cc_data memory. */
		if (nreno == NULL) {
			nreno = newreno_malloc(ccv);
			if (nreno == NULL)
				return (ENOMEM);
		}
		switch (opt->name) {
		case CC_NEWRENO_BETA:
			nreno->beta = opt->val;
			break;
		case CC_NEWRENO_BETA_ECN:
			if (!V_cc_do_abe)
				return (EACCES);
			nreno->beta_ecn = opt->val;
			break;
		default:
			return (ENOPROTOOPT);
		}
		break;
	case SOPT_GET:
		switch (opt->name) {
		case CC_NEWRENO_BETA:
			opt->val = (nreno == NULL) ?
			    V_newreno_beta : nreno->beta;
			break;
		case CC_NEWRENO_BETA_ECN:
			opt->val = (nreno == NULL) ?
			    V_newreno_beta_ecn : nreno->beta_ecn;
			break;
		default:
			return (ENOPROTOOPT);
		}
		break;
	default:
		return (EINVAL);
	}

	return (0);
}
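/*
 * Userland usage sketch for newreno_ctl_output() (illustrative only,
 * error handling omitted; assumes the TCP_CCALGOOPT socket option is the
 * path into ctl_output, and that the connection is already using the
 * "newreno" algorithm, otherwise ENOPROTOOPT is returned):
 *
 *	struct cc_newreno_opts opt = {
 *		.name = CC_NEWRENO_BETA_ECN,
 *		.val = 80,
 *	};
 *	setsockopt(sd, IPPROTO_TCP, TCP_CCALGOOPT, &opt, sizeof(opt));
 *
 * Setting CC_NEWRENO_BETA_ECN fails with EACCES unless ABE
 * (V_cc_do_abe) is enabled.
 */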
/*
 * Sysctl handler for the VNET-wide default beta values.  Accepts values
 * from 1 to 100 and refuses to change beta_ecn while ABE is disabled.
 */
static int
newreno_beta_handler(SYSCTL_HANDLER_ARGS)
{
	int error;
	uint32_t new;

	new = *(uint32_t *)arg1;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (arg1 == &VNET_NAME(newreno_beta_ecn) && !V_cc_do_abe)
			error = EACCES;
		else if (new == 0 || new > 100)
			error = EINVAL;
		else
			*(uint32_t *)arg1 = new;
	}

	return (error);
}

SYSCTL_DECL(_net_inet_tcp_cc_newreno);
SYSCTL_NODE(_net_inet_tcp_cc, OID_AUTO, newreno, CTLFLAG_RW, NULL,
    "New Reno related settings");

SYSCTL_PROC(_net_inet_tcp_cc_newreno, OID_AUTO, beta,
    CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW,
    &VNET_NAME(newreno_beta), 3, &newreno_beta_handler, "IU",
    "New Reno beta, specified as a number between 1 and 100");

SYSCTL_PROC(_net_inet_tcp_cc_newreno, OID_AUTO, beta_ecn,
    CTLFLAG_VNET | CTLTYPE_UINT | CTLFLAG_RW,
    &VNET_NAME(newreno_beta_ecn), 3, &newreno_beta_handler, "IU",
    "New Reno beta ecn, specified as a number between 1 and 100");

DECLARE_CC_MODULE(newreno, &newreno_cc_algo);
MODULE_VERSION(newreno, 1);
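/*
 * Usage sketch for the sysctl knobs declared above (illustrative values):
 *
 *	sysctl net.inet.tcp.cc.newreno.beta=70
 *	sysctl net.inet.tcp.cc.newreno.beta_ecn=90
 *
 * newreno_beta_handler() rejects values outside 1..100 and returns
 * EACCES for beta_ecn while ABE (V_cc_do_abe) is disabled.
 */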