/*-
 * Copyright (c) 2007-2008
 * 	Swinburne University of Technology, Melbourne, Australia
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart and
 * James Healy, made possible in part by a grant from the Cisco University
 * Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * An implementation of the H-TCP congestion control algorithm for FreeBSD,
 * based on the Internet Draft "draft-leith-tcp-htcp-06.txt" by Leith and
 * Shorten. Originally released as part of the NewTCP research project at
 * Swinburne University of Technology's Centre for Advanced Internet
 * Architectures, Melbourne, Australia, which was made possible in part by a
 * grant from the Cisco University Research Program Fund at Community Foundation
 * Silicon Valley. More details are available at:
 *   http://caia.swin.edu.au/urp/newtcp/
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/cc/cc_htcp.c 220592 2011-04-13 11:28:46Z pluknet $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/vnet.h>

#include <netinet/cc.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>

#include <netinet/cc/cc_module.h>

/* Fixed point math shifts. */
#define HTCP_SHIFT 8
#define HTCP_ALPHA_INC_SHIFT 4

#define HTCP_INIT_ALPHA 1
#define HTCP_DELTA_L hz		/* 1 sec in ticks. */
#define HTCP_MINBETA 128	/* 0.5 << HTCP_SHIFT. */
#define HTCP_MAXBETA 204	/* ~0.8 << HTCP_SHIFT. */
#define HTCP_MINROWE 26		/* ~0.1 << HTCP_SHIFT. */
#define HTCP_MAXROWE 512	/* 2 << HTCP_SHIFT. */

/* RTT_ref (ms) used in the calculation of alpha if RTT scaling is enabled. */
#define HTCP_RTT_REF 100

/* Don't trust SRTT until this many samples have been taken. */
#define HTCP_MIN_RTT_SAMPLES 8

/*
 * HTCP_CALC_ALPHA performs a fixed point math calculation to determine the
 * value of alpha, based on the function defined in the HTCP spec.
 *
 * i.e. 1 + 10(delta - delta_l) + ((delta - delta_l) / 2) ^ 2
 *
 * "diff" is passed in to the macro as "delta - delta_l" and is expected to be
 * in units of ticks.
 *
 * The joys of fixed point maths mean our function implementation looks a
 * little funky...
 *
 * In order to maintain some precision in the calculations, a fixed point shift
 * HTCP_ALPHA_INC_SHIFT is used to ensure the integer divisions don't
 * truncate the results too badly.
 *
 * The "16" value is the "1" term in the alpha function shifted up by
 * HTCP_ALPHA_INC_SHIFT.
 *
 * The "160" value is the "10" multiplier in the alpha function multiplied by
 * 2^HTCP_ALPHA_INC_SHIFT.
 *
 * Specifying these as constants reduces the computations required. After
 * up-shifting all the terms in the function and performing the required
 * calculations, we down-shift the final result by HTCP_ALPHA_INC_SHIFT to
 * ensure it is back in the correct range.
 *
 * The "hz" terms are required as kernels can be configured to run with
 * different tick timers, which we have to adjust for in the alpha calculation
 * (which originally was defined in terms of seconds).
 *
 * We also have to be careful to constrain the value of diff such that it won't
 * overflow whilst performing the calculation. The middle term, i.e.
 * (160 * diff) / hz, is the limiting factor in the calculation. We must
 * constrain diff to be less than the max size of an int divided by the
 * constant 160 figure, i.e. diff < INT_MAX / 160.
 *
 * NB: Changing HTCP_ALPHA_INC_SHIFT will require you to MANUALLY update the
 * constants used in this function!
 */
#define HTCP_CALC_ALPHA(diff) \
((\
	(16) + \
	((160 * (diff)) / hz) + \
	(((diff) / hz) * (((diff) << HTCP_ALPHA_INC_SHIFT) / (4 * hz))) \
) >> HTCP_ALPHA_INC_SHIFT)
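
/*
 * Worked example (illustrative only, assuming hz = 1000): for diff = 2000
 * ticks, i.e. 2 seconds beyond delta_l, the macro evaluates to
 * (16 + (160 * 2000) / 1000 + (2000 / 1000) * ((2000 << 4) / 4000)) >> 4
 * = (16 + 320 + 2 * 8) >> 4 = 352 >> 4 = 22, which matches the spec function
 * 1 + 10 * 2 + (2 / 2) ^ 2 = 22 segments per RTT.
 */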

static void	htcp_ack_received(struct cc_var *ccv, uint16_t type);
static void	htcp_cb_destroy(struct cc_var *ccv);
static int	htcp_cb_init(struct cc_var *ccv);
static void	htcp_cong_signal(struct cc_var *ccv, uint32_t type);
static int	htcp_mod_init(void);
static void	htcp_post_recovery(struct cc_var *ccv);
static void	htcp_recalc_alpha(struct cc_var *ccv);
static void	htcp_recalc_beta(struct cc_var *ccv);
static void	htcp_record_rtt(struct cc_var *ccv);
static void	htcp_ssthresh_update(struct cc_var *ccv);

struct htcp {
	/* cwnd before entering cong recovery. */
	unsigned long	prev_cwnd;
	/* cwnd additive increase parameter. */
	int		alpha;
	/* cwnd multiplicative decrease parameter. */
	int		beta;
	/* Largest rtt seen for the flow. */
	int		maxrtt;
	/* Shortest rtt seen for the flow. */
	int		minrtt;
	/* Time of last congestion event in ticks. */
	int		t_last_cong;
};
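
/*
 * Note on units (as implied by how the fields are used below): alpha is held
 * as a whole number of segments, while beta is a fixed point value
 * left-shifted by HTCP_SHIFT (e.g. HTCP_MINBETA of 128 represents 0.5).
 * maxrtt and minrtt are stored in the same units as the tcpcb's t_srtt.
 */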

static int htcp_rtt_ref;
/*
 * The maximum number of ticks the value of diff can reach in
 * htcp_recalc_alpha() before alpha will stop increasing due to overflow.
 * See comment above HTCP_CALC_ALPHA for more info.
 */
static int htcp_max_diff = INT_MAX / ((1 << HTCP_ALPHA_INC_SHIFT) * 10);

/* Per-netstack vars. */
static VNET_DEFINE(u_int, htcp_adaptive_backoff) = 0;
static VNET_DEFINE(u_int, htcp_rtt_scaling) = 0;
#define	V_htcp_adaptive_backoff    VNET(htcp_adaptive_backoff)
#define	V_htcp_rtt_scaling    VNET(htcp_rtt_scaling)

static MALLOC_DEFINE(M_HTCP, "htcp data",
    "Per connection data required for the HTCP congestion control algorithm");

struct cc_algo htcp_cc_algo = {
	.name = "htcp",
	.ack_received = htcp_ack_received,
	.cb_destroy = htcp_cb_destroy,
	.cb_init = htcp_cb_init,
	.cong_signal = htcp_cong_signal,
	.mod_init = htcp_mod_init,
	.post_recovery = htcp_post_recovery,
};

static void
htcp_ack_received(struct cc_var *ccv, uint16_t type)
{
	struct htcp *htcp_data;

	htcp_data = ccv->cc_data;
	htcp_record_rtt(ccv);

	/*
	 * Regular ACK and we're not in cong/fast recovery and we're cwnd
	 * limited and we're either not doing ABC or are slow starting or are
	 * doing ABC and we've sent a cwnd's worth of bytes.
	 */
	if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
	    (ccv->flags & CCF_CWND_LIMITED) && (!V_tcp_do_rfc3465 ||
	    CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) ||
	    (V_tcp_do_rfc3465 && ccv->flags & CCF_ABC_SENTAWND))) {
		htcp_recalc_beta(ccv);
		htcp_recalc_alpha(ccv);
		/*
		 * Use the logic in NewReno ack_received() for slow start and
		 * for the first HTCP_DELTA_L ticks after either the flow starts
		 * or a congestion event (when alpha equals 1).
		 */
		if (htcp_data->alpha == 1 ||
		    CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh))
			newreno_cc_algo.ack_received(ccv, type);
		else {
			if (V_tcp_do_rfc3465) {
				/* Increment cwnd by alpha segments. */
				CCV(ccv, snd_cwnd) += htcp_data->alpha *
				    CCV(ccv, t_maxseg);
				ccv->flags &= ~CCF_ABC_SENTAWND;
			} else
				/*
				 * Increment cwnd by alpha/cwnd segments to
				 * approximate an increase of alpha segments
				 * per RTT.
				 */
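				/*
				 * E.g. with alpha == 4 and a cwnd of 20
				 * segments, each ACK adds approximately
				 * (((4 << HTCP_SHIFT) / 20) * t_maxseg) >> 8,
				 * or about 0.2 of a segment, giving roughly 4
				 * extra segments over a full RTT's worth of
				 * ACKs.
				 */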
				CCV(ccv, snd_cwnd) += (((htcp_data->alpha <<
				    HTCP_SHIFT) / (CCV(ccv, snd_cwnd) /
				    CCV(ccv, t_maxseg))) * CCV(ccv, t_maxseg))
				    >> HTCP_SHIFT;
		}
	}
}

static void
htcp_cb_destroy(struct cc_var *ccv)
{

	if (ccv->cc_data != NULL)
		free(ccv->cc_data, M_HTCP);
}

static int
htcp_cb_init(struct cc_var *ccv)
{
	struct htcp *htcp_data;

	htcp_data = malloc(sizeof(struct htcp), M_HTCP, M_NOWAIT);

	if (htcp_data == NULL)
		return (ENOMEM);

	/* Init some key variables with sensible defaults. */
	htcp_data->alpha = HTCP_INIT_ALPHA;
	htcp_data->beta = HTCP_MINBETA;
	htcp_data->maxrtt = TCPTV_SRTTBASE;
	htcp_data->minrtt = TCPTV_SRTTBASE;
	htcp_data->prev_cwnd = 0;
	htcp_data->t_last_cong = ticks;

	ccv->cc_data = htcp_data;

	return (0);
}

/*
 * Perform any necessary tasks before we enter congestion recovery.
 */
static void
htcp_cong_signal(struct cc_var *ccv, uint32_t type)
{
	struct htcp *htcp_data;

	htcp_data = ccv->cc_data;

	switch (type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
			if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
				/*
				 * Apply hysteresis to maxrtt to ensure
				 * reductions in the RTT are reflected in our
				 * measurements.
				 */
				htcp_data->maxrtt = (htcp_data->minrtt +
				    (htcp_data->maxrtt - htcp_data->minrtt) *
				    95) / 100;
				htcp_ssthresh_update(ccv);
				htcp_data->t_last_cong = ticks;
				htcp_data->prev_cwnd = CCV(ccv, snd_cwnd);
			}
			ENTER_RECOVERY(CCV(ccv, t_flags));
		}
		break;

	case CC_ECN:
		if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
			/*
			 * Apply hysteresis to maxrtt to ensure reductions in
			 * the RTT are reflected in our measurements.
			 */
			htcp_data->maxrtt = (htcp_data->minrtt + (htcp_data->maxrtt -
			    htcp_data->minrtt) * 95) / 100;
			htcp_ssthresh_update(ccv);
			CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
			htcp_data->t_last_cong = ticks;
			htcp_data->prev_cwnd = CCV(ccv, snd_cwnd);
			ENTER_CONGRECOVERY(CCV(ccv, t_flags));
		}
		break;

	case CC_RTO:
		/*
		 * Grab the current time and record it so we know when the
		 * most recent congestion event was. Only record it when the
		 * timeout has fired more than once, as there is a reasonable
		 * chance the first one is a false alarm and may not indicate
		 * congestion.
		 */
		if (CCV(ccv, t_rxtshift) >= 2)
			htcp_data->t_last_cong = ticks;
		break;
	}
}

static int
htcp_mod_init(void)
{

	htcp_cc_algo.after_idle = newreno_cc_algo.after_idle;

	/*
	 * HTCP_RTT_REF is defined in ms, and t_srtt in the tcpcb is stored in
	 * units of TCP_RTT_SCALE*hz. Scale HTCP_RTT_REF to be in the same units
	 * as t_srtt.
	 */
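	/*
	 * E.g. with the usual TCP_RTT_SCALE of 32 and hz = 1000, the 100ms
	 * reference RTT becomes htcp_rtt_ref = (100 * 32 * 1000) / 1000 = 3200
	 * t_srtt units.
	 */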
	htcp_rtt_ref = (HTCP_RTT_REF * TCP_RTT_SCALE * hz) / 1000;

	return (0);
}

/*
 * Perform any necessary tasks before we exit congestion recovery.
 */
static void
htcp_post_recovery(struct cc_var *ccv)
{
	struct htcp *htcp_data;

	htcp_data = ccv->cc_data;

	if (IN_FASTRECOVERY(CCV(ccv, t_flags))) {
		/*
		 * If inflight data is less than ssthresh, set cwnd
		 * conservatively to avoid a burst of data, as suggested in the
		 * NewReno RFC. Otherwise, use the HTCP method.
		 *
		 * XXXLAS: Find a way to do this without needing curack
		 */
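		/*
		 * E.g. with beta == HTCP_MAXBETA (204, ~0.8) and a prev_cwnd of
		 * 100 segments, the HTCP branch below restores cwnd to
		 * (204 * 100) >> HTCP_SHIFT = 79 segments worth of data.
		 */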
		if (SEQ_GT(ccv->curack + CCV(ccv, snd_ssthresh),
		    CCV(ccv, snd_max)))
			CCV(ccv, snd_cwnd) = CCV(ccv, snd_max) - ccv->curack +
			    CCV(ccv, t_maxseg);
		else
			CCV(ccv, snd_cwnd) = max(1, ((htcp_data->beta *
			    htcp_data->prev_cwnd / CCV(ccv, t_maxseg))
			    >> HTCP_SHIFT)) * CCV(ccv, t_maxseg);
	}
}

static void
htcp_recalc_alpha(struct cc_var *ccv)
{
	struct htcp *htcp_data;
	int alpha, diff, now;

	htcp_data = ccv->cc_data;
	now = ticks;

	/*
	 * If ticks has wrapped around (will happen approximately once every 49
	 * days on a machine with the default kern.hz=1000) and a flow straddles
	 * the wrap point, our alpha calcs will be completely wrong. We cut our
	 * losses and restart alpha from scratch by setting t_last_cong = now -
	 * HTCP_DELTA_L.
	 *
	 * This does not deflate our cwnd at all. It simply slows the rate cwnd
	 * is growing by until alpha regains the value it held prior to taking
	 * this drastic measure.
	 */
	if (now < htcp_data->t_last_cong)
		htcp_data->t_last_cong = now - HTCP_DELTA_L;

	diff = now - htcp_data->t_last_cong - HTCP_DELTA_L;

	/* Cap alpha if the value of diff would overflow HTCP_CALC_ALPHA(). */
	if (diff < htcp_max_diff) {
		/*
		 * If it has been more than HTCP_DELTA_L ticks since congestion,
		 * increase alpha according to the function defined in the spec.
		 */
		if (diff > 0) {
			alpha = HTCP_CALC_ALPHA(diff);

			/*
			 * Adaptive backoff fairness adjustment:
			 * 2 * (1 - beta) * alpha_raw
			 */
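			/*
			 * E.g. when beta == HTCP_MINBETA (0.5) the factor
			 * 2 * (1 - beta) is 1 and alpha is unchanged; when
			 * beta == HTCP_MAXBETA (~0.8) alpha is scaled down to
			 * roughly 0.4 of its raw value.
			 */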
			if (V_htcp_adaptive_backoff)
				alpha = max(1, (2 * ((1 << HTCP_SHIFT) -
				    htcp_data->beta) * alpha) >> HTCP_SHIFT);

			/*
			 * RTT scaling: (RTT / RTT_ref) * alpha
			 * alpha will be the raw value from HTCP_CALC_ALPHA() if
			 * adaptive backoff is off, or the adjusted value if
			 * adaptive backoff is on.
			 */
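			/*
			 * E.g. an SRTT of 50ms against the 100ms HTCP_RTT_REF
			 * gives a scaling factor of ~0.5 (clamped between
			 * HTCP_MINROWE and HTCP_MAXROWE), so alpha grows about
			 * half as fast as it would for a 100ms path.
			 */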
			if (V_htcp_rtt_scaling)
				alpha = max(1, (min(max(HTCP_MINROWE,
				    (CCV(ccv, t_srtt) << HTCP_SHIFT) /
				    htcp_rtt_ref), HTCP_MAXROWE) * alpha)
				    >> HTCP_SHIFT);

		} else
			alpha = 1;

		htcp_data->alpha = alpha;
	}
}

static void
htcp_recalc_beta(struct cc_var *ccv)
{
	struct htcp *htcp_data;

	htcp_data = ccv->cc_data;

	/*
	 * TCPTV_SRTTBASE is the initialised value of each connection's SRTT, so
	 * we only calc beta if the connection's SRTT has been changed from its
	 * initial value. beta is bounded to ensure it is always between
	 * HTCP_MINBETA and HTCP_MAXBETA.
	 */
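	/*
	 * E.g. a flow with minrtt == 40 and maxrtt == 100 (in t_srtt units)
	 * gets beta = (40 << HTCP_SHIFT) / 100 = 102, which is clamped up to
	 * HTCP_MINBETA (0.5); a flow with minrtt == 90 and maxrtt == 100 gets
	 * beta = 230, clamped down to HTCP_MAXBETA (~0.8).
	 */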
	if (V_htcp_adaptive_backoff && htcp_data->minrtt != TCPTV_SRTTBASE &&
	    htcp_data->maxrtt != TCPTV_SRTTBASE)
		htcp_data->beta = min(max(HTCP_MINBETA,
		    (htcp_data->minrtt << HTCP_SHIFT) / htcp_data->maxrtt),
		    HTCP_MAXBETA);
	else
		htcp_data->beta = HTCP_MINBETA;
}

/*
 * Record the minimum and maximum RTT seen for the connection. These are used in
 * the calculation of beta if adaptive backoff is enabled.
 */
static void
htcp_record_rtt(struct cc_var *ccv)
{
	struct htcp *htcp_data;

	htcp_data = ccv->cc_data;

	/* XXXLAS: Should there be some hysteresis for minrtt? */

	/*
	 * Record the current SRTT as our minrtt if it's the smallest we've seen
	 * or minrtt is currently equal to its initialised value. Ignore SRTT
	 * until a min number of samples have been taken.
	 */
	if ((CCV(ccv, t_srtt) < htcp_data->minrtt ||
	    htcp_data->minrtt == TCPTV_SRTTBASE) &&
	    (CCV(ccv, t_rttupdated) >= HTCP_MIN_RTT_SAMPLES))
		htcp_data->minrtt = CCV(ccv, t_srtt);

	/*
	 * Record the current SRTT as our maxrtt if it's the largest we've
	 * seen. Ignore SRTT until a min number of samples have been taken.
	 */
	if (CCV(ccv, t_srtt) > htcp_data->maxrtt
	    && CCV(ccv, t_rttupdated) >= HTCP_MIN_RTT_SAMPLES)
		htcp_data->maxrtt = CCV(ccv, t_srtt);
}

/*
 * Update the ssthresh in the event of congestion.
 */
static void
htcp_ssthresh_update(struct cc_var *ccv)
{
	struct htcp *htcp_data;

	htcp_data = ccv->cc_data;

	/*
	 * On the first congestion event, set ssthresh to cwnd * 0.5; on
	 * subsequent congestion events, set it to cwnd * beta.
	 */
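	/*
	 * E.g. a first-time event with a cwnd of 100 segments leaves ssthresh
	 * at 50 segments worth of data; a later event with beta == 179 (~0.7)
	 * would leave it at (100 * 179) >> HTCP_SHIFT = 69 segments worth.
	 */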
	if (CCV(ccv, snd_ssthresh) == TCP_MAXWIN << TCP_MAX_WINSHIFT)
		CCV(ccv, snd_ssthresh) = (CCV(ccv, snd_cwnd) * HTCP_MINBETA)
		    >> HTCP_SHIFT;
	else {
		htcp_recalc_beta(ccv);
		CCV(ccv, snd_ssthresh) = (CCV(ccv, snd_cwnd) * htcp_data->beta)
		    >> HTCP_SHIFT;
	}
}


SYSCTL_DECL(_net_inet_tcp_cc_htcp);
SYSCTL_NODE(_net_inet_tcp_cc, OID_AUTO, htcp, CTLFLAG_RW,
    NULL, "H-TCP related settings");
SYSCTL_VNET_UINT(_net_inet_tcp_cc_htcp, OID_AUTO, adaptive_backoff, CTLFLAG_RW,
    &VNET_NAME(htcp_adaptive_backoff), 0, "enable H-TCP adaptive backoff");
SYSCTL_VNET_UINT(_net_inet_tcp_cc_htcp, OID_AUTO, rtt_scaling, CTLFLAG_RW,
    &VNET_NAME(htcp_rtt_scaling), 0, "enable H-TCP RTT scaling");

DECLARE_CC_MODULE(htcp, &htcp_cc_algo);