/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/netinet/sctp_cc_functions.c 360758 2020-05-07 03:01:01Z tuexen $");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_dtrace_declare.h>

#define SHIFT_MPTCP_MULTI_N 40
#define SHIFT_MPTCP_MULTI_Z 16
#define SHIFT_MPTCP_MULTI 8
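
/*
 * A note on the fixed-point scales used by the MPTCP-like increase below:
 * t_path_mptcp is accumulated at scale 2^SHIFT_MPTCP_MULTI_Z and max_path
 * at scale 2^SHIFT_MPTCP_MULTI_N, so mptcp_like_alpha = max_path /
 * t_path_mptcp^2 is left with a residual scale of 2^(40 - 2 * 16) =
 * 2^SHIFT_MPTCP_MULTI, which the final ">> SHIFT_MPTCP_MULTI" removes.
 */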

static void
sctp_enforce_cwnd_limit(struct sctp_association *assoc, struct sctp_nets *net)
{
	if ((assoc->max_cwnd > 0) &&
	    (net->cwnd > assoc->max_cwnd) &&
	    (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) {
		net->cwnd = assoc->max_cwnd;
		if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
			net->cwnd = net->mtu - sizeof(struct sctphdr);
		}
	}
}
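
/*
 * Example: with assoc->max_cwnd = 10000 and an MTU of 1500, a cwnd of
 * 20000 is clamped to 10000.  The clamp never pushes cwnd below one MTU
 * worth of payload (net->mtu - sizeof(struct sctphdr)).
 */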

static void
sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	struct sctp_association *assoc;
	uint32_t cwnd_in_mtu;

	assoc = &stcb->asoc;
	cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
	if (cwnd_in_mtu == 0) {
		/* Using 0 means that the value of RFC 4960 is used. */
		net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
	} else {
		/*
		 * We take the minimum of the burst limit and the initial
		 * congestion window.
		 */
		if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
			cwnd_in_mtu = assoc->max_burst;
		net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
	}
	if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
		/* In case of resource pooling initialize appropriately */
		net->cwnd /= assoc->numnets;
		if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
			net->cwnd = net->mtu - sizeof(struct sctphdr);
		}
	}
	sctp_enforce_cwnd_limit(assoc, net);
	net->ssthresh = assoc->peers_rwnd;
	SDT_PROBE5(sctp, cwnd, net, init,
	    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
	    0, net->cwnd);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) &
	    (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
	}
}
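
/*
 * Example: with sctp_initial_cwnd = 0 and an MTU of 1500, the RFC 4960
 * rule above gives min(4 * 1500, max(2 * 1500, SCTP_INITIAL_CWND)).
 * Assuming the customary SCTP_INITIAL_CWND of 4380 bytes, that is
 * min(6000, max(3000, 4380)) = 4380 bytes.
 */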

static void
sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_nets *net;
	uint32_t t_ssthresh, t_cwnd;
	uint64_t t_ucwnd_sbw;

	/* MT FIXME: Don't compute this over and over again */
	t_ssthresh = 0;
	t_cwnd = 0;
	t_ucwnd_sbw = 0;
	if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
	    (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			t_ssthresh += net->ssthresh;
			t_cwnd += net->cwnd;
			if (net->lastsa > 0) {
				t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)net->lastsa;
			}
		}
		if (t_ucwnd_sbw == 0) {
			t_ucwnd_sbw = 1;
		}
	}

	/*-
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* Out of an RFC 2582 fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * Per Section 7.2.3, check whether any
				 * destinations had a fast retransmit sent
				 * to them.  If so, adjust ssthresh and
				 * cwnd.
				 */
				struct sctp_tmit_chunk *lchk;
				int old_cwnd = net->cwnd;

				if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
				    (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
					if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) {
						net->ssthresh = (uint32_t)(((uint64_t)4 *
						    (uint64_t)net->mtu *
						    (uint64_t)net->ssthresh) /
						    (uint64_t)t_ssthresh);
					}
					if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2) {
						uint32_t srtt;

						srtt = net->lastsa;
						/*
						 * lastsa>>3;  we don't need
						 * to divide ...
						 */
						if (srtt == 0) {
							srtt = 1;
						}
						/*
						 * Short Version => Equal to
						 * Contel Version MBe
						 */
						net->ssthresh = (uint32_t)(((uint64_t)4 *
						    (uint64_t)net->mtu *
						    (uint64_t)net->cwnd) /
						    ((uint64_t)srtt *
						    t_ucwnd_sbw));
						/* INCREASE FACTOR */
					}
					if ((net->cwnd > t_cwnd / 2) &&
					    (net->ssthresh < net->cwnd - t_cwnd / 2)) {
						net->ssthresh = net->cwnd - t_cwnd / 2;
					}
					if (net->ssthresh < net->mtu) {
						net->ssthresh = net->mtu;
					}
				} else {
					net->ssthresh = net->cwnd / 2;
					if (net->ssthresh < (net->mtu * 2)) {
						net->ssthresh = 2 * net->mtu;
					}
				}
				net->cwnd = net->ssthresh;
				sctp_enforce_cwnd_limit(asoc, net);
				SDT_PROBE5(sctp, cwnd, net, fr,
				    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
				    old_cwnd, net->cwnd);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
					    SCTP_CWND_LOG_FROM_FR);
				}
				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net,
				    SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_1);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC 2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}
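
/*
 * Example of the RPV1 resource-pooling cut above: with two paths whose
 * ssthresh values are 12000 and 4000 (t_ssthresh = 16000) and an MTU of
 * 1000, the first path gets ssthresh = 4 * 1000 * 12000 / 16000 = 3000
 * and the second 4 * 1000 * 4000 / 16000 = 1000, so a path holding a
 * larger share of the pooled ssthresh absorbs a proportionally larger
 * share of the reduction.
 */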

/* Defines for instantaneous bw decisions */
#define SCTP_INST_LOOSING 1	/* Losing to other flows */
#define SCTP_INST_NEUTRAL 2	/* Neutral, no indication */
#define SCTP_INST_GAINING 3	/* Gaining, step down possible */

static int
cc_bw_same(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw,
    uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind)
{
	uint64_t oth, probepoint;

	probepoint = (((uint64_t)net->cwnd) << 32);
	if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
		/*
		 * rtt increased; we don't update bw, so we don't update
		 * the rtt either.
		 */
		/* Probe point 5 */
		probepoint |= ((5 << 16) | 1);
		SDT_PROBE5(sctp, cwnd, net, rttvar,
		    vtag,
		    ((net->cc_mod.rtcc.lbw << 32) | nbw),
		    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
		    net->flight_size,
		    probepoint);
		if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
			if (net->cc_mod.rtcc.last_step_state == 5)
				net->cc_mod.rtcc.step_cnt++;
			else
				net->cc_mod.rtcc.step_cnt = 1;
			net->cc_mod.rtcc.last_step_state = 5;
			if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
			    ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
			    ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
				/* Try a step down */
				oth = net->cc_mod.rtcc.vol_reduce;
				oth <<= 16;
				oth |= net->cc_mod.rtcc.step_cnt;
				oth <<= 16;
				oth |= net->cc_mod.rtcc.last_step_state;
				SDT_PROBE5(sctp, cwnd, net, rttstep,
				    vtag,
				    ((net->cc_mod.rtcc.lbw << 32) | nbw),
				    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
				    oth,
				    probepoint);
				if (net->cwnd > (4 * net->mtu)) {
					net->cwnd -= net->mtu;
					net->cc_mod.rtcc.vol_reduce++;
				} else {
					net->cc_mod.rtcc.step_cnt = 0;
				}
			}
		}
		return (1);
	}
	if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
		/*
		 * rtt decreased, so there could be more room.  We update
		 * both the bw and the rtt here to lock this in as a good
		 * step down.
		 */
		/* Probe point 6 */
		probepoint |= ((6 << 16) | 0);
		SDT_PROBE5(sctp, cwnd, net, rttvar,
		    vtag,
		    ((net->cc_mod.rtcc.lbw << 32) | nbw),
		    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
		    net->flight_size,
		    probepoint);
		if (net->cc_mod.rtcc.steady_step) {
			oth = net->cc_mod.rtcc.vol_reduce;
			oth <<= 16;
			oth |= net->cc_mod.rtcc.step_cnt;
			oth <<= 16;
			oth |= net->cc_mod.rtcc.last_step_state;
			SDT_PROBE5(sctp, cwnd, net, rttstep,
			    vtag,
			    ((net->cc_mod.rtcc.lbw << 32) | nbw),
			    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
			    oth,
			    probepoint);
			if ((net->cc_mod.rtcc.last_step_state == 5) &&
			    (net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step)) {
				/* Step down worked */
				net->cc_mod.rtcc.step_cnt = 0;
				return (1);
			} else {
				net->cc_mod.rtcc.last_step_state = 6;
				net->cc_mod.rtcc.step_cnt = 0;
			}
		}
		net->cc_mod.rtcc.lbw = nbw;
		net->cc_mod.rtcc.lbw_rtt = net->rtt;
		net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
		if (inst_ind == SCTP_INST_GAINING)
			return (1);
		else if (inst_ind == SCTP_INST_NEUTRAL)
			return (1);
		else
			return (0);
	}
	/*
	 * OK, bw and rtt remained the same; no update to either.
	 */
	/* Probe point 7 */
	probepoint |= ((7 << 16) | net->cc_mod.rtcc.ret_from_eq);
	SDT_PROBE5(sctp, cwnd, net, rttvar,
	    vtag,
	    ((net->cc_mod.rtcc.lbw << 32) | nbw),
	    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
	    net->flight_size,
	    probepoint);
	if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
		if (net->cc_mod.rtcc.last_step_state == 5)
			net->cc_mod.rtcc.step_cnt++;
		else
			net->cc_mod.rtcc.step_cnt = 1;
		net->cc_mod.rtcc.last_step_state = 5;
		if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
		    ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
		    ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
			/* Try a step down */
			if (net->cwnd > (4 * net->mtu)) {
				net->cwnd -= net->mtu;
				net->cc_mod.rtcc.vol_reduce++;
				return (1);
			} else {
				net->cc_mod.rtcc.step_cnt = 0;
			}
		}
	}
	if (inst_ind == SCTP_INST_GAINING)
		return (1);
	else if (inst_ind == SCTP_INST_NEUTRAL)
		return (1);
	else
		return ((int)net->cc_mod.rtcc.ret_from_eq);
}

static int
cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
    uint64_t vtag, uint8_t inst_ind)
{
	uint64_t oth, probepoint;

	/* Bandwidth decreased. */
	probepoint = (((uint64_t)net->cwnd) << 32);
	if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
		/* rtt increased */
		/* Did we add more? */
		if ((net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) &&
		    (inst_ind != SCTP_INST_LOOSING)) {
			/* Maybe we caused it; back off? */
			/* PROBE POINT 1 */
			probepoint |= ((1 << 16) | 1);
			SDT_PROBE5(sctp, cwnd, net, rttvar,
			    vtag,
			    ((net->cc_mod.rtcc.lbw << 32) | nbw),
			    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
			    net->flight_size,
			    probepoint);
			if (net->cc_mod.rtcc.ret_from_eq) {
				/*
				 * Switch over to CA if we are less
				 * aggressive.
				 */
				net->ssthresh = net->cwnd - 1;
				net->partial_bytes_acked = 0;
			}
			return (1);
		}
		/* Probe point 2 */
		probepoint |= ((2 << 16) | 0);
		SDT_PROBE5(sctp, cwnd, net, rttvar,
		    vtag,
		    ((net->cc_mod.rtcc.lbw << 32) | nbw),
		    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
		    net->flight_size,
		    probepoint);
		/* Someone else - fight for more? */
		if (net->cc_mod.rtcc.steady_step) {
			oth = net->cc_mod.rtcc.vol_reduce;
			oth <<= 16;
			oth |= net->cc_mod.rtcc.step_cnt;
			oth <<= 16;
			oth |= net->cc_mod.rtcc.last_step_state;
			SDT_PROBE5(sctp, cwnd, net, rttstep,
			    vtag,
			    ((net->cc_mod.rtcc.lbw << 32) | nbw),
			    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
			    oth,
			    probepoint);
			/*
			 * Did we voluntarily give up some? If so, take
			 * one back.
			 */
			if ((net->cc_mod.rtcc.vol_reduce) &&
			    (inst_ind != SCTP_INST_GAINING)) {
				net->cwnd += net->mtu;
				sctp_enforce_cwnd_limit(&stcb->asoc, net);
				net->cc_mod.rtcc.vol_reduce--;
			}
			net->cc_mod.rtcc.last_step_state = 2;
			net->cc_mod.rtcc.step_cnt = 0;
		}
		goto out_decision;
	} else if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
		/* bw & rtt decreased */
		/* Probe point 3 */
		probepoint |= ((3 << 16) | 0);
		SDT_PROBE5(sctp, cwnd, net, rttvar,
		    vtag,
		    ((net->cc_mod.rtcc.lbw << 32) | nbw),
		    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
		    net->flight_size,
		    probepoint);
		if (net->cc_mod.rtcc.steady_step) {
			oth = net->cc_mod.rtcc.vol_reduce;
			oth <<= 16;
			oth |= net->cc_mod.rtcc.step_cnt;
			oth <<= 16;
			oth |= net->cc_mod.rtcc.last_step_state;
			SDT_PROBE5(sctp, cwnd, net, rttstep,
			    vtag,
			    ((net->cc_mod.rtcc.lbw << 32) | nbw),
			    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
			    oth,
			    probepoint);
			if ((net->cc_mod.rtcc.vol_reduce) &&
			    (inst_ind != SCTP_INST_GAINING)) {
				net->cwnd += net->mtu;
				sctp_enforce_cwnd_limit(&stcb->asoc, net);
				net->cc_mod.rtcc.vol_reduce--;
			}
			net->cc_mod.rtcc.last_step_state = 3;
			net->cc_mod.rtcc.step_cnt = 0;
		}
		goto out_decision;
	}
	/* The bw decreased but rtt stayed the same */
	/* Probe point 4 */
	probepoint |= ((4 << 16) | 0);
	SDT_PROBE5(sctp, cwnd, net, rttvar,
	    vtag,
	    ((net->cc_mod.rtcc.lbw << 32) | nbw),
	    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
	    net->flight_size,
	    probepoint);
	if (net->cc_mod.rtcc.steady_step) {
		oth = net->cc_mod.rtcc.vol_reduce;
		oth <<= 16;
		oth |= net->cc_mod.rtcc.step_cnt;
		oth <<= 16;
		oth |= net->cc_mod.rtcc.last_step_state;
		SDT_PROBE5(sctp, cwnd, net, rttstep,
		    vtag,
		    ((net->cc_mod.rtcc.lbw << 32) | nbw),
		    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
		    oth,
		    probepoint);
		if ((net->cc_mod.rtcc.vol_reduce) &&
		    (inst_ind != SCTP_INST_GAINING)) {
			net->cwnd += net->mtu;
			sctp_enforce_cwnd_limit(&stcb->asoc, net);
			net->cc_mod.rtcc.vol_reduce--;
		}
		net->cc_mod.rtcc.last_step_state = 4;
		net->cc_mod.rtcc.step_cnt = 0;
	}
out_decision:
	net->cc_mod.rtcc.lbw = nbw;
	net->cc_mod.rtcc.lbw_rtt = net->rtt;
	net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
	if (inst_ind == SCTP_INST_GAINING) {
		return (1);
	} else {
		return (0);
	}
}

static int
cc_bw_increase(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t vtag)
{
	uint64_t oth, probepoint;

	/*
	 * BW increased, so update and return 0, since all actions in our
	 * table say to do the normal CC update. Note that we pay no
	 * attention to the inst_ind since our overall sum is increasing.
	 */
	/* PROBE POINT 0 */
	probepoint = (((uint64_t)net->cwnd) << 32);
	SDT_PROBE5(sctp, cwnd, net, rttvar,
	    vtag,
	    ((net->cc_mod.rtcc.lbw << 32) | nbw),
	    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
	    net->flight_size,
	    probepoint);
	if (net->cc_mod.rtcc.steady_step) {
		oth = net->cc_mod.rtcc.vol_reduce;
		oth <<= 16;
		oth |= net->cc_mod.rtcc.step_cnt;
		oth <<= 16;
		oth |= net->cc_mod.rtcc.last_step_state;
		SDT_PROBE5(sctp, cwnd, net, rttstep,
		    vtag,
		    ((net->cc_mod.rtcc.lbw << 32) | nbw),
		    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
		    oth,
		    probepoint);
		net->cc_mod.rtcc.last_step_state = 0;
		net->cc_mod.rtcc.step_cnt = 0;
		net->cc_mod.rtcc.vol_reduce = 0;
	}
	net->cc_mod.rtcc.lbw = nbw;
	net->cc_mod.rtcc.lbw_rtt = net->rtt;
	net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
	return (0);
}

/*
 * RTCC algorithm to limit growth of cwnd; returns nonzero if cwnd growth
 * should NOT be allowed.
 */
static int
cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw)
{
	uint64_t bw_offset, rtt_offset;
	uint64_t probepoint, rtt, vtag;
	uint64_t bytes_for_this_rtt, inst_bw;
	uint64_t div, inst_off;
	int bw_shift;
	uint8_t inst_ind;
	int ret;

	/*-
	 * Here we need to see if we want
	 * to limit cwnd growth due to increase
	 * in overall rtt but no increase in bw.
	 * We use the following table to figure
	 * out what we should do. When we return
	 * 0, cc update goes on as planned. If we
	 * return 1, then no cc update happens and cwnd
	 * stays where it is.
	 * ----------------------------------
	 *   BW    |    RTT   | Action
	 * *********************************
	 *   INC   |    INC   | return 0
	 * ----------------------------------
	 *   INC   |    SAME  | return 0
	 * ----------------------------------
	 *   INC   |    DECR  | return 0
	 * ----------------------------------
	 *   SAME  |    INC   | return 1
	 * ----------------------------------
	 *   SAME  |    SAME  | return 1
	 * ----------------------------------
	 *   SAME  |    DECR  | return 0
	 * ----------------------------------
	 *   DECR  |    INC   | return 0 or 1 based on whether we caused it.
	 * ----------------------------------
	 *   DECR  |    SAME  | return 0
	 * ----------------------------------
	 *   DECR  |    DECR  | return 0
	 * ----------------------------------
	 *
	 * We are a bit fuzzy about what counts as an
	 * increase or decrease. For BW it is the
	 * same if it did not change within 1/64th.
	 * For RTT it stayed the same if it did not
	 * change within 1/32nd.
	 */
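	/*
	 * Example of the fuzz bands: with lbw = 64000 bytes/sec, the 1/64
	 * band is +/- 1000, so any nbw in [63000, 65000] is treated as
	 * "SAME"; with lbw_rtt = 32000 usec, the 1/32 band treats any rtt
	 * in [31000, 33000] usec as "SAME".
	 */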
	bw_shift = SCTP_BASE_SYSCTL(sctp_rttvar_bw);
	rtt = stcb->asoc.my_vtag;
	vtag = (rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport);
	probepoint = (((uint64_t)net->cwnd) << 32);
	rtt = net->rtt;
	if (net->cc_mod.rtcc.rtt_set_this_sack) {
		net->cc_mod.rtcc.rtt_set_this_sack = 0;
		bytes_for_this_rtt = net->cc_mod.rtcc.bw_bytes - net->cc_mod.rtcc.bw_bytes_at_last_rttc;
		net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
		if (net->rtt) {
			div = net->rtt / 1000;
			if (div) {
				inst_bw = bytes_for_this_rtt / div;
				inst_off = inst_bw >> bw_shift;
				if (inst_bw > nbw)
					inst_ind = SCTP_INST_GAINING;
				else if ((inst_bw + inst_off) < nbw)
					inst_ind = SCTP_INST_LOOSING;
				else
					inst_ind = SCTP_INST_NEUTRAL;
				probepoint |= ((0xb << 16) | inst_ind);
			} else {
				inst_ind = net->cc_mod.rtcc.last_inst_ind;
				inst_bw = bytes_for_this_rtt / (uint64_t)(net->rtt);
				/* Can't determine; do not change */
				probepoint |= ((0xc << 16) | inst_ind);
			}
		} else {
			inst_ind = net->cc_mod.rtcc.last_inst_ind;
			inst_bw = bytes_for_this_rtt;
			/* Can't determine; do not change */
			probepoint |= ((0xd << 16) | inst_ind);
		}
		SDT_PROBE5(sctp, cwnd, net, rttvar,
		    vtag,
		    ((nbw << 32) | inst_bw),
		    ((net->cc_mod.rtcc.lbw_rtt << 32) | rtt),
		    net->flight_size,
		    probepoint);
	} else {
		/* No rtt measurement, use last one */
		inst_ind = net->cc_mod.rtcc.last_inst_ind;
	}
	bw_offset = net->cc_mod.rtcc.lbw >> bw_shift;
	if (nbw > net->cc_mod.rtcc.lbw + bw_offset) {
		ret = cc_bw_increase(stcb, net, nbw, vtag);
		goto out;
	}
	rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt);
	if (nbw < net->cc_mod.rtcc.lbw - bw_offset) {
		ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, vtag, inst_ind);
		goto out;
	}
	/*
	 * If we reach here then we are in a situation where the bw stayed
	 * the same.
	 */
	ret = cc_bw_same(stcb, net, nbw, rtt_offset, vtag, inst_ind);
out:
	net->cc_mod.rtcc.last_inst_ind = inst_ind;
	return (ret);
}

static void
sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit, int use_rtcc)
{
	struct sctp_nets *net;
	int old_cwnd;
	uint32_t t_ssthresh, t_cwnd, incr;
	uint64_t t_ucwnd_sbw;
	uint64_t t_path_mptcp;
	uint64_t mptcp_like_alpha;
	uint32_t srtt;
	uint64_t max_path;

	/* MT FIXME: Don't compute this over and over again */
	t_ssthresh = 0;
	t_cwnd = 0;
	t_ucwnd_sbw = 0;
	t_path_mptcp = 0;
	mptcp_like_alpha = 1;
	if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2) ||
	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_MPTCP)) {
		max_path = 0;
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			t_ssthresh += net->ssthresh;
			t_cwnd += net->cwnd;
			/* lastsa>>3;  we don't need to divide ... */
			srtt = net->lastsa;
			if (srtt > 0) {
				uint64_t tmp;

				t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)srtt;
				t_path_mptcp += (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_Z) /
				    (((uint64_t)net->mtu) * (uint64_t)srtt);
				tmp = (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_N) /
				    ((uint64_t)net->mtu * (uint64_t)(srtt * srtt));
				if (tmp > max_path) {
					max_path = tmp;
				}
			}
		}
		if (t_path_mptcp > 0) {
			mptcp_like_alpha = max_path / (t_path_mptcp * t_path_mptcp);
		} else {
			mptcp_like_alpha = 1;
		}
	}
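	/*
	 * Worked example for a single path with cwnd = 16000, mtu = 1000
	 * and srtt = 4: t_path_mptcp = (16000 << 16) / (1000 * 4) = 2^18
	 * and max_path = (16000 << 40) / (1000 * 16) = 2^40, so
	 * mptcp_like_alpha = 2^40 / (2^18)^2 = 16 at scale 2^8.  The CA
	 * increase below then yields (16 * 16000) >> 8 = 1000, i.e. one
	 * MTU per window, so the MPTCP-like rule degenerates to standard
	 * congestion avoidance on a lone path.
	 */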
	if (t_ssthresh == 0) {
		t_ssthresh = 1;
	}
	if (t_ucwnd_sbw == 0) {
		t_ucwnd_sbw = 1;
	}
	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		/* If nothing was acked on this destination, skip it. */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
		 * } else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif

		if (asoc->fast_retran_loss_recovery &&
		    (will_exit == 0) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update.
			 */
			return;
		}
		/*
		 * Did any measurements go on for this network?
		 */
		if (use_rtcc && (net->cc_mod.rtcc.tls_needs_set > 0)) {
			uint64_t nbw;

			/*
			 * At this point our bw_bytes has been updated by
			 * incoming sack information.
			 *
			 * But our bw may not yet be set.
			 */
			if ((net->cc_mod.rtcc.new_tot_time / 1000) > 0) {
				nbw = net->cc_mod.rtcc.bw_bytes / (net->cc_mod.rtcc.new_tot_time / 1000);
			} else {
				nbw = net->cc_mod.rtcc.bw_bytes;
			}
			if (net->cc_mod.rtcc.lbw) {
				if (cc_bw_limit(stcb, net, nbw)) {
					/* Hold here, no update */
					continue;
				}
			} else {
				uint64_t vtag, probepoint;

				probepoint = (((uint64_t)net->cwnd) << 32);
				probepoint |= ((0xa << 16) | 0);
				vtag = (net->rtt << 32) |
				    (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
				    (stcb->rport);

				SDT_PROBE5(sctp, cwnd, net, rttvar,
				    vtag,
				    nbw,
				    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
				    net->flight_size,
				    probepoint);
				net->cc_mod.rtcc.lbw = nbw;
				net->cc_mod.rtcc.lbw_rtt = net->rtt;
				if (net->cc_mod.rtcc.rtt_set_this_sack) {
					net->cc_mod.rtcc.rtt_set_this_sack = 0;
					net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
				}
			}
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			/* If the cumulative ack moved we can proceed */
			if (net->cwnd <= net->ssthresh) {
				/* We are in slow start */
				if (net->flight_size + net->net_ack >= net->cwnd) {
					uint32_t limit;

					old_cwnd = net->cwnd;
					switch (asoc->sctp_cmt_on_off) {
					case SCTP_CMT_RPV1:
						limit = (uint32_t)(((uint64_t)net->mtu *
						    (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
						    (uint64_t)net->ssthresh) /
						    (uint64_t)t_ssthresh);
						incr = (uint32_t)(((uint64_t)net->net_ack *
						    (uint64_t)net->ssthresh) /
						    (uint64_t)t_ssthresh);
						if (incr > limit) {
							incr = limit;
						}
						if (incr == 0) {
							incr = 1;
						}
						break;
					case SCTP_CMT_RPV2:
						/*
						 * lastsa>>3;  we don't need
						 * to divide ...
						 */
						srtt = net->lastsa;
						if (srtt == 0) {
							srtt = 1;
						}
						limit = (uint32_t)(((uint64_t)net->mtu *
						    (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
						    (uint64_t)net->cwnd) /
						    ((uint64_t)srtt * t_ucwnd_sbw));
						/* INCREASE FACTOR */
						incr = (uint32_t)(((uint64_t)net->net_ack *
						    (uint64_t)net->cwnd) /
						    ((uint64_t)srtt * t_ucwnd_sbw));
						/* INCREASE FACTOR */
						if (incr > limit) {
							incr = limit;
						}
						if (incr == 0) {
							incr = 1;
						}
						break;
					case SCTP_CMT_MPTCP:
						limit = (uint32_t)(((uint64_t)net->mtu *
						    mptcp_like_alpha *
						    (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) >>
						    SHIFT_MPTCP_MULTI);
						incr = (uint32_t)(((uint64_t)net->net_ack *
						    mptcp_like_alpha) >>
						    SHIFT_MPTCP_MULTI);
						if (incr > limit) {
							incr = limit;
						}
						if (incr > net->net_ack) {
							incr = net->net_ack;
						}
						if (incr > net->mtu) {
							incr = net->mtu;
						}
						break;
					default:
						incr = net->net_ack;
						if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) {
							incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable);
						}
						break;
					}
					net->cwnd += incr;
					sctp_enforce_cwnd_limit(asoc, net);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
						sctp_log_cwnd(stcb, net, incr,
						    SCTP_CWND_LOG_FROM_SS);
					}
					SDT_PROBE5(sctp, cwnd, net, ack,
					    stcb->asoc.my_vtag,
					    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
					    net,
					    old_cwnd, net->cwnd);
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
						    SCTP_CWND_LOG_NOADV_SS);
					}
				}
			} else {
				/* We are in congestion avoidance */
				/*
				 * Add to pba
				 */
				net->partial_bytes_acked += net->net_ack;

				if ((net->flight_size + net->net_ack >= net->cwnd) &&
				    (net->partial_bytes_acked >= net->cwnd)) {
					net->partial_bytes_acked -= net->cwnd;
					old_cwnd = net->cwnd;
					switch (asoc->sctp_cmt_on_off) {
					case SCTP_CMT_RPV1:
						incr = (uint32_t)(((uint64_t)net->mtu *
						    (uint64_t)net->ssthresh) /
						    (uint64_t)t_ssthresh);
						if (incr == 0) {
							incr = 1;
						}
						break;
					case SCTP_CMT_RPV2:
						/*
						 * lastsa>>3;  we don't need
						 * to divide ...
						 */
						srtt = net->lastsa;
						if (srtt == 0) {
							srtt = 1;
						}
						incr = (uint32_t)((uint64_t)net->mtu *
						    (uint64_t)net->cwnd /
						    ((uint64_t)srtt *
						    t_ucwnd_sbw));
						/* INCREASE FACTOR */
						if (incr == 0) {
							incr = 1;
						}
						break;
					case SCTP_CMT_MPTCP:
						incr = (uint32_t)((mptcp_like_alpha *
						    (uint64_t)net->cwnd) >>
						    SHIFT_MPTCP_MULTI);
						if (incr > net->mtu) {
							incr = net->mtu;
						}
						break;
					default:
						incr = net->mtu;
						break;
					}
					net->cwnd += incr;
					sctp_enforce_cwnd_limit(asoc, net);
					SDT_PROBE5(sctp, cwnd, net, ack,
					    stcb->asoc.my_vtag,
					    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
					    net,
					    old_cwnd, net->cwnd);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
						sctp_log_cwnd(stcb, net, net->mtu,
						    SCTP_CWND_LOG_FROM_CA);
					}
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
						    SCTP_CWND_LOG_NOADV_CA);
					}
				}
			}
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_NO_CUMACK);
			}
		}
	}
}

static void
sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int old_cwnd;

	old_cwnd = net->cwnd;
	net->cwnd = net->mtu;
	SDT_PROBE5(sctp, cwnd, net, ack,
	    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
	    old_cwnd, net->cwnd);
	SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
	    (void *)net, net->cwnd);
}

static void
sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int old_cwnd = net->cwnd;
	uint32_t t_ssthresh, t_cwnd;
	uint64_t t_ucwnd_sbw;

	/* MT FIXME: Don't compute this over and over again */
	t_ssthresh = 0;
	t_cwnd = 0;
	if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
	    (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
		struct sctp_nets *lnet;
		uint32_t srtt;

		t_ucwnd_sbw = 0;
		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			t_ssthresh += lnet->ssthresh;
			t_cwnd += lnet->cwnd;
			srtt = lnet->lastsa;
			/* lastsa>>3;  we don't need to divide ... */
			if (srtt > 0) {
				t_ucwnd_sbw += (uint64_t)lnet->cwnd / (uint64_t)srtt;
			}
		}
		if (t_ssthresh < 1) {
			t_ssthresh = 1;
		}
		if (t_ucwnd_sbw < 1) {
			t_ucwnd_sbw = 1;
		}
		if (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) {
			net->ssthresh = (uint32_t)(((uint64_t)4 *
			    (uint64_t)net->mtu *
			    (uint64_t)net->ssthresh) /
			    (uint64_t)t_ssthresh);
		} else {
			uint64_t cc_delta;

			srtt = net->lastsa;
			/* lastsa>>3;  we don't need to divide ... */
			if (srtt == 0) {
				srtt = 1;
			}
			cc_delta = t_ucwnd_sbw * (uint64_t)srtt / 2;
			if (cc_delta < t_cwnd) {
				net->ssthresh = (uint32_t)((uint64_t)t_cwnd - cc_delta);
			} else {
				net->ssthresh = net->mtu;
			}
		}
		if ((net->cwnd > t_cwnd / 2) &&
		    (net->ssthresh < net->cwnd - t_cwnd / 2)) {
			net->ssthresh = net->cwnd - t_cwnd / 2;
		}
		if (net->ssthresh < net->mtu) {
			net->ssthresh = net->mtu;
		}
	} else {
		net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
	}
	net->cwnd = net->mtu;
	net->partial_bytes_acked = 0;
	SDT_PROBE5(sctp, cwnd, net, to,
	    stcb->asoc.my_vtag,
	    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
	    net,
	    old_cwnd, net->cwnd);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
}
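
/*
 * Example: outside resource pooling, a timeout with cwnd = 20000 and an
 * MTU of 1500 sets ssthresh = max(20000 / 2, 4 * 1500) = 10000 and
 * restarts slow start from cwnd = 1500 (one MTU), as RFC 4960,
 * Section 7.2.3 prescribes.
 */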

static void
sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost, int use_rtcc)
{
	int old_cwnd = net->cwnd;

	if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) {
		/* Data center Congestion Control */
		if (in_window == 0) {
			/*
			 * Go to CA with the cwnd at the point we sent the
			 * TSN that was marked with a CE.
			 */
			if (net->ecn_prev_cwnd < net->cwnd) {
				/* Restore to prev cwnd */
				net->cwnd = net->ecn_prev_cwnd - (net->mtu * num_pkt_lost);
			} else {
				/* Just cut in 1/2 */
				net->cwnd /= 2;
			}
			/* Drop to CA */
			net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
			}
		} else {
			/*
			 * Further tuning down required over the drastic
			 * original cut
			 */
			net->ssthresh -= (net->mtu * num_pkt_lost);
			net->cwnd -= (net->mtu * num_pkt_lost);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
			}
		}
		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
	} else {
		if (in_window == 0) {
			SCTP_STAT_INCR(sctps_ecnereducedcwnd);
			net->ssthresh = net->cwnd / 2;
			if (net->ssthresh < net->mtu) {
				net->ssthresh = net->mtu;
				/*
				 * here back off the timer as well, to slow
				 * us down
				 */
				net->RTO <<= 1;
			}
			net->cwnd = net->ssthresh;
			SDT_PROBE5(sctp, cwnd, net, ecn,
			    stcb->asoc.my_vtag,
			    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
			    net,
			    old_cwnd, net->cwnd);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
			}
		}
	}
}

static void
sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
    struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
    uint32_t *bottle_bw, uint32_t *on_queue)
{
	uint32_t bw_avail;
	unsigned int incr;
	int old_cwnd = net->cwnd;

	/* get the bottleneck bw */
	*bottle_bw = ntohl(cp->bottle_bw);
	/* and what's on queue */
	*on_queue = ntohl(cp->current_onq);
	/*
	 * Adjust the on-queue value if our flight is larger; it could be
	 * that the router has not yet received all of our data
	 * "in-flight".
	 */
	if (*on_queue < net->flight_size) {
		*on_queue = net->flight_size;
	}
	/* rtt is measured in micro seconds, bottle_bw in bytes per second */
	bw_avail = (uint32_t)(((uint64_t)(*bottle_bw) * net->rtt) / (uint64_t)1000000);
	if (bw_avail > *bottle_bw) {
		/*
		 * Cap the growth to no more than the bottleneck.  This can
		 * happen as RTT slides up due to queues.  It also means
		 * that with more than a 1 second RTT and an empty queue
		 * you will be limited to bottle_bw bytes per second, even
		 * if other endpoints have half the RTT and could get more
		 * out...
		 */
		bw_avail = *bottle_bw;
	}
	if (*on_queue > bw_avail) {
		/*
		 * No room for anything else; don't allow anything else to
		 * be "added to the fire".
		 */
		int seg_inflight, seg_onqueue, my_portion;

		net->partial_bytes_acked = 0;
		/* How much are we over queue size? */
		incr = *on_queue - bw_avail;
		if (stcb->asoc.seen_a_sack_this_pkt) {
			/*
			 * Undo any cwnd adjustment that the sack might
			 * have made.
			 */
			net->cwnd = net->prev_cwnd;
		}
		/* Now how much of that is mine? */
		seg_inflight = net->flight_size / net->mtu;
		seg_onqueue = *on_queue / net->mtu;
		my_portion = (incr * seg_inflight) / seg_onqueue;

		/* Have I made an adjustment already? */
		if (net->cwnd > net->flight_size) {
			/*
			 * We already made an adjustment for this flight,
			 * so decrease our portion by the share covered by
			 * that previous adjustment.
			 */
			int diff_adj;

			diff_adj = net->cwnd - net->flight_size;
			if (diff_adj > my_portion)
				my_portion = 0;
			else
				my_portion -= diff_adj;
		}
		/*
		 * Back down to the previous cwnd (assuming we have had a
		 * sack before this packet), minus whatever portion of the
		 * overage is our fault.
		 */
		net->cwnd -= my_portion;

		/* we will NOT back down more than 1 MTU */
		if (net->cwnd <= net->mtu) {
			net->cwnd = net->mtu;
		}
		/* force into CA */
		net->ssthresh = net->cwnd - 1;
	} else {
		/*
		 * Take 1/4 of the space left or the max burst, whichever
		 * is less.
		 */
		incr = (bw_avail - *on_queue) >> 2;
		if ((stcb->asoc.max_burst > 0) &&
		    (stcb->asoc.max_burst * net->mtu < incr)) {
			incr = stcb->asoc.max_burst * net->mtu;
		}
		net->cwnd += incr;
	}
	if (net->cwnd > bw_avail) {
		/* We can't exceed the pipe size */
		net->cwnd = bw_avail;
	}
	if (net->cwnd < net->mtu) {
		/* We always have 1 MTU */
		net->cwnd = net->mtu;
	}
	sctp_enforce_cwnd_limit(&stcb->asoc, net);
	if (net->cwnd - old_cwnd != 0) {
		/* log only changes */
		SDT_PROBE5(sctp, cwnd, net, pd,
		    stcb->asoc.my_vtag,
		    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
		    net,
		    old_cwnd, net->cwnd);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
			    SCTP_CWND_LOG_FROM_SAT);
		}
	}
}
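
/*
 * Example of the bandwidth math above: bottle_bw = 1000000 bytes/sec and
 * rtt = 100000 usec give bw_avail = 1000000 * 100000 / 1000000 = 100000
 * bytes of pipe.  With on_queue = 120000 the overage is 20000 bytes, and
 * each sender backs off by the share of that overage that its own flight
 * represents.
 */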

static void
sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
    struct sctp_nets *net, int burst_limit)
{
	int old_cwnd = net->cwnd;

	if (net->ssthresh < net->cwnd)
		net->ssthresh = net->cwnd;
	if (burst_limit) {
		net->cwnd = (net->flight_size + (burst_limit * net->mtu));
		sctp_enforce_cwnd_limit(&stcb->asoc, net);
		SDT_PROBE5(sctp, cwnd, net, bl,
		    stcb->asoc.my_vtag,
		    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
		    net,
		    old_cwnd, net->cwnd);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
		}
	}
}

static void
sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
	/* Passing zero as the last argument disables the rtcc algorithm. */
	sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0);
}

static void
sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
	/* Passing zero as the last argument disables the rtcc algorithm. */
	sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0);
}

/*
 * Here starts the RTCCVAR type CC invented by RRS, which is a slight
 * modification of RFC 2581.  We reuse a common routine or two since these
 * algorithms are so close and need to remain the same.
 */
static void
sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
	sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1);
}

static void
sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net,
    struct sctp_tmit_chunk *tp1)
{
	net->cc_mod.rtcc.bw_bytes += tp1->send_size;
}

static void
sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net)
{
	if (net->cc_mod.rtcc.tls_needs_set > 0) {
		/* We had a bw measurement going on */
		struct timeval ltls;

		SCTP_GETPTIME_TIMEVAL(&ltls);
		timevalsub(&ltls, &net->cc_mod.rtcc.tls);
		net->cc_mod.rtcc.new_tot_time = (ltls.tv_sec * 1000000) + ltls.tv_usec;
	}
}

static void
sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint64_t vtag, probepoint;

	if (net->cc_mod.rtcc.lbw) {
		/* Clear the old bw; we went to 0 in-flight. */
		vtag = (net->rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
		    (stcb->rport);
		probepoint = (((uint64_t)net->cwnd) << 32);
		/* Probe point 8 */
		probepoint |= ((8 << 16) | 0);
		SDT_PROBE5(sctp, cwnd, net, rttvar,
		    vtag,
		    ((net->cc_mod.rtcc.lbw << 32) | 0),
		    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
		    net->flight_size,
		    probepoint);
		net->cc_mod.rtcc.lbw_rtt = 0;
		net->cc_mod.rtcc.cwnd_at_bw_set = 0;
		net->cc_mod.rtcc.lbw = 0;
		net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
		net->cc_mod.rtcc.vol_reduce = 0;
		net->cc_mod.rtcc.bw_tot_time = 0;
		net->cc_mod.rtcc.bw_bytes = 0;
		net->cc_mod.rtcc.tls_needs_set = 0;
		if (net->cc_mod.rtcc.steady_step) {
			net->cc_mod.rtcc.vol_reduce = 0;
			net->cc_mod.rtcc.step_cnt = 0;
			net->cc_mod.rtcc.last_step_state = 0;
		}
		if (net->cc_mod.rtcc.ret_from_eq) {
			/* less aggressive one - reset cwnd too */
			uint32_t cwnd_in_mtu, cwnd;

			cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
			if (cwnd_in_mtu == 0) {
				/*
				 * Using 0 means that the value of RFC 4960
				 * is used.
				 */
				cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
			} else {
				/*
				 * We take the minimum of the burst limit
				 * and the initial congestion window.
				 */
				if ((stcb->asoc.max_burst > 0) && (cwnd_in_mtu > stcb->asoc.max_burst))
					cwnd_in_mtu = stcb->asoc.max_burst;
				cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
			}
			if (net->cwnd > cwnd) {
				/*
				 * Only set if we are not at a timeout
				 * (i.e. down to 1 MTU).
				 */
				net->cwnd = cwnd;
			}
		}
	}
}

static void
sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint64_t vtag, probepoint;

	sctp_set_initial_cc_param(stcb, net);
	stcb->asoc.use_precise_time = 1;
	probepoint = (((uint64_t)net->cwnd) << 32);
	probepoint |= ((9 << 16) | 0);
	vtag = (net->rtt << 32) |
	    (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
	    (stcb->rport);
	SDT_PROBE5(sctp, cwnd, net, rttvar,
	    vtag,
	    0,
	    0,
	    0,
	    probepoint);
	net->cc_mod.rtcc.lbw_rtt = 0;
	net->cc_mod.rtcc.cwnd_at_bw_set = 0;
	net->cc_mod.rtcc.vol_reduce = 0;
	net->cc_mod.rtcc.lbw = 0;
	net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
	net->cc_mod.rtcc.bw_tot_time = 0;
	net->cc_mod.rtcc.bw_bytes = 0;
	net->cc_mod.rtcc.tls_needs_set = 0;
	net->cc_mod.rtcc.ret_from_eq = SCTP_BASE_SYSCTL(sctp_rttvar_eqret);
	net->cc_mod.rtcc.steady_step = SCTP_BASE_SYSCTL(sctp_steady_step);
	net->cc_mod.rtcc.use_dccc_ecn = SCTP_BASE_SYSCTL(sctp_use_dccc_ecn);
	net->cc_mod.rtcc.step_cnt = 0;
	net->cc_mod.rtcc.last_step_state = 0;
}

static int
sctp_cwnd_rtcc_socket_option(struct sctp_tcb *stcb, int setorget,
    struct sctp_cc_option *cc_opt)
{
	struct sctp_nets *net;

	if (setorget == 1) {
		/* a set */
		if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
			if ((cc_opt->aid_value.assoc_value != 0) &&
			    (cc_opt->aid_value.assoc_value != 1)) {
				return (EINVAL);
			}
			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
				net->cc_mod.rtcc.ret_from_eq = cc_opt->aid_value.assoc_value;
			}
		} else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
			if ((cc_opt->aid_value.assoc_value != 0) &&
			    (cc_opt->aid_value.assoc_value != 1)) {
				return (EINVAL);
			}
			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
				net->cc_mod.rtcc.use_dccc_ecn = cc_opt->aid_value.assoc_value;
			}
		} else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
			TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
				net->cc_mod.rtcc.steady_step = cc_opt->aid_value.assoc_value;
			}
		} else {
			return (EINVAL);
		}
	} else {
		/* a get */
		if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
			net = TAILQ_FIRST(&stcb->asoc.nets);
			if (net == NULL) {
				return (EFAULT);
			}
			cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.ret_from_eq;
		} else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
			net = TAILQ_FIRST(&stcb->asoc.nets);
			if (net == NULL) {
				return (EFAULT);
			}
			cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.use_dccc_ecn;
		} else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
			net = TAILQ_FIRST(&stcb->asoc.nets);
			if (net == NULL) {
				return (EFAULT);
			}
			cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.steady_step;
		} else {
			return (EINVAL);
		}
	}
	return (0);
}
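
/*
 * Userland sketch of driving the options above (assuming the
 * SCTP_CC_OPTION socket option and the sctp_cc_option layout from
 * netinet/sctp.h; adjust names to the headers actually installed):
 *
 *	struct sctp_cc_option opt;
 *
 *	memset(&opt, 0, sizeof(opt));
 *	opt.option = SCTP_CC_OPT_USE_DCCC_ECN;
 *	opt.aid_value.assoc_id = assoc_id;
 *	opt.aid_value.assoc_value = 1;
 *	if (setsockopt(fd, IPPROTO_SCTP, SCTP_CC_OPTION,
 *	    &opt, sizeof(opt)) < 0)
 *		err(1, "setsockopt(SCTP_CC_OPTION)");
 */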

static void
sctp_cwnd_update_rtcc_packet_transmitted(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net)
{
	if (net->cc_mod.rtcc.tls_needs_set == 0) {
		SCTP_GETPTIME_TIMEVAL(&net->cc_mod.rtcc.tls);
		net->cc_mod.rtcc.tls_needs_set = 2;
	}
}

static void
sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
	/* Passing one as the last argument enables the rtcc algorithm. */
	sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1);
}

static void
sctp_rtt_rtcc_calculated(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net,
    struct timeval *now SCTP_UNUSED)
{
	net->cc_mod.rtcc.rtt_set_this_sack = 1;
}
/* Here starts Sally Floyd's HS-TCP */

struct sctp_hs_raise_drop {
	int32_t cwnd;
	int8_t increase;
	int8_t drop_percent;
};

#define SCTP_HS_TABLE_SIZE 73

static const struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
	{38, 1, 50},		/* 0   */
	{118, 2, 44},		/* 1   */
	{221, 3, 41},		/* 2   */
	{347, 4, 38},		/* 3   */
	{495, 5, 37},		/* 4   */
	{663, 6, 35},		/* 5   */
	{851, 7, 34},		/* 6   */
	{1058, 8, 33},		/* 7   */
	{1284, 9, 32},		/* 8   */
	{1529, 10, 31},		/* 9   */
	{1793, 11, 30},		/* 10  */
	{2076, 12, 29},		/* 11  */
	{2378, 13, 28},		/* 12  */
	{2699, 14, 28},		/* 13  */
	{3039, 15, 27},		/* 14  */
	{3399, 16, 27},		/* 15  */
	{3778, 17, 26},		/* 16  */
	{4177, 18, 26},		/* 17  */
	{4596, 19, 25},		/* 18  */
	{5036, 20, 25},		/* 19  */
	{5497, 21, 24},		/* 20  */
	{5979, 22, 24},		/* 21  */
	{6483, 23, 23},		/* 22  */
	{7009, 24, 23},		/* 23  */
	{7558, 25, 22},		/* 24  */
	{8130, 26, 22},		/* 25  */
	{8726, 27, 22},		/* 26  */
	{9346, 28, 21},		/* 27  */
	{9991, 29, 21},		/* 28  */
	{10661, 30, 21},	/* 29  */
	{11358, 31, 20},	/* 30  */
	{12082, 32, 20},	/* 31  */
	{12834, 33, 20},	/* 32  */
	{13614, 34, 19},	/* 33  */
	{14424, 35, 19},	/* 34  */
	{15265, 36, 19},	/* 35  */
	{16137, 37, 19},	/* 36  */
	{17042, 38, 18},	/* 37  */
	{17981, 39, 18},	/* 38  */
	{18955, 40, 18},	/* 39  */
	{19965, 41, 17},	/* 40  */
	{21013, 42, 17},	/* 41  */
	{22101, 43, 17},	/* 42  */
	{23230, 44, 17},	/* 43  */
	{24402, 45, 16},	/* 44  */
	{25618, 46, 16},	/* 45  */
	{26881, 47, 16},	/* 46  */
	{28193, 48, 16},	/* 47  */
	{29557, 49, 15},	/* 48  */
	{30975, 50, 15},	/* 49  */
	{32450, 51, 15},	/* 50  */
	{33986, 52, 15},	/* 51  */
	{35586, 53, 14},	/* 52  */
	{37253, 54, 14},	/* 53  */
	{38992, 55, 14},	/* 54  */
	{40808, 56, 14},	/* 55  */
	{42707, 57, 13},	/* 56  */
	{44694, 58, 13},	/* 57  */
	{46776, 59, 13},	/* 58  */
	{48961, 60, 13},	/* 59  */
	{51258, 61, 13},	/* 60  */
	{53677, 62, 12},	/* 61  */
	{56230, 63, 12},	/* 62  */
	{58932, 64, 12},	/* 63  */
	{61799, 65, 12},	/* 64  */
	{64851, 66, 11},	/* 65  */
	{68113, 67, 11},	/* 66  */
	{71617, 68, 11},	/* 67  */
	{75401, 69, 10},	/* 68  */
	{79517, 70, 10},	/* 69  */
	{84035, 71, 10},	/* 70  */
	{89053, 72, 10},	/* 71  */
	{94717, 73, 9}		/* 72  */
};
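
/*
 * The cwnd column is in units of 1024 bytes (the code compares it with
 * net->cwnd >> 10), increase is in the same 1 KB units, and drop_percent
 * is the percentage cut taken on loss.  Example: cwnd = 40960 gives
 * cur_val = 40; the first row with cwnd > 40 is row 1 ({118, 2, 44}), so
 * slow start adds 2 << 10 = 2048 bytes, and a loss in that regime cuts
 * cwnd by 44%.
 */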

static void
sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int cur_val, i, indx, incr;
	int old_cwnd = net->cwnd;

	cur_val = net->cwnd >> 10;
	indx = SCTP_HS_TABLE_SIZE - 1;

	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
		/* normal mode */
		if (net->net_ack > net->mtu) {
			net->cwnd += net->mtu;
		} else {
			net->cwnd += net->net_ack;
		}
	} else {
		for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
			if (cur_val < sctp_cwnd_adjust[i].cwnd) {
				indx = i;
				break;
			}
		}
		net->last_hs_used = indx;
		incr = (((int32_t)sctp_cwnd_adjust[indx].increase) << 10);
		net->cwnd += incr;
	}
	sctp_enforce_cwnd_limit(&stcb->asoc, net);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SS);
	}
}

static void
sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int cur_val, i, indx;
	int old_cwnd = net->cwnd;

	cur_val = net->cwnd >> 10;
	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
		/* normal mode */
		net->ssthresh = net->cwnd / 2;
		if (net->ssthresh < (net->mtu * 2)) {
			net->ssthresh = 2 * net->mtu;
		}
		net->cwnd = net->ssthresh;
	} else {
		/* drop by the proper amount */
		net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
		    (int32_t)sctp_cwnd_adjust[net->last_hs_used].drop_percent);
		net->cwnd = net->ssthresh;
		/* now where are we */
		indx = net->last_hs_used;
		cur_val = net->cwnd >> 10;
		/* reset where we are in the table */
		if (cur_val < sctp_cwnd_adjust[0].cwnd) {
			/* fell out of HS */
			net->last_hs_used = 0;
		} else {
			/* Walk down to the highest row still below cur_val. */
			for (i = indx; i >= 1; i--) {
				if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
					break;
				}
			}
			net->last_hs_used = i;
		}
	}
	sctp_enforce_cwnd_limit(&stcb->asoc, net);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
	}
}

static void
sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_nets *net;

	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* Out of an RFC 2582 fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * Per Section 7.2.3, check whether any
				 * destinations had a fast retransmit sent
				 * to them.  If so, adjust ssthresh and
				 * cwnd.
				 */
				struct sctp_tmit_chunk *lchk;

				sctp_hs_cwnd_decrease(stcb, net);

				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net,
				    SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_2);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC 2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}

static void
sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
{
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
		 * } else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif

		if (asoc->fast_retran_loss_recovery &&
		    (will_exit == 0) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			return;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			/* If the cumulative ack moved we can proceed */
			if (net->cwnd <= net->ssthresh) {
				/* We are in slow start */
				if (net->flight_size + net->net_ack >= net->cwnd) {
					sctp_hs_cwnd_increase(stcb, net);
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
						    SCTP_CWND_LOG_NOADV_SS);
					}
				}
			} else {
				/* We are in congestion avoidance */
				net->partial_bytes_acked += net->net_ack;
				if ((net->flight_size + net->net_ack >= net->cwnd) &&
				    (net->partial_bytes_acked >= net->cwnd)) {
					net->partial_bytes_acked -= net->cwnd;
					net->cwnd += net->mtu;
					sctp_enforce_cwnd_limit(asoc, net);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
						sctp_log_cwnd(stcb, net, net->mtu,
						    SCTP_CWND_LOG_FROM_CA);
					}
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
						    SCTP_CWND_LOG_NOADV_CA);
					}
				}
			}
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_NO_CUMACK);
			}
		}
	}
}

/*
 * H-TCP congestion control. The algorithm is detailed in:
 * R.N. Shorten, D.J. Leith:
 *   "H-TCP: TCP for high-speed and long-distance networks"
 *   Proc. PFLDnet, Argonne, 2004.
 * http://www.hamilton.ie/net/htcp3.pdf
 */

static int use_rtt_scaling = 1;
static int use_bandwidth_switch = 1;

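/*
 * Circular-sequence helper (added commentary): with unsigned wraparound,
 * the comparison below is true exactly when seq1 lies in the window
 * [seq2, seq3].  The only caller uses
 * !between(5 * maxB, 4 * old_maxB, 6 * old_maxB) to detect that the
 * measured maximum bandwidth moved by more than 20%.
 */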
static inline int
between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
	return (seq3 - seq2 >= seq1 - seq2);
}

static inline uint32_t
htcp_cong_time(struct htcp *ca)
{
	return (sctp_get_tick_count() - ca->last_cong);
}

static inline uint32_t
htcp_ccount(struct htcp *ca)
{
	return (ca->minRTT == 0 ? htcp_cong_time(ca) : htcp_cong_time(ca) / ca->minRTT);
}

static inline void
htcp_reset(struct htcp *ca)
{
	ca->undo_last_cong = ca->last_cong;
	ca->undo_maxRTT = ca->maxRTT;
	ca->undo_old_maxB = ca->old_maxB;
	ca->last_cong = sctp_get_tick_count();
}

#ifdef SCTP_NOT_USED

static uint32_t
htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong;
	net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT;
	net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB;
	return (max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->cc_mod.htcp_ca.beta) * net->mtu));
}

#endif

static inline void
measure_rtt(struct sctp_nets *net)
{
	uint32_t srtt = net->lastsa >> SCTP_RTT_SHIFT;

	/* keep track of minimum RTT seen so far, minRTT is zero at first */
	if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
		net->cc_mod.htcp_ca.minRTT = srtt;

	/* max RTT */
	if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) {
		if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
			net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT;
		if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT + MSEC_TO_TICKS(20))
			net->cc_mod.htcp_ca.maxRTT = srtt;
	}
}

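/*
 * Added commentary: cur_Bi below is an achieved-throughput sample in
 * packets per second -- bytecount / mtu packets delivered over
 * (now - lasttime) ticks, scaled by hz.  A sample is taken only once
 * roughly a cwnd's worth of data and at least one minRTT have elapsed,
 * and is smoothed 3:1 into the running estimate Bi.
 */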
static void
measure_achieved_throughput(struct sctp_nets *net)
{
	uint32_t now = sctp_get_tick_count();

	if (net->fast_retran_ip == 0)
		net->cc_mod.htcp_ca.bytes_acked = net->net_ack;

	if (!use_bandwidth_switch)
		return;

	/* achieved throughput calculations */
	/* JRS - not 100% sure of this statement */
	if (net->fast_retran_ip == 1) {
		net->cc_mod.htcp_ca.bytecount = 0;
		net->cc_mod.htcp_ca.lasttime = now;
		return;
	}

	net->cc_mod.htcp_ca.bytecount += net->net_ack;
	if ((net->cc_mod.htcp_ca.bytecount >= net->cwnd - (((net->cc_mod.htcp_ca.alpha >> 7) ? (net->cc_mod.htcp_ca.alpha >> 7) : 1) * net->mtu)) &&
	    (now - net->cc_mod.htcp_ca.lasttime >= net->cc_mod.htcp_ca.minRTT) &&
	    (net->cc_mod.htcp_ca.minRTT > 0)) {
		uint32_t cur_Bi = net->cc_mod.htcp_ca.bytecount / net->mtu * hz / (now - net->cc_mod.htcp_ca.lasttime);

		if (htcp_ccount(&net->cc_mod.htcp_ca) <= 3) {
			/* just after backoff */
			net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi = cur_Bi;
		} else {
			net->cc_mod.htcp_ca.Bi = (3 * net->cc_mod.htcp_ca.Bi + cur_Bi) / 4;
			if (net->cc_mod.htcp_ca.Bi > net->cc_mod.htcp_ca.maxB)
				net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi;
			if (net->cc_mod.htcp_ca.minB > net->cc_mod.htcp_ca.maxB)
				net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB;
		}
		net->cc_mod.htcp_ca.bytecount = 0;
		net->cc_mod.htcp_ca.lasttime = now;
	}
}

static inline void
htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
{
	if (use_bandwidth_switch) {
		uint32_t maxB = ca->maxB;
		uint32_t old_maxB = ca->old_maxB;

		ca->old_maxB = ca->maxB;

		if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
			ca->beta = BETA_MIN;
			ca->modeswitch = 0;
			return;
		}
	}

	if (ca->modeswitch && minRTT > (uint32_t)MSEC_TO_TICKS(10) && maxRTT) {
		ca->beta = (minRTT << 7) / maxRTT;
		if (ca->beta < BETA_MIN)
			ca->beta = BETA_MIN;
		else if (ca->beta > BETA_MAX)
			ca->beta = BETA_MAX;
	} else {
		ca->beta = BETA_MIN;
		ca->modeswitch = 1;
	}
}
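
/*
 * Worked example for htcp_beta_update() above (added commentary): beta
 * is kept in fixed point with 7 fractional bits, so
 * beta = (minRTT << 7) / maxRTT is 128 * minRTT / maxRTT.  With
 * minRTT = 20 ms and maxRTT = 40 ms the raw value is 64, i.e. a backoff
 * factor of 0.5; the result is then clamped to [BETA_MIN, BETA_MAX]
 * (nominally 0.5 and about 0.8 in the same fixed-point scale).
 */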

static inline void
htcp_alpha_update(struct htcp *ca)
{
	uint32_t minRTT = ca->minRTT;
	uint32_t factor = 1;
	uint32_t diff = htcp_cong_time(ca);

	if (diff > (uint32_t)hz) {
		diff -= hz;
		factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
	}

	if (use_rtt_scaling && minRTT) {
		uint32_t scale = (hz << 3) / (10 * minRTT);

		scale = min(max(scale, 1U << 2), 10U << 3);	/* clamping ratio to
								 * interval [0.5,10]<<3 */
		factor = (factor << 3) / scale;
		if (!factor)
			factor = 1;
	}

	ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
	if (!ca->alpha)
		ca->alpha = ALPHA_BASE;
}
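
/*
 * Worked example for htcp_alpha_update() above (added commentary): alpha
 * is also 7-bit fixed point.  Within one second of a congestion event
 * factor stays 1, so alpha = 2 * (128 - beta); with beta = 64 (0.5) that
 * is 128, an effective additive increase of one MTU per RTT, matching
 * RFC 4960 behavior.  Beyond one second of congestion-free time, factor
 * grows with the H-TCP polynomial 1 + 10*Delta + (Delta/2)^2 (Delta in
 * seconds past the first), RTT scaling aside, and the increase
 * accelerates.
 */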

/* After we have the rtt data to calculate beta, we'd still prefer to wait one
 * rtt before we adjust our beta to ensure we are working from consistent
 * data.
 *
 * This function should be called when we hit a congestion event, since only
 * at that point do we really have a real sense of maxRTT (the queues en
 * route were just getting too full).
 */
static void
htcp_param_update(struct sctp_nets *net)
{
	uint32_t minRTT = net->cc_mod.htcp_ca.minRTT;
	uint32_t maxRTT = net->cc_mod.htcp_ca.maxRTT;

	htcp_beta_update(&net->cc_mod.htcp_ca, minRTT, maxRTT);
	htcp_alpha_update(&net->cc_mod.htcp_ca);

	/*
	 * add slowly fading memory for maxRTT to accommodate routing
	 * changes etc.
	 */
	if (minRTT > 0 && maxRTT > minRTT)
		net->cc_mod.htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
}

static uint32_t
htcp_recalc_ssthresh(struct sctp_nets *net)
{
	htcp_param_update(net);
	return (max(((net->cwnd / net->mtu * net->cc_mod.htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu));
}
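
/*
 * Worked example for htcp_recalc_ssthresh() above (added commentary):
 * with cwnd = 100 MTUs and beta = 102 (~0.8 in 7-bit fixed point),
 * ssthresh becomes (100 * 102) >> 7 = 79 MTUs, and is never allowed to
 * drop below the 2-MTU floor needed for slow start to restart sensibly.
 */
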
2046
2047static void
2048htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
2049{
2050	/*-
2051	 * How to handle these functions?
2052         *	if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
2053	 *		return;
2054	 */
2055	if (net->cwnd <= net->ssthresh) {
2056		/* We are in slow start */
2057		if (net->flight_size + net->net_ack >= net->cwnd) {
2058			if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
2059				net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
2060				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
2061					sctp_log_cwnd(stcb, net, net->mtu,
2062					    SCTP_CWND_LOG_FROM_SS);
2063				}
2064
2065			} else {
2066				net->cwnd += net->net_ack;
2067				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
2068					sctp_log_cwnd(stcb, net, net->net_ack,
2069					    SCTP_CWND_LOG_FROM_SS);
2070				}
2071
2072			}
2073			sctp_enforce_cwnd_limit(&stcb->asoc, net);
2074		} else {
2075			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2076				sctp_log_cwnd(stcb, net, net->net_ack,
2077				    SCTP_CWND_LOG_NOADV_SS);
2078			}
2079		}
2080	} else {
2081		measure_rtt(net);
2082
2083		/*
2084		 * In dangerous area, increase slowly. In theory this is
2085		 * net->cwnd += alpha / net->cwnd
2086		 */
2087		/* What is snd_cwnd_cnt?? */
2088		if (((net->partial_bytes_acked / net->mtu * net->cc_mod.htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
2089			/*-
2090			 * Does SCTP have a cwnd clamp?
2091			 * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
2092			 */
2093			net->cwnd += net->mtu;
2094			net->partial_bytes_acked = 0;
2095			sctp_enforce_cwnd_limit(&stcb->asoc, net);
2096			htcp_alpha_update(&net->cc_mod.htcp_ca);
2097			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
2098				sctp_log_cwnd(stcb, net, net->mtu,
2099				    SCTP_CWND_LOG_FROM_CA);
2100			}
2101		} else {
2102			net->partial_bytes_acked += net->net_ack;
2103			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2104				sctp_log_cwnd(stcb, net, net->net_ack,
2105				    SCTP_CWND_LOG_NOADV_CA);
2106			}
2107		}
2108
2109		net->cc_mod.htcp_ca.bytes_acked = net->mtu;
2110	}
2111}
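
/*
 * Added commentary on the congestion-avoidance step in htcp_cong_avoid()
 * above: the test
 * ((partial_bytes_acked / mtu * alpha) >> 7) * mtu >= cwnd fires once
 * roughly cwnd * 128 / alpha bytes have been newly acked, so the window
 * grows by about alpha/128 MTUs per RTT.  E.g. with alpha = 256 (2.0 in
 * 7-bit fixed point) the cwnd gains about two MTUs per RTT.
 */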

#ifdef SCTP_NOT_USED
/* Lower bound on congestion window. */
static uint32_t
htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	return (net->ssthresh);
}
#endif

static void
htcp_init(struct sctp_nets *net)
{
	memset(&net->cc_mod.htcp_ca, 0, sizeof(struct htcp));
	net->cc_mod.htcp_ca.alpha = ALPHA_BASE;
	net->cc_mod.htcp_ca.beta = BETA_MIN;
	net->cc_mod.htcp_ca.bytes_acked = net->mtu;
	net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count();
}

static void
sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Start with the larger of 2 MTUs and SCTP_INITIAL_CWND, but never
	 * more than 4 MTUs of sending, per RFC 4960.
	 */
	net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
	net->ssthresh = stcb->asoc.peers_rwnd;
	sctp_enforce_cwnd_limit(&stcb->asoc, net);
	htcp_init(net);

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
	}
}

static void
sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
{
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
		 * } else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif

		if (asoc->fast_retran_loss_recovery &&
		    (will_exit == 0) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			return;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			htcp_cong_avoid(stcb, net);
			measure_achieved_throughput(net);
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_NO_CUMACK);
			}
		}
	}
}

static void
sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_nets *net;

	/*
	 * CMT fast recovery code. Still needs debugging; the per-net test
	 * would be ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)).
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of an RFC 2582 fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * Per RFC 4960, Section 7.2.3: were there
				 * any destinations that had a fast
				 * retransmit sent to them?  If so, adjust
				 * ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;
				int old_cwnd = net->cwnd;

				/* JRS - reset as if state were changed */
				htcp_reset(&net->cc_mod.htcp_ca);
				net->ssthresh = htcp_recalc_ssthresh(net);
				net->cwnd = net->ssthresh;
				sctp_enforce_cwnd_limit(asoc, net);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
					    SCTP_CWND_LOG_FROM_FR);
				}
				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
				}

				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net,
				    SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_3);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Bump a counter: we WOULD have done a cwnd
			 * reduction here, but RFC 2582 prevented it.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}

static void
sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	int old_cwnd = net->cwnd;

	/* JRS - reset as if the state were being changed to timeout */
	htcp_reset(&net->cc_mod.htcp_ca);
	net->ssthresh = htcp_recalc_ssthresh(net);
	net->cwnd = net->mtu;
	net->partial_bytes_acked = 0;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
}

static void
sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
    struct sctp_nets *net, int in_window, int num_pkt_lost SCTP_UNUSED)
{
	int old_cwnd;

	old_cwnd = net->cwnd;

	/* JRS - reset htcp as if the state changed */
	if (in_window == 0) {
		htcp_reset(&net->cc_mod.htcp_ca);
		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
		net->ssthresh = htcp_recalc_ssthresh(net);
		if (net->ssthresh < net->mtu) {
			net->ssthresh = net->mtu;
			/* here back off the timer as well, to slow us down */
			net->RTO <<= 1;
		}
		net->cwnd = net->ssthresh;
		sctp_enforce_cwnd_limit(&stcb->asoc, net);
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
		}
	}
}
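
/*
 * Added note: the four entries below are indexed by the congestion
 * control module constants from sctp.h -- SCTP_CC_RFC2581 (the RFC 4960
 * default), SCTP_CC_HSTCP, SCTP_CC_HTCP, and SCTP_CC_RTCC -- selectable
 * per association via the SCTP_PLUGGABLE_CC socket option or the
 * net.inet.sctp.default_cc_module sysctl.
 */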
const struct sctp_cc_functions sctp_cc_functions[] = {
	{
		.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
	{
		.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_hs_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_hs_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
	{
		.sctp_set_initial_cc_param = sctp_htcp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_htcp_cwnd_update_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_htcp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_htcp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_htcp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
	},
	{
		.sctp_set_initial_cc_param = sctp_set_rtcc_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_cwnd_update_rtcc_after_sack,
		.sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
		.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_rtcc_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
		.sctp_cwnd_update_packet_transmitted = sctp_cwnd_update_rtcc_packet_transmitted,
		.sctp_cwnd_update_tsn_acknowledged = sctp_cwnd_update_rtcc_tsn_acknowledged,
		.sctp_cwnd_new_transmission_begins = sctp_cwnd_new_rtcc_transmission_begins,
		.sctp_cwnd_prepare_net_for_sack = sctp_cwnd_prepare_rtcc_net_for_sack,
		.sctp_cwnd_socket_option = sctp_cwnd_rtcc_socket_option,
		.sctp_rtt_calculated = sctp_rtt_rtcc_calculated
	}
};