/* sctp_cc_functions.c -- FreeBSD revision 218232 */
1/*-
2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2011, by Randall Stewart, rrs@lakerest.net and
4 *                          Michael Tuexen, tuexen@fh-muenster.de
5 *                          All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are met:
9 *
10 * a) Redistributions of source code must retain the above copyright notice,
11 *   this list of conditions and the following disclaimer.
12 *
13 * b) Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in
15 *   the documentation and/or other materials provided with the distribution.
16 *
17 * c) Neither the name of Cisco Systems, Inc. nor the names of its
18 *    contributors may be used to endorse or promote products derived
19 *    from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
23 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include <netinet/sctp_os.h>
35#include <netinet/sctp_var.h>
36#include <netinet/sctp_sysctl.h>
37#include <netinet/sctp_pcb.h>
38#include <netinet/sctp_header.h>
39#include <netinet/sctputil.h>
40#include <netinet/sctp_output.h>
41#include <netinet/sctp_input.h>
42#include <netinet/sctp_indata.h>
43#include <netinet/sctp_uio.h>
44#include <netinet/sctp_timer.h>
45#include <netinet/sctp_auth.h>
46#include <netinet/sctp_asconf.h>
47#include <netinet/sctp_dtrace_declare.h>
48#include <sys/cdefs.h>
49__FBSDID("$FreeBSD: head/sys/netinet/sctp_cc_functions.c 218232 2011-02-03 19:22:21Z rrs $");
50
51static void
52sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
53{
54	struct sctp_association *assoc;
55	uint32_t cwnd_in_mtu;
56
57	assoc = &stcb->asoc;
58	cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
59	if (cwnd_in_mtu == 0) {
60		/* Using 0 means that the value of RFC 4960 is used. */
61		net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
62	} else {
63		/*
64		 * We take the minimum of the burst limit and the initial
65		 * congestion window.
66		 */
67		if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
68			cwnd_in_mtu = assoc->max_burst;
69		net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
70	}
71	if (stcb->asoc.sctp_cmt_on_off == 2) {
72		/* In case of resource pooling initialize appropriately */
73		net->cwnd /= assoc->numnets;
74		if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
75			net->cwnd = net->mtu - sizeof(struct sctphdr);
76		}
77	}
78	net->ssthresh = assoc->peers_rwnd;
79
80	SDT_PROBE(sctp, cwnd, net, init,
81	    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
82	    0, net->cwnd);
83	if (SCTP_BASE_SYSCTL(sctp_logging_level) &
84	    (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
85		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
86	}
87}
88
/*
 * Fast-retransmit reaction for the default (RFC 4960) CC module: reduce
 * ssthresh/cwnd on every destination that had a chunk fast-retransmitted
 * and open a fast-recovery window (association-wide and per-destination),
 * then restart the T3 send timer for that destination.
 */
static void
sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_nets *net;
	/* Totals across all nets; weight the CMT-RP reduction below. */
	uint32_t t_ssthresh, t_cwnd;

	/* MT FIXME: Don't compute this over and over again */
	t_ssthresh = 0;
	t_cwnd = 0;
	if (asoc->sctp_cmt_on_off == 2) {
		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
			t_ssthresh += net->ssthresh;
			t_cwnd += net->cwnd;
		}
	}
	/*-
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of a RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * per section 7.2.3, are there any
				 * destinations that had a fast retransmit
				 * to them. If so what we need to do is
				 * adjust ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;
				int old_cwnd = net->cwnd;

				if (asoc->sctp_cmt_on_off == 2) {
					/*
					 * Resource pooling: scale by this
					 * net's share of the aggregate
					 * ssthresh; 64-bit intermediates
					 * avoid overflowing the product.
					 */
					net->ssthresh = (uint32_t) (((uint64_t) 4 *
					    (uint64_t) net->mtu *
					    (uint64_t) net->ssthresh) /
					    (uint64_t) t_ssthresh);
					if ((net->cwnd > t_cwnd / 2) &&
					    (net->ssthresh < net->cwnd - t_cwnd / 2)) {
						net->ssthresh = net->cwnd - t_cwnd / 2;
					}
					if (net->ssthresh < net->mtu) {
						net->ssthresh = net->mtu;
					}
				} else {
					/* RFC 4960: halve, floor of 2 MTUs. */
					net->ssthresh = net->cwnd / 2;
					if (net->ssthresh < (net->mtu * 2)) {
						net->ssthresh = 2 * net->mtu;
					}
				}
				net->cwnd = net->ssthresh;
				SDT_PROBE(sctp, cwnd, net, fr,
				    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
				    old_cwnd, net->cwnd);
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
					    SCTP_CWND_LOG_FROM_FR);
				}
				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * NOTE(review): the stop-location constant
				 * says SCTP_FROM_SCTP_INDATA although this
				 * lives in sctp_cc_functions.c -- presumably
				 * kept from where the code originated;
				 * confirm before changing.
				 */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}
188
/*
 * Per-SACK cwnd update for the default (RFC 4960) CC module: manage the
 * Early FR timer, clear error counts / exit PF state when new data was
 * acked, then grow cwnd in slow start or congestion avoidance for each
 * destination, and finally restore any backed-off RTO per Karn's rule.
 */
static void
sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
	struct sctp_nets *net;
	int old_cwnd;
	/* t_ssthresh/t_cwnd: totals over all nets for CMT resource pooling. */
	uint32_t t_ssthresh, t_cwnd, incr;

	/* MT FIXME: Don't compute this over and over again */
	t_ssthresh = 0;
	t_cwnd = 0;
	if (stcb->asoc.sctp_cmt_on_off == 2) {
		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			t_ssthresh += net->ssthresh;
			t_cwnd += net->cwnd;
		}
	}
	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {

#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
			/*
			 * So, first of all do we need to have a Early FR
			 * timer running?
			 */
			if ((!TAILQ_EMPTY(&asoc->sent_queue) &&
			    (net->ref_count > 1) &&
			    (net->flight_size < net->cwnd)) ||
			    (reneged_all)) {
				/*
				 * yes, so in this case stop it if its
				 * running, and then restart it. Reneging
				 * all is a special case where we want to
				 * run the Early FR timer and then force the
				 * last few unacked to be sent, causing us
				 * to illicit a sack with gaps to force out
				 * the others.
				 */
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
				}
				SCTP_STAT_INCR(sctps_earlyfrstrid);
				sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
			} else {
				/* No, stop it if its running */
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
				}
			}
		}
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
		if (net->net_ack2 > 0) {
			/*
			 * Karn's rule applies to clearing error count, this
			 * is optional.
			 */
			net->error_count = 0;
			if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
			    SCTP_ADDR_NOT_REACHABLE) {
				/* addr came good */
				net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
				net->dest_state |= SCTP_ADDR_REACHABLE;
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
				    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
				/* now was it the primary? if so restore */
				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
					(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
				}
			}
			/*
			 * JRS 5/14/07 - If CMT PF is on and the destination
			 * is in PF state, set the destination to active
			 * state and set the cwnd to one or two MTU's based
			 * on whether PF1 or PF2 is being used.
			 *
			 * Should we stop any running T3 timer here?
			 */
			if ((asoc->sctp_cmt_on_off > 0) &&
			    (asoc->sctp_cmt_pf > 0) &&
			    ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
				net->dest_state &= ~SCTP_ADDR_PF;
				old_cwnd = net->cwnd;
				net->cwnd = net->mtu * asoc->sctp_cmt_pf;
				SDT_PROBE(sctp, cwnd, net, ack,
				    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
				    old_cwnd, net->cwnd);
				SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
				    net, net->cwnd);
				/*
				 * Since the cwnd value is explicitly set,
				 * skip the code that updates the cwnd
				 * value.
				 */
				goto skip_cwnd_update;
			}
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
		 * } else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif

		if (asoc->fast_retran_loss_recovery &&
		    (will_exit == 0) &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			goto skip_cwnd_update;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			/* If the cumulative ack moved we can proceed */
			if (net->cwnd <= net->ssthresh) {
				/* We are in slow start */
				if (net->flight_size + net->net_ack >= net->cwnd) {
					old_cwnd = net->cwnd;
					if (stcb->asoc.sctp_cmt_on_off == 2) {
						/*
						 * Resource pooling: grow in
						 * proportion to this net's
						 * share of total ssthresh,
						 * capped by the ABC limit.
						 */
						uint32_t limit;

						limit = (uint32_t) (((uint64_t) net->mtu *
						    (uint64_t) SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
						    (uint64_t) net->ssthresh) /
						    (uint64_t) t_ssthresh);
						incr = (uint32_t) (((uint64_t) net->net_ack *
						    (uint64_t) net->ssthresh) /
						    (uint64_t) t_ssthresh);
						if (incr > limit) {
							incr = limit;
						}
						if (incr == 0) {
							incr = 1;
						}
					} else {
						/* ABC: at most L * MTU per SACK. */
						incr = net->net_ack;
						if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) {
							incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable);
						}
					}
					net->cwnd += incr;
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
						sctp_log_cwnd(stcb, net, incr,
						    SCTP_CWND_LOG_FROM_SS);
					}
					SDT_PROBE(sctp, cwnd, net, ack,
					    stcb->asoc.my_vtag,
					    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
					    net,
					    old_cwnd, net->cwnd);
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
						    SCTP_CWND_LOG_NOADV_SS);
					}
				}
			} else {
				/* We are in congestion avoidance */
				/*
				 * Add to pba
				 */
				net->partial_bytes_acked += net->net_ack;

				if ((net->flight_size + net->net_ack >= net->cwnd) &&
				    (net->partial_bytes_acked >= net->cwnd)) {
					net->partial_bytes_acked -= net->cwnd;
					old_cwnd = net->cwnd;
					if (asoc->sctp_cmt_on_off == 2) {
						incr = (uint32_t) (((uint64_t) net->mtu *
						    (uint64_t) net->ssthresh) /
						    (uint64_t) t_ssthresh);
						if (incr == 0) {
							incr = 1;
						}
					} else {
						incr = net->mtu;
					}
					net->cwnd += incr;
					SDT_PROBE(sctp, cwnd, net, ack,
					    stcb->asoc.my_vtag,
					    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
					    net,
					    old_cwnd, net->cwnd);
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
						sctp_log_cwnd(stcb, net, net->mtu,
						    SCTP_CWND_LOG_FROM_CA);
					}
				} else {
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
						sctp_log_cwnd(stcb, net, net->net_ack,
						    SCTP_CWND_LOG_NOADV_CA);
					}
				}
			}
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_NO_CUMACK);
			}
		}
skip_cwnd_update:
		/*
		 * NOW, according to Karn's rule do we need to restore the
		 * RTO timer back? Check our net_ack2. If not set then we
		 * have a ambiguity.. i.e. all data ack'd was sent to more
		 * than one place.
		 */
		if (net->net_ack2) {
			/* restore any doubled timers */
			net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
			if (net->RTO < stcb->asoc.minrto) {
				net->RTO = stcb->asoc.minrto;
			}
			if (net->RTO > stcb->asoc.maxrto) {
				net->RTO = stcb->asoc.maxrto;
			}
		}
	}
}
442
/*
 * T3-RTX timeout reaction: collapse cwnd to one MTU and set ssthresh per
 * RFC 4960 (or, for CMT resource pooling, proportionally to this net's
 * share of the aggregate ssthresh).
 */
static void
sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int old_cwnd = net->cwnd;
	uint32_t t_ssthresh, t_cwnd;

	/* MT FIXME: Don't compute this over and over again */
	t_ssthresh = 0;
	t_cwnd = 0;
	if (stcb->asoc.sctp_cmt_on_off == 2) {
		struct sctp_nets *lnet;

		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			t_ssthresh += lnet->ssthresh;
			t_cwnd += lnet->cwnd;
		}
		/*
		 * Weight the new ssthresh by this net's share of the total;
		 * 64-bit intermediates avoid overflowing the product.
		 */
		net->ssthresh = (uint32_t) (((uint64_t) 4 *
		    (uint64_t) net->mtu *
		    (uint64_t) net->ssthresh) /
		    (uint64_t) t_ssthresh);
		if ((net->cwnd > t_cwnd / 2) &&
		    (net->ssthresh < net->cwnd - t_cwnd / 2)) {
			net->ssthresh = net->cwnd - t_cwnd / 2;
		}
		if (net->ssthresh < net->mtu) {
			net->ssthresh = net->mtu;
		}
	} else {
		/* RFC 4960 section 7.2.3: half of cwnd, floor of 4 MTUs. */
		net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
	}
	net->cwnd = net->mtu;
	net->partial_bytes_acked = 0;
	SDT_PROBE(sctp, cwnd, net, to,
	    stcb->asoc.my_vtag,
	    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
	    net,
	    old_cwnd, net->cwnd);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
}
484
485
486static void
487sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
488    int in_window, int num_pkt_lost)
489{
490	int old_cwnd = net->cwnd;
491
492	if (in_window == 0) {
493		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
494		net->ssthresh = net->cwnd / 2;
495		if (net->ssthresh < net->mtu) {
496			net->ssthresh = net->mtu;
497			/* here back off the timer as well, to slow us down */
498			net->RTO <<= 1;
499		}
500		net->cwnd = net->ssthresh;
501		SDT_PROBE(sctp, cwnd, net, ecn,
502		    stcb->asoc.my_vtag,
503		    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
504		    net,
505		    old_cwnd, net->cwnd);
506		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
507			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
508		}
509	}
510}
511
512static void
513sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
514    struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
515    uint32_t * bottle_bw, uint32_t * on_queue)
516{
517	uint32_t bw_avail;
518	int rtt;
519	unsigned int incr;
520	int old_cwnd = net->cwnd;
521
522	/* need real RTT for this calc */
523	rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
524	/* get bottle neck bw */
525	*bottle_bw = ntohl(cp->bottle_bw);
526	/* and whats on queue */
527	*on_queue = ntohl(cp->current_onq);
528	/*
529	 * adjust the on-queue if our flight is more it could be that the
530	 * router has not yet gotten data "in-flight" to it
531	 */
532	if (*on_queue < net->flight_size)
533		*on_queue = net->flight_size;
534	/* calculate the available space */
535	bw_avail = (*bottle_bw * rtt) / 1000;
536	if (bw_avail > *bottle_bw) {
537		/*
538		 * Cap the growth to no more than the bottle neck. This can
539		 * happen as RTT slides up due to queues. It also means if
540		 * you have more than a 1 second RTT with a empty queue you
541		 * will be limited to the bottle_bw per second no matter if
542		 * other points have 1/2 the RTT and you could get more
543		 * out...
544		 */
545		bw_avail = *bottle_bw;
546	}
547	if (*on_queue > bw_avail) {
548		/*
549		 * No room for anything else don't allow anything else to be
550		 * "added to the fire".
551		 */
552		int seg_inflight, seg_onqueue, my_portion;
553
554		net->partial_bytes_acked = 0;
555
556		/* how much are we over queue size? */
557		incr = *on_queue - bw_avail;
558		if (stcb->asoc.seen_a_sack_this_pkt) {
559			/*
560			 * undo any cwnd adjustment that the sack might have
561			 * made
562			 */
563			net->cwnd = net->prev_cwnd;
564		}
565		/* Now how much of that is mine? */
566		seg_inflight = net->flight_size / net->mtu;
567		seg_onqueue = *on_queue / net->mtu;
568		my_portion = (incr * seg_inflight) / seg_onqueue;
569
570		/* Have I made an adjustment already */
571		if (net->cwnd > net->flight_size) {
572			/*
573			 * for this flight I made an adjustment we need to
574			 * decrease the portion by a share our previous
575			 * adjustment.
576			 */
577			int diff_adj;
578
579			diff_adj = net->cwnd - net->flight_size;
580			if (diff_adj > my_portion)
581				my_portion = 0;
582			else
583				my_portion -= diff_adj;
584		}
585		/*
586		 * back down to the previous cwnd (assume we have had a sack
587		 * before this packet). minus what ever portion of the
588		 * overage is my fault.
589		 */
590		net->cwnd -= my_portion;
591
592		/* we will NOT back down more than 1 MTU */
593		if (net->cwnd <= net->mtu) {
594			net->cwnd = net->mtu;
595		}
596		/* force into CA */
597		net->ssthresh = net->cwnd - 1;
598	} else {
599		/*
600		 * Take 1/4 of the space left or max burst up .. whichever
601		 * is less.
602		 */
603		incr = (bw_avail - *on_queue) >> 2;
604		if ((stcb->asoc.max_burst > 0) &&
605		    (stcb->asoc.max_burst * net->mtu < incr)) {
606			incr = stcb->asoc.max_burst * net->mtu;
607		}
608		net->cwnd += incr;
609	}
610	if (net->cwnd > bw_avail) {
611		/* We can't exceed the pipe size */
612		net->cwnd = bw_avail;
613	}
614	if (net->cwnd < net->mtu) {
615		/* We always have 1 MTU */
616		net->cwnd = net->mtu;
617	}
618	if (net->cwnd - old_cwnd != 0) {
619		/* log only changes */
620		SDT_PROBE(sctp, cwnd, net, pd,
621		    stcb->asoc.my_vtag,
622		    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
623		    net,
624		    old_cwnd, net->cwnd);
625		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
626			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
627			    SCTP_CWND_LOG_FROM_SAT);
628		}
629	}
630}
631
632static void
633sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
634    struct sctp_nets *net, int burst_limit)
635{
636	int old_cwnd = net->cwnd;
637
638	if (net->ssthresh < net->cwnd)
639		net->ssthresh = net->cwnd;
640	net->cwnd = (net->flight_size + (burst_limit * net->mtu));
641	SDT_PROBE(sctp, cwnd, net, bl,
642	    stcb->asoc.my_vtag,
643	    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
644	    net,
645	    old_cwnd, net->cwnd);
646	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
647		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
648	}
649}
650
/*
 * Early FR timer expiry: push out pending data, then nudge cwnd down one
 * MTU and force the destination into congestion avoidance.
 */
static void
sctp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	int old_cwnd = net->cwnd;

	sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
	/*
	 * make a small adjustment to cwnd and force to CA.
	 */
	if (net->cwnd > net->mtu)
		/* drop down one MTU after sending */
		net->cwnd -= net->mtu;
	if (net->cwnd < net->ssthresh)
		/* still in SS move to CA */
		net->ssthresh = net->cwnd - 1;
	SDT_PROBE(sctp, cwnd, net, fr,
	    stcb->asoc.my_vtag,
	    ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
	    net,
	    old_cwnd, net->cwnd);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		/* Logs (old - new), i.e. the size of the decrease. */
		sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
	}
}
676
/*
 * One row of the HighSpeed (RFC 3649 style) cwnd adjustment table:
 * 'cwnd' is the window threshold in KB (compared against cwnd >> 10),
 * 'increase' is the additive increase in KB applied per step, and
 * 'drop_percent' is the percentage decrease applied on loss.
 */
struct sctp_hs_raise_drop {
	int32_t cwnd;
	int32_t increase;
	int32_t drop_percent;
};

/* Number of rows in sctp_cwnd_adjust[]. */
#define SCTP_HS_TABLE_SIZE 73
684
/*
 * HighSpeed adjustment table, indexed by net->last_hs_used. Rows are
 * sorted by ascending 'cwnd' threshold (KB); the increase/decrease
 * functions below scan it to find the row matching the current window.
 * NOTE(review): never written at runtime here -- looks like it could be
 * 'static const'; confirm no other file declares it extern first.
 */
struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
	{38, 1, 50},		/* 0   */
	{118, 2, 44},		/* 1   */
	{221, 3, 41},		/* 2   */
	{347, 4, 38},		/* 3   */
	{495, 5, 37},		/* 4   */
	{663, 6, 35},		/* 5   */
	{851, 7, 34},		/* 6   */
	{1058, 8, 33},		/* 7   */
	{1284, 9, 32},		/* 8   */
	{1529, 10, 31},		/* 9   */
	{1793, 11, 30},		/* 10  */
	{2076, 12, 29},		/* 11  */
	{2378, 13, 28},		/* 12  */
	{2699, 14, 28},		/* 13  */
	{3039, 15, 27},		/* 14  */
	{3399, 16, 27},		/* 15  */
	{3778, 17, 26},		/* 16  */
	{4177, 18, 26},		/* 17  */
	{4596, 19, 25},		/* 18  */
	{5036, 20, 25},		/* 19  */
	{5497, 21, 24},		/* 20  */
	{5979, 22, 24},		/* 21  */
	{6483, 23, 23},		/* 22  */
	{7009, 24, 23},		/* 23  */
	{7558, 25, 22},		/* 24  */
	{8130, 26, 22},		/* 25  */
	{8726, 27, 22},		/* 26  */
	{9346, 28, 21},		/* 27  */
	{9991, 29, 21},		/* 28  */
	{10661, 30, 21},	/* 29  */
	{11358, 31, 20},	/* 30  */
	{12082, 32, 20},	/* 31  */
	{12834, 33, 20},	/* 32  */
	{13614, 34, 19},	/* 33  */
	{14424, 35, 19},	/* 34  */
	{15265, 36, 19},	/* 35  */
	{16137, 37, 19},	/* 36  */
	{17042, 38, 18},	/* 37  */
	{17981, 39, 18},	/* 38  */
	{18955, 40, 18},	/* 39  */
	{19965, 41, 17},	/* 40  */
	{21013, 42, 17},	/* 41  */
	{22101, 43, 17},	/* 42  */
	{23230, 44, 17},	/* 43  */
	{24402, 45, 16},	/* 44  */
	{25618, 46, 16},	/* 45  */
	{26881, 47, 16},	/* 46  */
	{28193, 48, 16},	/* 47  */
	{29557, 49, 15},	/* 48  */
	{30975, 50, 15},	/* 49  */
	{32450, 51, 15},	/* 50  */
	{33986, 52, 15},	/* 51  */
	{35586, 53, 14},	/* 52  */
	{37253, 54, 14},	/* 53  */
	{38992, 55, 14},	/* 54  */
	{40808, 56, 14},	/* 55  */
	{42707, 57, 13},	/* 56  */
	{44694, 58, 13},	/* 57  */
	{46776, 59, 13},	/* 58  */
	{48961, 60, 13},	/* 59  */
	{51258, 61, 13},	/* 60  */
	{53677, 62, 12},	/* 61  */
	{56230, 63, 12},	/* 62  */
	{58932, 64, 12},	/* 63  */
	{61799, 65, 12},	/* 64  */
	{64851, 66, 11},	/* 65  */
	{68113, 67, 11},	/* 66  */
	{71617, 68, 11},	/* 67  */
	{75401, 69, 10},	/* 68  */
	{79517, 70, 10},	/* 69  */
	{84035, 71, 10},	/* 70  */
	{89053, 72, 10},	/* 71  */
	{94717, 73, 9}		/* 72  */
};
760
761static void
762sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
763{
764	int cur_val, i, indx, incr;
765
766	cur_val = net->cwnd >> 10;
767	indx = SCTP_HS_TABLE_SIZE - 1;
768#ifdef SCTP_DEBUG
769	printf("HS CC CAlled.\n");
770#endif
771	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
772		/* normal mode */
773		if (net->net_ack > net->mtu) {
774			net->cwnd += net->mtu;
775			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
776				sctp_log_cwnd(stcb, net, net->mtu, SCTP_CWND_LOG_FROM_SS);
777			}
778		} else {
779			net->cwnd += net->net_ack;
780			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
781				sctp_log_cwnd(stcb, net, net->net_ack, SCTP_CWND_LOG_FROM_SS);
782			}
783		}
784	} else {
785		for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
786			if (cur_val < sctp_cwnd_adjust[i].cwnd) {
787				indx = i;
788				break;
789			}
790		}
791		net->last_hs_used = indx;
792		incr = ((sctp_cwnd_adjust[indx].increase) << 10);
793		net->cwnd += incr;
794		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
795			sctp_log_cwnd(stcb, net, incr, SCTP_CWND_LOG_FROM_SS);
796		}
797	}
798}
799
800static void
801sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
802{
803	int cur_val, i, indx;
804	int old_cwnd = net->cwnd;
805
806	cur_val = net->cwnd >> 10;
807	if (cur_val < sctp_cwnd_adjust[0].cwnd) {
808		/* normal mode */
809		net->ssthresh = net->cwnd / 2;
810		if (net->ssthresh < (net->mtu * 2)) {
811			net->ssthresh = 2 * net->mtu;
812		}
813		net->cwnd = net->ssthresh;
814	} else {
815		/* drop by the proper amount */
816		net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
817		    sctp_cwnd_adjust[net->last_hs_used].drop_percent);
818		net->cwnd = net->ssthresh;
819		/* now where are we */
820		indx = net->last_hs_used;
821		cur_val = net->cwnd >> 10;
822		/* reset where we are in the table */
823		if (cur_val < sctp_cwnd_adjust[0].cwnd) {
824			/* feel out of hs */
825			net->last_hs_used = 0;
826		} else {
827			for (i = indx; i >= 1; i--) {
828				if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
829					break;
830				}
831			}
832			net->last_hs_used = indx;
833		}
834	}
835	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
836		sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
837	}
838}
839
/*
 * Fast-retransmit reaction for the HighSpeed CC module: same recovery
 * bookkeeping as sctp_cwnd_update_after_fr(), but the window reduction is
 * delegated to the HS table via sctp_hs_cwnd_decrease().
 */
static void
sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	struct sctp_nets *net;

	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of a RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * per section 7.2.3, are there any
				 * destinations that had a fast retransmit
				 * to them. If so what we need to do is
				 * adjust ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;

				sctp_hs_cwnd_decrease(stcb, net);

				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/* Restart the T3 send timer for this net. */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}
904
905static void
906sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
907    struct sctp_association *asoc,
908    int accum_moved, int reneged_all, int will_exit)
909{
910	struct sctp_nets *net;
911
912	/******************************/
913	/* update cwnd and Early FR   */
914	/******************************/
915	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
916
917#ifdef JANA_CMT_FAST_RECOVERY
918		/*
919		 * CMT fast recovery code. Need to debug.
920		 */
921		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
922			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
923			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
924				net->will_exit_fast_recovery = 1;
925			}
926		}
927#endif
928		if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
929			/*
930			 * So, first of all do we need to have a Early FR
931			 * timer running?
932			 */
933			if ((!TAILQ_EMPTY(&asoc->sent_queue) &&
934			    (net->ref_count > 1) &&
935			    (net->flight_size < net->cwnd)) ||
936			    (reneged_all)) {
937				/*
938				 * yes, so in this case stop it if its
939				 * running, and then restart it. Reneging
940				 * all is a special case where we want to
941				 * run the Early FR timer and then force the
942				 * last few unacked to be sent, causing us
943				 * to illicit a sack with gaps to force out
944				 * the others.
945				 */
946				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
947					SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
948					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
949					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
950				}
951				SCTP_STAT_INCR(sctps_earlyfrstrid);
952				sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
953			} else {
954				/* No, stop it if its running */
955				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
956					SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
957					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
958					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
959				}
960			}
961		}
962		/* if nothing was acked on this destination skip it */
963		if (net->net_ack == 0) {
964			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
965				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
966			}
967			continue;
968		}
969		if (net->net_ack2 > 0) {
970			/*
971			 * Karn's rule applies to clearing error count, this
972			 * is optional.
973			 */
974			net->error_count = 0;
975			if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
976			    SCTP_ADDR_NOT_REACHABLE) {
977				/* addr came good */
978				net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
979				net->dest_state |= SCTP_ADDR_REACHABLE;
980				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
981				    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
982				/* now was it the primary? if so restore */
983				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
984					(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
985				}
986			}
987			/*
988			 * JRS 5/14/07 - If CMT PF is on and the destination
989			 * is in PF state, set the destination to active
990			 * state and set the cwnd to one or two MTU's based
991			 * on whether PF1 or PF2 is being used.
992			 *
993			 * Should we stop any running T3 timer here?
994			 */
995			if ((asoc->sctp_cmt_on_off > 0) &&
996			    (asoc->sctp_cmt_pf > 0) &&
997			    ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
998				net->dest_state &= ~SCTP_ADDR_PF;
999				net->cwnd = net->mtu * asoc->sctp_cmt_pf;
1000				SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
1001				    net, net->cwnd);
1002				/*
1003				 * Since the cwnd value is explicitly set,
1004				 * skip the code that updates the cwnd
1005				 * value.
1006				 */
1007				goto skip_cwnd_update;
1008			}
1009		}
1010#ifdef JANA_CMT_FAST_RECOVERY
1011		/*
1012		 * CMT fast recovery code
1013		 */
1014		/*
1015		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
1016		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
1017		 * } else if (sctp_cmt_on_off == 0 &&
1018		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
1019		 */
1020#endif
1021
1022		if (asoc->fast_retran_loss_recovery &&
1023		    (will_exit == 0) &&
1024		    (asoc->sctp_cmt_on_off == 0)) {
1025			/*
1026			 * If we are in loss recovery we skip any cwnd
1027			 * update
1028			 */
1029			goto skip_cwnd_update;
1030		}
1031		/*
1032		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
1033		 * moved.
1034		 */
1035		if (accum_moved ||
1036		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
1037			/* If the cumulative ack moved we can proceed */
1038			if (net->cwnd <= net->ssthresh) {
1039				/* We are in slow start */
1040				if (net->flight_size + net->net_ack >= net->cwnd) {
1041
1042					sctp_hs_cwnd_increase(stcb, net);
1043
1044				} else {
1045					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
1046						sctp_log_cwnd(stcb, net, net->net_ack,
1047						    SCTP_CWND_LOG_NOADV_SS);
1048					}
1049				}
1050			} else {
1051				/* We are in congestion avoidance */
1052				net->partial_bytes_acked += net->net_ack;
1053				if ((net->flight_size + net->net_ack >= net->cwnd) &&
1054				    (net->partial_bytes_acked >= net->cwnd)) {
1055					net->partial_bytes_acked -= net->cwnd;
1056					net->cwnd += net->mtu;
1057					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1058						sctp_log_cwnd(stcb, net, net->mtu,
1059						    SCTP_CWND_LOG_FROM_CA);
1060					}
1061				} else {
1062					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
1063						sctp_log_cwnd(stcb, net, net->net_ack,
1064						    SCTP_CWND_LOG_NOADV_CA);
1065					}
1066				}
1067			}
1068		} else {
1069			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
1070				sctp_log_cwnd(stcb, net, net->mtu,
1071				    SCTP_CWND_LOG_NO_CUMACK);
1072			}
1073		}
1074skip_cwnd_update:
1075		/*
1076		 * NOW, according to Karn's rule do we need to restore the
1077		 * RTO timer back? Check our net_ack2. If not set then we
1078		 * have a ambiguity.. i.e. all data ack'd was sent to more
1079		 * than one place.
1080		 */
1081		if (net->net_ack2) {
1082			/* restore any doubled timers */
1083			net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
1084			if (net->RTO < stcb->asoc.minrto) {
1085				net->RTO = stcb->asoc.minrto;
1086			}
1087			if (net->RTO > stcb->asoc.maxrto) {
1088				net->RTO = stcb->asoc.maxrto;
1089			}
1090		}
1091	}
1092}
1093
1094
1095/*
1096 * H-TCP congestion control. The algorithm is detailed in:
1097 * R.N.Shorten, D.J.Leith:
1098 *   "H-TCP: TCP for high-speed and long-distance networks"
1099 *   Proc. PFLDnet, Argonne, 2004.
1100 * http://www.hamilton.ie/net/htcp3.pdf
1101 */
1102
1103
/* H-TCP tuning knobs (both on by default). */
static int use_rtt_scaling = 1;	/* scale alpha by minRTT in htcp_alpha_update() */
static int use_bandwidth_switch = 1;	/* bandwidth-driven beta/mode switch (see htcp_beta_update()) */
1106
1107static inline int
1108between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
1109{
1110	return seq3 - seq2 >= seq1 - seq2;
1111}
1112
1113static inline uint32_t
1114htcp_cong_time(struct htcp *ca)
1115{
1116	return sctp_get_tick_count() - ca->last_cong;
1117}
1118
1119static inline uint32_t
1120htcp_ccount(struct htcp *ca)
1121{
1122	return htcp_cong_time(ca) / ca->minRTT;
1123}
1124
1125static inline void
1126htcp_reset(struct htcp *ca)
1127{
1128	ca->undo_last_cong = ca->last_cong;
1129	ca->undo_maxRTT = ca->maxRTT;
1130	ca->undo_old_maxB = ca->old_maxB;
1131	ca->last_cong = sctp_get_tick_count();
1132}
1133
1134#ifdef SCTP_NOT_USED
1135
static uint32_t
htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Roll back the last congestion response using the snapshot taken
	 * by htcp_reset(), and return the cwnd to restore: the larger of
	 * the current cwnd and ssthresh scaled back up by 1/beta (beta is
	 * a <<7 fixed-point fraction, so <<7 before dividing).
	 */
	net->htcp_ca.last_cong = net->htcp_ca.undo_last_cong;
	net->htcp_ca.maxRTT = net->htcp_ca.undo_maxRTT;
	net->htcp_ca.old_maxB = net->htcp_ca.undo_old_maxB;
	return max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->htcp_ca.beta) * net->mtu);
}
1144
1145#endif
1146
static inline void
measure_rtt(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* lastsa holds a scaled SRTT; >>3 recovers the smoothed RTT in ticks */
	uint32_t srtt = net->lastsa >> 3;

	/* keep track of minimum RTT seen so far, minRTT is zero at first */
	if (net->htcp_ca.minRTT > srtt || !net->htcp_ca.minRTT)
		net->htcp_ca.minRTT = srtt;

	/* max RTT */
	/*
	 * Only track maxRTT in steady state: no fast retransmit in
	 * progress, ssthresh not saturated, and more than 3 minRTT
	 * intervals since the last congestion event.
	 * NOTE(review): htcp_ccount() divides by minRTT, which is still
	 * zero if no nonzero srtt sample has arrived — confirm srtt > 0
	 * is guaranteed before this point.
	 */
	if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->htcp_ca) > 3) {
		if (net->htcp_ca.maxRTT < net->htcp_ca.minRTT)
			net->htcp_ca.maxRTT = net->htcp_ca.minRTT;
		/*
		 * Accept a larger srtt as the new maxRTT only if it
		 * exceeds the old maxRTT by at most 20ms (outlier filter).
		 */
		if (net->htcp_ca.maxRTT < srtt && srtt <= net->htcp_ca.maxRTT + MSEC_TO_TICKS(20))
			net->htcp_ca.maxRTT = srtt;
	}
}
1164
static void
measure_achieved_throughput(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Track achieved throughput (Bi, in MTUs/sec) between congestion
	 * events; feeds the bandwidth-based mode switch in
	 * htcp_beta_update() when use_bandwidth_switch is set.
	 */
	uint32_t now = sctp_get_tick_count();

	if (net->fast_retran_ip == 0)
		net->htcp_ca.bytes_acked = net->net_ack;

	if (!use_bandwidth_switch)
		return;

	/* achieved throughput calculations */
	/* JRS - not 100% sure of this statement */
	if (net->fast_retran_ip == 1) {
		/* loss episode in progress: restart the measurement interval */
		net->htcp_ca.bytecount = 0;
		net->htcp_ca.lasttime = now;
		return;
	}
	net->htcp_ca.bytecount += net->net_ack;

	/*
	 * Take a sample once roughly a cwnd's worth of data (minus alpha
	 * MTUs; alpha is <<7 fixed point) has been acked and at least one
	 * minRTT has elapsed.  "x ? : 1" is the GCC "elvis" extension:
	 * use the alpha term itself unless it evaluates to zero.
	 */
	if (net->htcp_ca.bytecount >= net->cwnd - ((net->htcp_ca.alpha >> 7 ? : 1) * net->mtu)
	    && now - net->htcp_ca.lasttime >= net->htcp_ca.minRTT
	    && net->htcp_ca.minRTT > 0) {
		/* MTUs acked per second over the measurement interval */
		uint32_t cur_Bi = net->htcp_ca.bytecount / net->mtu * hz / (now - net->htcp_ca.lasttime);

		if (htcp_ccount(&net->htcp_ca) <= 3) {
			/* just after backoff */
			net->htcp_ca.minB = net->htcp_ca.maxB = net->htcp_ca.Bi = cur_Bi;
		} else {
			/* EWMA: Bi = 3/4 old + 1/4 new; keep min/max consistent */
			net->htcp_ca.Bi = (3 * net->htcp_ca.Bi + cur_Bi) / 4;
			if (net->htcp_ca.Bi > net->htcp_ca.maxB)
				net->htcp_ca.maxB = net->htcp_ca.Bi;
			if (net->htcp_ca.minB > net->htcp_ca.maxB)
				net->htcp_ca.minB = net->htcp_ca.maxB;
		}
		net->htcp_ca.bytecount = 0;
		net->htcp_ca.lasttime = now;
	}
}
1204
static inline void
htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
{
	/*
	 * Recompute the multiplicative backoff factor beta (<<7 fixed
	 * point) after a congestion event.
	 */
	if (use_bandwidth_switch) {
		uint32_t maxB = ca->maxB;
		uint32_t old_maxB = ca->old_maxB;

		/* remember this epoch's bandwidth for the next comparison */
		ca->old_maxB = ca->maxB;

		/*
		 * If the measured bandwidth moved outside ~[0.8, 1.2] of
		 * the previous epoch's, treat it as a path change: fall
		 * back to conservative BETA_MIN and re-arm the switch.
		 */
		if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
			ca->beta = BETA_MIN;
			ca->modeswitch = 0;
			return;
		}
	}
	/* adaptive backoff: beta = minRTT/maxRTT, clamped to [BETA_MIN, BETA_MAX] */
	if (ca->modeswitch && minRTT > (uint32_t) MSEC_TO_TICKS(10) && maxRTT) {
		ca->beta = (minRTT << 7) / maxRTT;
		if (ca->beta < BETA_MIN)
			ca->beta = BETA_MIN;
		else if (ca->beta > BETA_MAX)
			ca->beta = BETA_MAX;
	} else {
		ca->beta = BETA_MIN;
		ca->modeswitch = 1;
	}
}
1231
static inline void
htcp_alpha_update(struct htcp *ca)
{
	/*
	 * Recompute the additive-increase factor alpha (<<7 fixed point)
	 * from the time elapsed since the last congestion event.
	 */
	uint32_t minRTT = ca->minRTT;
	uint32_t factor = 1;
	uint32_t diff = htcp_cong_time(ca);

	if (diff > (uint32_t) hz) {
		/* past the 1-second threshold: quadratic growth in (t - 1s) */
		diff -= hz;
		factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
	}
	if (use_rtt_scaling && minRTT) {
		/* scale the increase by RTT for inter-RTT fairness */
		uint32_t scale = (hz << 3) / (10 * minRTT);

		scale = min(max(scale, 1U << 2), 10U << 3);	/* clamping ratio to
								 * interval [0.5,10]<<3 */
		factor = (factor << 3) / scale;
		if (!factor)
			factor = 1;
	}
	/* alpha = 2 * factor * (1 - beta), all in <<7 fixed point */
	ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
	if (!ca->alpha)
		ca->alpha = ALPHA_BASE;
}
1256
1257/* After we have the rtt data to calculate beta, we'd still prefer to wait one
1258 * rtt before we adjust our beta to ensure we are working from a consistent
1259 * data.
1260 *
1261 * This function should be called when we hit a congestion event since only at
1262 * that point do we really have a real sense of maxRTT (the queues en route
1263 * were getting just too full now).
1264 */
1265static void
1266htcp_param_update(struct sctp_tcb *stcb, struct sctp_nets *net)
1267{
1268	uint32_t minRTT = net->htcp_ca.minRTT;
1269	uint32_t maxRTT = net->htcp_ca.maxRTT;
1270
1271	htcp_beta_update(&net->htcp_ca, minRTT, maxRTT);
1272	htcp_alpha_update(&net->htcp_ca);
1273
1274	/*
1275	 * add slowly fading memory for maxRTT to accommodate routing
1276	 * changes etc
1277	 */
1278	if (minRTT > 0 && maxRTT > minRTT)
1279		net->htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
1280}
1281
static uint32_t
htcp_recalc_ssthresh(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Refresh alpha/beta for this congestion event, then return the
	 * new slow-start threshold: beta * cwnd (beta is <<7 fixed point,
	 * hence the >>7), rounded down to whole MTUs, never below 2 MTUs.
	 */
	htcp_param_update(stcb, net);
	return max(((net->cwnd / net->mtu * net->htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu);
}
1288
static void
htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Grow cwnd on acked data: classic slow start (with ABC-style
	 * limiting) while cwnd <= ssthresh, H-TCP alpha-driven congestion
	 * avoidance above it.
	 */
	/*-
	 * How to handle these functions?
         *	if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
	 *		return;
	 */
	if (net->cwnd <= net->ssthresh) {
		/* We are in slow start */
		if (net->flight_size + net->net_ack >= net->cwnd) {
			/* cwnd was fully utilized; grow, capped by L MTUs (ABC) */
			if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
				net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, net->mtu,
					    SCTP_CWND_LOG_FROM_SS);
				}
			} else {
				net->cwnd += net->net_ack;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, net->net_ack,
					    SCTP_CWND_LOG_FROM_SS);
				}
			}
		} else {
			/* not cwnd-limited: log that we did not advance */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack,
				    SCTP_CWND_LOG_NOADV_SS);
			}
		}
	} else {
		measure_rtt(stcb, net);

		/*
		 * In dangerous area, increase slowly. In theory this is
		 * net->cwnd += alpha / net->cwnd
		 */
		/* What is snd_cwnd_cnt?? */
		/* alpha is <<7 fixed point; compare alpha-scaled acked bytes to cwnd */
		if (((net->partial_bytes_acked / net->mtu * net->htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
			/*-
			 * Does SCTP have a cwnd clamp?
			 * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
			 */
			net->cwnd += net->mtu;
			net->partial_bytes_acked = 0;
			htcp_alpha_update(&net->htcp_ca);
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_FROM_CA);
			}
		} else {
			net->partial_bytes_acked += net->net_ack;
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->net_ack,
				    SCTP_CWND_LOG_NOADV_CA);
			}
		}

		net->htcp_ca.bytes_acked = net->mtu;
	}
}
1350
1351#ifdef SCTP_NOT_USED
/* Lower bound on congestion window. */
static uint32_t
htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/* H-TCP never takes cwnd below the current slow-start threshold. */
	return net->ssthresh;
}
1358
1359#endif
1360
1361static void
1362htcp_init(struct sctp_tcb *stcb, struct sctp_nets *net)
1363{
1364	memset(&net->htcp_ca, 0, sizeof(struct htcp));
1365	net->htcp_ca.alpha = ALPHA_BASE;
1366	net->htcp_ca.beta = BETA_MIN;
1367	net->htcp_ca.bytes_acked = net->mtu;
1368	net->htcp_ca.last_cong = sctp_get_tick_count();
1369}
1370
static void
sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Set the initial cwnd/ssthresh for a destination and seed its
	 * H-TCP state.
	 */
	/*
	 * We take the max of the burst limit times a MTU or the
	 * INITIAL_CWND. We then limit this to 4 MTU's of sending.
	 */
	net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
	/* start ssthresh at the peer's advertised receive window */
	net->ssthresh = stcb->asoc.peers_rwnd;
	htcp_init(stcb, net);

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
		sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
	}
}
1386
static void
sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
	/*
	 * Per-SACK cwnd handling for H-TCP: manage the Early FR timer,
	 * clear error state on destinations that had data acked, and run
	 * htcp_cong_avoid()/measure_achieved_throughput() on destinations
	 * whose (pseudo-)cumack advanced.  Also restores RTO per Karn's
	 * rule when the ack unambiguously belongs to one destination.
	 */
	struct sctp_nets *net;

	/******************************/
	/* update cwnd and Early FR   */
	/******************************/
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {

#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code. Need to debug.
		 */
		if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
			if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
			    SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
				net->will_exit_fast_recovery = 1;
			}
		}
#endif
		if (SCTP_BASE_SYSCTL(sctp_early_fr)) {
			/*
			 * So, first of all do we need to have a Early FR
			 * timer running?
			 */
			if ((!TAILQ_EMPTY(&asoc->sent_queue) &&
			    (net->ref_count > 1) &&
			    (net->flight_size < net->cwnd)) ||
			    (reneged_all)) {
				/*
				 * yes, so in this case stop it if its
				 * running, and then restart it. Reneging
				 * all is a special case where we want to
				 * run the Early FR timer and then force the
				 * last few unacked to be sent, causing us
				 * to illicit a sack with gaps to force out
				 * the others.
				 */
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck2);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
				}
				SCTP_STAT_INCR(sctps_earlyfrstrid);
				sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
			} else {
				/* No, stop it if its running */
				if (SCTP_OS_TIMER_PENDING(&net->fr_timer.timer)) {
					SCTP_STAT_INCR(sctps_earlyfrstpidsck3);
					sctp_timer_stop(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net,
					    SCTP_FROM_SCTP_INDATA + SCTP_LOC_21);
				}
			}
		}
		/* if nothing was acked on this destination skip it */
		if (net->net_ack == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
			}
			continue;
		}
		if (net->net_ack2 > 0) {
			/*
			 * Karn's rule applies to clearing error count, this
			 * is optional.
			 */
			net->error_count = 0;
			if ((net->dest_state & SCTP_ADDR_NOT_REACHABLE) ==
			    SCTP_ADDR_NOT_REACHABLE) {
				/* addr came good */
				net->dest_state &= ~SCTP_ADDR_NOT_REACHABLE;
				net->dest_state |= SCTP_ADDR_REACHABLE;
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
				    SCTP_RECEIVED_SACK, (void *)net, SCTP_SO_NOT_LOCKED);
				/* now was it the primary? if so restore */
				if (net->dest_state & SCTP_ADDR_WAS_PRIMARY) {
					(void)sctp_set_primary_addr(stcb, (struct sockaddr *)NULL, net);
				}
			}
			/*
			 * JRS 5/14/07 - If CMT PF is on and the destination
			 * is in PF state, set the destination to active
			 * state and set the cwnd to one or two MTU's based
			 * on whether PF1 or PF2 is being used.
			 *
			 * Should we stop any running T3 timer here?
			 */
			if ((asoc->sctp_cmt_on_off > 0) &&
			    (asoc->sctp_cmt_pf > 0) &&
			    ((net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF)) {
				net->dest_state &= ~SCTP_ADDR_PF;
				net->cwnd = net->mtu * asoc->sctp_cmt_pf;
				SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
				    net, net->cwnd);
				/*
				 * Since the cwnd value is explicitly set,
				 * skip the code that updates the cwnd
				 * value.
				 */
				goto skip_cwnd_update;
			}
		}
#ifdef JANA_CMT_FAST_RECOVERY
		/*
		 * CMT fast recovery code
		 */
		/*
		 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
		 * && net->will_exit_fast_recovery == 0) { @@@ Do something
		 * } else if (sctp_cmt_on_off == 0 &&
		 * asoc->fast_retran_loss_recovery && will_exit == 0) {
		 */
#endif

		if (asoc->fast_retran_loss_recovery &&
		    will_exit == 0 &&
		    (asoc->sctp_cmt_on_off == 0)) {
			/*
			 * If we are in loss recovery we skip any cwnd
			 * update
			 */
			goto skip_cwnd_update;
		}
		/*
		 * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
		 * moved.
		 */
		if (accum_moved ||
		    ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
			/* H-TCP growth plus throughput sampling */
			htcp_cong_avoid(stcb, net);
			measure_achieved_throughput(stcb, net);
		} else {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
				sctp_log_cwnd(stcb, net, net->mtu,
				    SCTP_CWND_LOG_NO_CUMACK);
			}
		}
skip_cwnd_update:
		/*
		 * NOW, according to Karn's rule do we need to restore the
		 * RTO timer back? Check our net_ack2. If not set then we
		 * have a ambiguity.. i.e. all data ack'd was sent to more
		 * than one place.
		 */
		if (net->net_ack2) {
			/* restore any doubled timers */
			net->RTO = ((net->lastsa >> 2) + net->lastsv) >> 1;
			if (net->RTO < stcb->asoc.minrto) {
				net->RTO = stcb->asoc.minrto;
			}
			if (net->RTO > stcb->asoc.maxrto) {
				net->RTO = stcb->asoc.maxrto;
			}
		}
	}
}
1546
static void
sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
	/*
	 * Fast-retransmit response: for each destination with data acked
	 * outside an existing RFC2582 recovery window, reset the H-TCP
	 * state, cut cwnd to the recomputed ssthresh, open a new fast
	 * recovery window (association- and destination-level) and
	 * restart the T3 timer.
	 */
	struct sctp_nets *net;

	/*
	 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
	 * (net->fast_retran_loss_recovery == 0)))
	 */
	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
		if ((asoc->fast_retran_loss_recovery == 0) ||
		    (asoc->sctp_cmt_on_off > 0)) {
			/* out of a RFC2582 Fast recovery window? */
			if (net->net_ack > 0) {
				/*
				 * per section 7.2.3, are there any
				 * destinations that had a fast retransmit
				 * to them. If so what we need to do is
				 * adjust ssthresh and cwnd.
				 */
				struct sctp_tmit_chunk *lchk;
				int old_cwnd = net->cwnd;

				/* JRS - reset as if state were changed */
				htcp_reset(&net->htcp_ca);
				net->ssthresh = htcp_recalc_ssthresh(stcb, net);
				net->cwnd = net->ssthresh;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
					sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
					    SCTP_CWND_LOG_FROM_FR);
				}
				lchk = TAILQ_FIRST(&asoc->send_queue);

				net->partial_bytes_acked = 0;
				/* Turn on fast recovery window */
				asoc->fast_retran_loss_recovery = 1;
				if (lchk == NULL) {
					/* Mark end of the window */
					asoc->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					asoc->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/*
				 * CMT fast recovery -- per destination
				 * recovery variable.
				 */
				net->fast_retran_loss_recovery = 1;

				if (lchk == NULL) {
					/* Mark end of the window */
					net->fast_recovery_tsn = asoc->sending_seq - 1;
				} else {
					net->fast_recovery_tsn = lchk->rec.data.TSN_seq - 1;
				}

				/* restart T3 so the new, smaller cwnd governs retransmits */
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
				    stcb->sctp_ep, stcb, net);
			}
		} else if (net->net_ack > 0) {
			/*
			 * Mark a peg that we WOULD have done a cwnd
			 * reduction but RFC2582 prevented this action.
			 */
			SCTP_STAT_INCR(sctps_fastretransinrtt);
		}
	}
}
1618
1619static void
1620sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
1621    struct sctp_nets *net)
1622{
1623	int old_cwnd = net->cwnd;
1624
1625	/* JRS - reset as if the state were being changed to timeout */
1626	htcp_reset(&net->htcp_ca);
1627	net->ssthresh = htcp_recalc_ssthresh(stcb, net);
1628	net->cwnd = net->mtu;
1629	net->partial_bytes_acked = 0;
1630	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1631		sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
1632	}
1633}
1634
static void
sctp_htcp_cwnd_update_after_fr_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets *net)
{
	/*
	 * Early-FR timer fired: push out pending data, then nudge cwnd
	 * down one MTU and force the sender into congestion avoidance.
	 */
	int old_cwnd;

	old_cwnd = net->cwnd;

	sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
	/* restart the H-TCP congestion-epoch clock */
	net->htcp_ca.last_cong = sctp_get_tick_count();
	/*
	 * make a small adjustment to cwnd and force to CA.
	 */
	if (net->cwnd > net->mtu)
		/* drop down one MTU after sending */
		net->cwnd -= net->mtu;
	if (net->cwnd < net->ssthresh)
		/* still in SS move to CA */
		net->ssthresh = net->cwnd - 1;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
	}
}
1658
1659static void
1660sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
1661    struct sctp_nets *net, int in_window, int num_pkt_lost)
1662{
1663	int old_cwnd;
1664
1665	old_cwnd = net->cwnd;
1666
1667	/* JRS - reset hctp as if state changed */
1668	if (in_window == 0) {
1669		htcp_reset(&net->htcp_ca);
1670		SCTP_STAT_INCR(sctps_ecnereducedcwnd);
1671		net->ssthresh = htcp_recalc_ssthresh(stcb, net);
1672		if (net->ssthresh < net->mtu) {
1673			net->ssthresh = net->mtu;
1674			/* here back off the timer as well, to slow us down */
1675			net->RTO <<= 1;
1676		}
1677		net->cwnd = net->ssthresh;
1678		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1679			sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
1680		}
1681	}
1682}
1683
/*
 * Congestion control dispatch table.  Entries, in order: the base
 * sctp_* handlers, the "hs" variants, and the H-TCP variants.
 * NOTE(review): index semantics inferred from handler names — confirm
 * against the CC option constants where this array is indexed.
 */
struct sctp_cc_functions sctp_cc_functions[] = {
	/* base congestion control handlers */
	{
		.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_cwnd_update_after_sack,
		.sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
		.sctp_cwnd_update_after_fr_timer = sctp_cwnd_update_after_fr_timer
	},
	/* "hs" variants (sack/fr handlers differ from the base set) */
	{
		.sctp_set_initial_cc_param = sctp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_hs_cwnd_update_after_sack,
		.sctp_cwnd_update_after_fr = sctp_hs_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
		.sctp_cwnd_update_after_fr_timer = sctp_cwnd_update_after_fr_timer
	},
	/* H-TCP handlers (packet-dropped/output shared with the base set) */
	{
		.sctp_set_initial_cc_param = sctp_htcp_set_initial_cc_param,
		.sctp_cwnd_update_after_sack = sctp_htcp_cwnd_update_after_sack,
		.sctp_cwnd_update_after_fr = sctp_htcp_cwnd_update_after_fr,
		.sctp_cwnd_update_after_timeout = sctp_htcp_cwnd_update_after_timeout,
		.sctp_cwnd_update_after_ecn_echo = sctp_htcp_cwnd_update_after_ecn_echo,
		.sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
		.sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
		.sctp_cwnd_update_after_fr_timer = sctp_htcp_cwnd_update_after_fr_timer
	}
};
1716