/*-
 * Copyright (c) 2016-2020 Netflix, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _NETINET_TCP_RACK_H_
#define _NETINET_TCP_RACK_H_

#define RACK_ACKED	    0x000001 /* The remote endpoint acked this */
#define RACK_TO_REXT	    0x000002 /* A timeout occurred on this sendmap entry */
#define RACK_DEFERRED	    0x000004 /* We can't use this for RTT calc - not used */
#define RACK_OVERMAX	    0x000008 /* We have more retransmissions than we can fit */
#define RACK_SACK_PASSED    0x000010 /* A sack was done above this block */
#define RACK_WAS_SACKPASS   0x000020 /* We retransmitted due to SACK pass */
#define RACK_HAS_FIN	    0x000040 /* Segment was sent with a FIN */
#define RACK_TLP	    0x000080 /* Segment was sent as a tail loss probe */
#define RACK_RWND_COLLAPSED 0x000100 /* The peer collapsed the rwnd on the segment */
#define RACK_APP_LIMITED    0x000200 /* We went app limited after this send */
#define RACK_WAS_ACKED	    0x000400 /* An RTO undid the ack, but it already had an RTT calc done */
#define RACK_HAS_SYN	    0x000800 /* SYN is on this guy */
#define RACK_SENT_W_DSACK   0x001000 /* Sent with a dsack */
#define RACK_SENT_SP	    0x002000 /* Sent in the slow path */
#define RACK_SENT_FP        0x004000 /* Sent in the fast path */
#define RACK_HAD_PUSH	    0x008000 /* A push was sent on the original send */
#define RACK_MUST_RXT	    0x010000 /* We must retransmit this rsm (non-sack/mtu chg) */
#define RACK_IN_GP_WIN	    0x020000 /* Send was in the GP window when sent */
#define RACK_SHUFFLED	    0x040000 /* Data was shuffled from this RSM to another */
#define RACK_MERGED	    0x080000 /* The RSM was merged */
#define RACK_PMTU_CHG	    0x100000 /* The path mtu changed on this guy */
#define RACK_STRADDLE	    0x200000 /* The seq straddles the bucket line */
#define RACK_WAS_LOST	    0x400000 /* The rsm is considered lost */
#define RACK_IS_PCM         0x800000 /* A PCM measurement is being taken */
#define RACK_NUM_OF_RETRANS 3

#define RACK_INITIAL_RTO 1000000 /* 1 second in microseconds */

#define RACK_REQ_AVG 3	/* Must be less than 256 */

struct rack_sendmap {
	TAILQ_ENTRY(rack_sendmap) next;
	TAILQ_ENTRY(rack_sendmap) r_tnext;	/* Time-of-transmit based next */
	uint32_t bindex;
	uint32_t r_start;	/* Sequence number of the segment */
	uint32_t r_end;		/* End seq, actually one beyond the last byte */
	uint32_t r_rtr_bytes;	/* How many bytes have been retransmitted */
	uint32_t r_flags : 24,	/* Flags as defined above */
		 r_rtr_cnt : 8;	/* Retransmit count; index this - 1 into r_tim_lastsent for the time */
	uint32_t r_act_rxt_cnt; /* The actual total count of transmits */
	struct mbuf *m;
	uint32_t soff;
	uint32_t orig_m_len;	/* The original mbuf len when we sent (can update) */
	uint32_t orig_t_space;	/* The original trailing space when we sent (can update) */
	uint32_t r_nseq_appl;	/* If this one is app limited, this is the next seq limited */
	uint8_t r_dupack;	/* Dup ack count */
	uint8_t r_in_tmap;	/* Flag to see if it's in the r_tnext list */
	uint8_t r_limit_type;	/* Is this entry counted against a limit? */
	uint8_t r_just_ret : 1, /* After sending, the next pkt was just returned, i.e. limited */
		r_one_out_nr : 1,	/* Special case 1 outstanding and not in recovery */
		r_no_rtt_allowed : 1, /* No rtt measurement allowed */
		r_hw_tls : 1,
		r_avail : 4;
	uint64_t r_tim_lastsent[RACK_NUM_OF_RETRANS];	/* Transmit times (usec) of the send and its retransmits */
	uint64_t r_ack_arrival;	/* This is the time of ack-arrival (if SACK'd) */
	uint32_t r_fas;		/* Flight at send */
	uint8_t r_bas;		/* The burst size (burst at send = bas) */
};
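
/*
 * Usage notes (illustrative, derived from the definitions above): the
 * RACK_* values all fit within the 24-bit r_flags field and are tested
 * with plain bit operations, e.g. (rsm->r_flags & RACK_ACKED).  The time
 * of the most recent (re)transmission of an entry is r_tim_lastsent[]
 * indexed by r_rtr_cnt - 1, as the r_rtr_cnt comment describes.
 */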

struct deferred_opt_list {
	TAILQ_ENTRY(deferred_opt_list) next;
	int optname;
	uint64_t optval;
};

/*
 * Timestamps in the rack sendmap are now moving to be
 * uint64_t's. This means that if you want a uint32_t
 * usec timestamp (the old usec timestamp) you simply have
 * to cast it to uint32_t. The reason we do this is not for
 * wrap protection, but because we need to get back, at times,
 * to the millisecond timestamp that is used in the TSTMP option.
 * To do this we can use the rack_ts_to_msec() inline below, which
 * takes the 64-bit ts and converts it into the corresponding
 * millisecond timestamp. That's not possible with the 32-bit usec
 * timestamp since the seconds wrap too quickly to cover all bases.
 *
 * There are quite a few places in rack where I simply cast
 * back to uint32_t and then end up using the TSTMP_XX()
 * macros. This is ok, but we could do simple compares if
 * we ever decided to move all of those variables to 64 bits
 * as well.
 */

static inline uint64_t
rack_to_usec_ts(struct timeval *tv)
{
	return ((tv->tv_sec * HPTS_USEC_IN_SEC) + tv->tv_usec);
}

static inline uint32_t
rack_ts_to_msec(uint64_t ts)
{
	return ((uint32_t)(ts / HPTS_MSEC_IN_SEC));
}
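
/*
 * Illustrative sketch only (not used by the stack): how the 64-bit usec
 * timestamp relates to the two 32-bit views described in the comment
 * above.  The helper name is hypothetical and exists purely to show the
 * casting/conversion rule.
 */
static inline void
rack_example_ts_views(struct timeval *tv, uint32_t *us32, uint32_t *ms32)
{
	uint64_t ts;

	ts = rack_to_usec_ts(tv);	/* full 64-bit usec timestamp */
	*us32 = (uint32_t)ts;		/* old style 32-bit usec timestamp */
	*ms32 = rack_ts_to_msec(ts);	/* TSTMP-option style msec value */
}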


TAILQ_HEAD(rack_head, rack_sendmap);
TAILQ_HEAD(def_opt_head, deferred_opt_list);

/* Map change logging */
#define MAP_MERGE	0x01
#define MAP_SPLIT	0x02
#define MAP_NEW		0x03
#define MAP_SACK_M1	0x04
#define MAP_SACK_M2	0x05
#define MAP_SACK_M3	0x06
#define MAP_SACK_M4	0x07
#define MAP_SACK_M5	0x08
#define MAP_FREE	0x09
#define MAP_TRIM_HEAD	0x0a

#define RACK_LIMIT_TYPE_SPLIT	1

/*
 * We use the rate sample structure to
 * assist in single sack/ack rate and rtt
 * calculation. In the future we will expand
 * this in BBR to do forward rate sample
 * b/w estimation.
 */
#define RACK_RTT_EMPTY 0x00000001	/* Nothing yet stored in RTT's */
#define RACK_RTT_VALID 0x00000002	/* We have at least one valid RTT */
struct rack_rtt_sample {
	uint32_t rs_flags;
	uint32_t rs_rtt_lowest;
	uint32_t rs_rtt_highest;
	uint32_t rs_rtt_cnt;
	uint32_t rs_us_rtt;
	int32_t  confidence;
	uint64_t rs_rtt_tot;
	uint16_t rs_us_rtrcnt;
};
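
/*
 * Illustrative sketch only, based on the field names above (the real
 * update logic lives in the rack C code and may differ): how a single
 * RTT measurement could be folded into struct rack_rtt_sample so that
 * the lowest/highest/total/count fields stay consistent.  The helper
 * name is hypothetical.
 */
static inline void
rack_example_fold_rtt(struct rack_rtt_sample *rs, uint32_t rtt_us)
{
	if (rs->rs_flags & RACK_RTT_EMPTY) {
		/* First sample: seed the extremes and running totals. */
		rs->rs_flags = RACK_RTT_VALID;
		rs->rs_rtt_lowest = rtt_us;
		rs->rs_rtt_highest = rtt_us;
		rs->rs_rtt_tot = 0;
		rs->rs_rtt_cnt = 0;
	} else {
		if (rtt_us < rs->rs_rtt_lowest)
			rs->rs_rtt_lowest = rtt_us;
		if (rtt_us > rs->rs_rtt_highest)
			rs->rs_rtt_highest = rtt_us;
	}
	rs->rs_rtt_tot += rtt_us;
	rs->rs_rtt_cnt++;
}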

#define RACK_LOG_TYPE_ACK	0x01
#define RACK_LOG_TYPE_OUT	0x02
#define RACK_LOG_TYPE_TO	0x03
#define RACK_LOG_TYPE_ALLOC     0x04
#define RACK_LOG_TYPE_FREE      0x05

/*
 * Magic numbers for logging timeout events if the
 * logging is enabled.
 */
#define RACK_TO_FRM_TMR  1
#define RACK_TO_FRM_TLP  2
#define RACK_TO_FRM_RACK 3
#define RACK_TO_FRM_KEEP 4
#define RACK_TO_FRM_PERSIST 5
#define RACK_TO_FRM_DELACK 6

#define RCV_PATH_RTT_MS 10	/* How many ms between recv path RTT's */

struct rack_opts_stats {
	uint64_t tcp_rack_tlp_reduce;
	uint64_t tcp_rack_pace_always;
	uint64_t tcp_rack_pace_reduce;
	uint64_t tcp_rack_max_seg;
	uint64_t tcp_rack_prr_sendalot;
	uint64_t tcp_rack_min_to;
	uint64_t tcp_rack_early_seg;
	uint64_t tcp_rack_reord_thresh;
	uint64_t tcp_rack_reord_fade;
	uint64_t tcp_rack_tlp_thresh;
	uint64_t tcp_rack_pkt_delay;
	uint64_t tcp_rack_tlp_inc_var;
	uint64_t tcp_tlp_use;
	uint64_t tcp_rack_idle_reduce;
	uint64_t tcp_rack_idle_reduce_high;
	uint64_t rack_no_timer_in_hpts;
	uint64_t tcp_rack_min_pace_seg;
	uint64_t tcp_rack_pace_rate_ca;
	uint64_t tcp_rack_rr;
	uint64_t tcp_rack_rrr_no_conf_rate;
	uint64_t tcp_initial_rate;
	uint64_t tcp_initial_win;
	uint64_t tcp_hdwr_pacing;
	uint64_t tcp_gp_inc_ss;
	uint64_t tcp_gp_inc_ca;
	uint64_t tcp_gp_inc_rec;
	uint64_t tcp_rack_force_max_seg;
	uint64_t tcp_rack_pace_rate_ss;
	uint64_t tcp_rack_pace_rate_rec;
	/* Temp counters for dsack */
	uint64_t tcp_sack_path_1; /* not used */
	uint64_t tcp_sack_path_2a; /* not used */
	uint64_t tcp_sack_path_2b; /* not used */
	uint64_t tcp_sack_path_3; /* not used */
	uint64_t tcp_sack_path_4; /* not used */
	/* non temp counters */
	uint64_t tcp_rack_scwnd;
	uint64_t tcp_rack_noprr;
	uint64_t tcp_rack_cfg_rate;
	uint64_t tcp_timely_dyn;
	uint64_t tcp_rack_mbufq;
	uint64_t tcp_fillcw;
	uint64_t tcp_npush;
	uint64_t tcp_lscwnd;
	uint64_t tcp_profile;
	uint64_t tcp_hdwr_rate_cap;
	uint64_t tcp_pacing_rate_cap;
	uint64_t tcp_pacing_up_only;
	uint64_t tcp_use_cmp_acks;
	uint64_t tcp_rack_abc_val;
	uint64_t tcp_rec_abc_val;
	uint64_t tcp_rack_measure_cnt;
	uint64_t tcp_rack_delayed_ack;
	uint64_t tcp_rack_rtt_use;
	uint64_t tcp_data_after_close;
	uint64_t tcp_defer_opt;
	uint64_t tcp_pol_detect;
	uint64_t tcp_rack_beta;
	uint64_t tcp_rack_beta_ecn;
	uint64_t tcp_rack_timer_slop;
	uint64_t tcp_rack_dsack_opt;
	uint64_t tcp_rack_hi_beta;
	uint64_t tcp_split_limit;
	uint64_t tcp_rack_pacing_divisor;
	uint64_t tcp_rack_min_seg;
	uint64_t tcp_dgp_in_rec;
	uint64_t tcp_notimely;
	uint64_t tcp_honor_hpts;
	uint64_t tcp_dyn_rec;
	uint64_t tcp_fillcw_rate_cap;
	uint64_t tcp_pol_mss;
};

/* RTT shrink reasons */
#define RACK_RTTS_INIT     0
#define RACK_RTTS_NEWRTT   1
#define RACK_RTTS_EXITPROBE 2
#define RACK_RTTS_ENTERPROBE 3
#define RACK_RTTS_REACHTARGET 4
#define RACK_RTTS_SEEHBP 5
#define RACK_RTTS_NOBACKOFF 6
#define RACK_RTTS_SAFETY 7

#define RACK_USE_BEG 1
#define RACK_USE_END 2
#define RACK_USE_END_OR_THACK 3

#define TLP_USE_ID	1	/* Internet draft behavior */
#define TLP_USE_TWO_ONE 2	/* Use 2.1 behavior */
#define TLP_USE_TWO_TWO 3	/* Use 2.2 behavior */
#define RACK_MIN_BW 8000	/* 64kbps in Bps */

#define CCSP_DIS_MASK	0x0001
#define HYBRID_DIS_MASK	0x0002

/* Rack quality indicators for GPUT measurements */
#define RACK_QUALITY_NONE	0	/* No quality stated */
#define RACK_QUALITY_HIGH	1	/* A normal measurement of a GP RTT */
#define RACK_QUALITY_APPLIMITED	2	/* An app limited case that may be of lower quality */
#define RACK_QUALITY_PERSIST	3	/* A measurement where we went into persists */
#define RACK_QUALITY_PROBERTT	4	/* A measurement where we went into or exited probe RTT */
#define RACK_QUALITY_ALLACKED	5	/* All data is now acknowledged */

#define MIN_GP_WIN 6	/* We need at least 6 MSS in a GP measurement */
#ifdef _KERNEL
#define RACK_OPTS_SIZE (sizeof(struct rack_opts_stats)/sizeof(uint64_t))
extern counter_u64_t rack_opts_arry[RACK_OPTS_SIZE];
#define RACK_OPTS_ADD(name, amm) counter_u64_add(rack_opts_arry[(offsetof(struct rack_opts_stats, name)/sizeof(uint64_t))], (amm))
#define RACK_OPTS_INC(name) RACK_OPTS_ADD(name, 1)
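/*
 * Worked example of the indexing scheme above: every member of struct
 * rack_opts_stats is a uint64_t, so offsetof(member)/sizeof(uint64_t)
 * is simply the member's ordinal position in the structure.  Thus
 *
 *	RACK_OPTS_INC(tcp_rack_noprr);
 *
 * adds 1 to the rack_opts_arry[] counter whose slot matches the
 * position of tcp_rack_noprr within struct rack_opts_stats.
 */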
#endif /* _KERNEL */
/*
 * As we get each SACK we wade through the
 * rc_map and mark off what is acked.
 * We also increment rc_sacked as well.
 *
 * We also pay attention to missing entries
 * based on the time and possibly mark them
 * for retransmit. If we do and we are not already
 * in recovery we enter recovery. In doing
 * so we clear prr_delivered/holes_rxt and prr_sent_dur_rec.
 * We also set up rc_next/rc_snd_nxt/rc_send_end so
 * we will know where to send from. When not in
 * recovery rc_next will be NULL and rc_snd_nxt should
 * equal snd_max.
 *
 * Whenever we retransmit from recovery we increment
 * rc_holes_rxt as we retran a block and mark it as retransmitted
 * with the time it was sent. During non-recovery sending we
 * add to our map and note down the time of any send, expanding
 * the rc_map at the tail and moving rc_snd_nxt up with snd_max.
 *
 * In recovery during SACK/ACK processing if a chunk has
 * been retransmitted and it is now acked, we decrement rc_holes_rxt.
 * When we retransmit from the scoreboard we use
 * rc_next and rc_snd_nxt/rc_send_end to help us
 * find what needs to be retransmitted.
 *
 * To calculate pipe we simply take (snd_max - snd_una) + rc_holes_rxt.
 * This gets us the effect of RFC 6675 pipe, counting retransmitted
 * bytes twice.
 */
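
/*
 * Worked example of the pipe calculation described above (illustrative
 * only; plain values are used instead of the tcpcb/rack_control fields
 * named in the comment).  With snd_una = 1000, snd_max = 11000 and
 * rc_holes_rxt = 2000, pipe = 12000: the 2000 retransmitted bytes are
 * counted twice, matching the RFC 6675 style accounting.
 */
static inline uint32_t
rack_example_pipe(uint32_t snd_max, uint32_t snd_una, uint32_t holes_rxt)
{
	return ((snd_max - snd_una) + holes_rxt);
}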

#define TT_RACK_FR_TMR	0x2000

/*
 * Locking for the rack control block.
 * a) Locked by INP_WLOCK
 * b) Locked by the hpts-mutex
 *
 */
#define RACK_GP_HIST 4	/* How much goodput history do we maintain? */
#define RETRAN_CNT_SIZE 16

#define RACK_NUM_FSB_DEBUG 16
#ifdef _KERNEL
struct rack_fast_send_blk {
	uint32_t left_to_send;
	uint16_t tcp_ip_hdr_len;
	uint8_t tcp_flags;
	uint8_t hoplimit;
	uint8_t *tcp_ip_hdr;
	uint32_t recwin;
	uint32_t off;
	struct tcphdr *th;
	struct udphdr *udp;
	struct mbuf *m;
	uint32_t o_m_len;
	uint32_t o_t_len;
	uint32_t rfo_apply_push : 1,
		hw_tls : 1,
		unused : 30;
};

struct tailq_hash;

struct rack_pcm_info {
	/* Base send time and s/e filled in by rack_log_output */
	uint64_t send_time;
	uint32_t sseq;
	uint32_t eseq;
	/* Ack's fill in the rest of the data */
	uint16_t cnt;
	/* Maximum acks present */
	uint16_t cnt_alloc;
};

#define RACK_DEFAULT_PCM_ARRAY 16

struct rack_pcm_stats {
	uint32_t sseq;
	uint32_t eseq;
	uint64_t ack_time;
};


struct rack_control {
	/* Second cache line 0x40 from tcp_rack */
	struct tailq_hash *tqh;	/* Tree of all segments Lock(a) */
	struct rack_head rc_tmap;	/* List in transmit order Lock(a) */
	struct rack_sendmap *rc_tlpsend;	/* Remembered place for
						 * tlp_sending Lock(a) */
	struct rack_sendmap *rc_resend;	/* Something we have been asked to
					 * resend */
	struct rack_fast_send_blk fsb;	/* The fast-send block */
	uint32_t timer_slop;
	uint16_t pace_len_divisor;
	uint16_t rc_user_set_min_segs;
	uint32_t rc_hpts_flags;
	uint32_t rc_fixed_pacing_rate_ca;
	uint32_t rc_fixed_pacing_rate_rec;
	uint32_t rc_fixed_pacing_rate_ss;
	uint32_t cwnd_to_use;	/* The cwnd in use */
	uint32_t rc_timer_exp;	/* If a timer is ticking, its expiry time */
	uint32_t rc_rack_min_rtt;	/* Lowest RTT seen Lock(a) */
	uint32_t rc_rack_largest_cwnd;	/* Largest CWND we have seen Lock(a) */

	/* Third cache line 0x80 */
	struct rack_head rc_free;	/* Allocation array */
	uint64_t last_hw_bw_req;
	uint64_t crte_prev_rate;
	uint64_t bw_rate_cap;
	uint64_t last_cumack_advance;	/* Last time cumack moved forward */
	uint32_t rc_reorder_ts;	/* Last time we saw reordering Lock(a) */

	uint32_t rc_tlp_new_data;	/* We need to send new-data on a TLP
					 * Lock(a) */
	uint32_t rc_prr_out;	/* Bytes sent during recovery Lock(a) */

	uint32_t rc_prr_recovery_fs;	/* Recovery fs point Lock(a) */

	uint32_t rc_prr_sndcnt;	/* Prr sndcnt Lock(a) */

	uint32_t rc_sacked;	/* Tot sacked on scoreboard Lock(a) */
	uint32_t last_sent_tlp_seq;	/* Last tlp sequence that was retransmitted Lock(a) */

	uint32_t rc_prr_delivered;	/* During recovery prr var Lock(a) */

	uint16_t rc_tlp_cnt_out;	/* Count of times we have sent a TLP without new data */
	uint16_t last_sent_tlp_len;	/* Number of bytes in the last sent tlp */

	uint32_t rc_loss_count;	/* How many bytes have been retransmitted
				 * Lock(a) */
	uint32_t rc_reorder_fade;	/* Socket option value Lock(a) */

	/* Fourth cache line 0xc0 */
	/* Times */

	uint32_t rc_rack_tmit_time;	/* Rack transmit time Lock(a) */
	uint32_t rc_holes_rxt;	/* Tot retransmitted from scoreboard Lock(a) */

	uint32_t rc_num_maps_alloced;	/* Number of map blocks (sacks) we
					 * have allocated */
	uint32_t rc_rcvtime;	/* When we last received data */
	uint32_t rc_num_split_allocs;	/* Num split map entries allocated */
	uint32_t rc_split_limit;	/* Limit from control var; can be set by socket opt */
	uint32_t rack_avg_rec_sends;

	uint32_t rc_last_output_to;
	uint32_t rc_went_idle_time;

	struct rack_sendmap *rc_sacklast;	/* Sack remembered place
						 * Lock(a) */

	struct rack_sendmap *rc_first_appl;	/* Pointer to first app limited */
	struct rack_sendmap *rc_end_appl;	/* Pointer to last app limited */
	/* Cache line split 0x100 */
	struct sack_filter rack_sf;
	/* Cache line split 0x140 */
	/* Flags for various things */
	uint32_t rc_pace_max_segs;
	uint32_t rc_pace_min_segs;
	uint32_t rc_app_limited_cnt;
	uint16_t rack_per_of_gp_ss;	/* 100 = 100%, so the max of 65535 is about 655 x bw */
	uint16_t rack_per_of_gp_ca;	/* 100 = 100%, so the max of 65535 is about 655 x bw */
	uint16_t rack_per_of_gp_rec;	/* 100 = 100%, so the max of 65535 is about 655 x bw, 0=off */
	uint16_t rack_per_of_gp_probertt;	/* 100 = 100%, so the max of 65535 is about 655 x bw, 0=off */
	uint32_t rc_high_rwnd;
	struct rack_rtt_sample rack_rs;
	const struct tcp_hwrate_limit_table *crte;
	uint32_t rc_agg_early;
	uint32_t rc_agg_delayed;
	uint32_t rc_tlp_rxt_last_time;
	uint32_t rc_saved_cwnd;
	uint64_t rc_gp_output_ts; /* chg */
	uint64_t rc_gp_cumack_ts; /* chg */
	struct timeval act_rcv_time;
	struct timeval rc_last_time_decay;	/* SAD time decay happened here */
	uint64_t gp_bw;
	uint64_t init_rate;
#ifdef NETFLIX_SHARED_CWND
	struct shared_cwnd *rc_scw;
#endif
	uint64_t last_gp_comp_bw;
	uint64_t last_max_bw;	/* Our last calculated max b/w */
	struct time_filter_small rc_gp_min_rtt;
	struct def_opt_head opt_list;
	uint64_t lt_bw_time;	/* Total time with data outstanding (lt_bw = long term bandwidth) */
	uint64_t lt_bw_bytes;	/* Total bytes acked */
	uint64_t lt_timemark;	/* 64 bit timestamp when we started sending */
	struct tcp_sendfile_track *rc_last_sft;
	uint32_t lt_seq;	/* Seq at start of lt_bw gauge */
	int32_t rc_rtt_diff;		/* Timely style rtt diff of our gp_srtt */
	uint64_t last_tmit_time_acked;	/* Holds the last cumack point's last send time */
	/* Recovery stats */
	uint64_t time_entered_recovery;
	uint64_t bytes_acked_in_recovery;
	/* Policer detection */
	uint64_t last_policer_sndbytes;
	uint64_t last_policer_snd_rxt_bytes;
	uint64_t policer_bw;
	uint64_t last_sendtime;

	uint64_t last_gpest;
	uint64_t last_tm_mark;		/* Last tm mark used */
	uint64_t fillcw_cap;		/* B/W cap on fill cw */
	struct rack_pcm_info pcm_i;
	struct rack_pcm_stats *pcm_s;
	uint32_t gp_gain_req;		/* Percent off gp gain req */
	uint32_t last_rnd_of_gp_rise;
	uint32_t gp_rnd_thresh;
	uint32_t ss_hi_fs;
	uint32_t gate_to_fs;
	uint32_t policer_max_seg;
	uint32_t pol_bw_comp;
	uint16_t policer_rxt_threshold;
	uint8_t  policer_avg_threshold;
	uint8_t  policer_med_threshold;
	uint32_t pcm_max_seg;
	uint32_t last_pcm_round;
	uint32_t pcm_idle_rounds;
	uint32_t current_policer_bucket;
	uint32_t policer_bucket_size;
	uint32_t idle_snd_una;
	uint32_t ack_for_idle;
	uint32_t last_amount_before_rec;

	uint32_t rc_gp_srtt;		/* Current GP srtt */
	uint32_t rc_prev_gp_srtt;	/* Previous GP srtt */
	uint32_t rc_entry_gp_rtt;	/* Entry to PRTT gp-rtt */
	uint32_t rc_loss_at_start;	/* Our loss count at the start of the measurement window */
	uint32_t rc_considered_lost;	/* Count in recovery of non-retransmitted bytes considered lost */

	uint32_t dsack_round_end;	/* In a round of seeing a DSACK */
	uint32_t current_round;		/* Starting at zero */
	uint32_t roundends;		/* Acked value above which the round ends */
	uint32_t num_dsack;		/* Count of dsack's seen (1 per window) */
	uint32_t forced_ack_ts;
	uint32_t last_collapse_point;	/* Last point the peer collapsed to */
	uint32_t high_collapse_point;
	uint32_t rc_lower_rtt_us_cts;	/* Time our GP rtt was last lowered */
	uint32_t rc_time_probertt_entered;
	uint32_t rc_time_probertt_starts;
	uint32_t rc_lowest_us_rtt;
	uint32_t rc_highest_us_rtt;
	uint32_t rc_last_us_rtt;
	uint32_t rc_time_of_last_probertt;
	uint32_t rc_target_probertt_flight;
	uint32_t rc_probertt_sndmax_atexit;	/* Highest sent to in probe-rtt */
	uint32_t rc_cwnd_at_erec;
	uint32_t rc_ssthresh_at_erec;
	uint32_t dsack_byte_cnt;
	uint32_t retran_during_recovery;
	uint32_t rc_gp_lowrtt;			/* Lowest rtt seen during GPUT measurement */
	uint32_t rc_gp_high_rwnd;		/* Highest rwnd seen during GPUT measurement */
	uint32_t rc_snd_max_at_rto;	/* For non-sack: what snd_max was when the RTO occurred */
	uint32_t rc_out_at_rto;
	int32_t rc_scw_index;
	uint32_t max_reduction;
	uint32_t side_chan_dis_mask;	/* Bit mask of socket opts disabled */
	uint32_t rc_tlp_threshold;	/* Socket option value Lock(a) */
	uint32_t rc_last_timeout_snduna;
	uint32_t last_tlp_acked_start;
	uint32_t last_tlp_acked_end;
	uint32_t challenge_ack_ts;
	uint32_t challenge_ack_cnt;
	uint32_t rc_min_to;	/* Socket option value Lock(a) */
	uint32_t rc_pkt_delay;	/* Socket option value Lock(a) */
	uint32_t persist_lost_ends;
	uint32_t input_pkt;
	uint32_t saved_input_pkt;
	uint32_t saved_policer_val;	/* The encoded value we used to set up policer detection */
	uint32_t cleared_app_ack_seq;
	uint32_t last_rcv_tstmp_for_rtt;
	uint32_t last_time_of_arm_rcv;
	uint32_t rto_ssthresh;
	struct newreno rc_saved_beta;	/*
					 * For newreno cc:
					 * rc_saved_beta holds the values we have had
					 * set by the user, if pacing is not happening
					 * (i.e. it's early and we have not turned on yet
					 *  or it was turned off). The minute pacing
					 * is turned on we pull out the values currently
					 * being used by newreno and replace them with
					 * these values, then save off the old values here;
					 * we also set the flag (if ecn_beta is set) to make
					 * new_reno do less of a backoff for ecn (think ABE).
					 */
	uint16_t rc_cnt_of_retran[RETRAN_CNT_SIZE];
	uint16_t rc_early_recovery_segs;	/* Socket option value Lock(a) */
	uint16_t rc_reorder_shift;	/* Socket option value Lock(a) */
	uint8_t policer_del_mss;	/* How many mss during recovery for policer detection */
	uint8_t rack_per_upper_bound_ss;
	uint8_t rack_per_upper_bound_ca;
	uint8_t cleared_app_ack;
	uint8_t dsack_persist;
	uint8_t rc_no_push_at_mrtt;	/* No push when we exceed max rtt */
	uint8_t num_measurements;	/* Number of measurements (up to 0xff; we freeze at 0xff) */
	uint8_t req_measurements;	/* How many measurements are required? */
	uint8_t saved_hibeta;
	uint8_t rc_tlp_cwnd_reduce;	/* Socket option value Lock(a) */
	uint8_t rc_prr_sendalot;	/* Socket option value Lock(a) */
	uint8_t rc_rate_sample_method;
	uint8_t policer_alt_median;	/* Alternate median for policer detection */
	uint8_t full_dgp_in_rec;	/* Flag to say if we do full DGP in recovery */
	uint8_t client_suggested_maxseg;	/* Not sure what to do with this yet */
	uint8_t use_gp_not_last;
	uint8_t pacing_method;		/* If pace_always, what type of pacing */
	uint8_t already_had_a_excess;
};
#endif	/* _KERNEL */

#define RACK_PACING_NONE 0x00
#define RACK_DGP_PACING  0x01
#define RACK_REG_PACING  0x02

/* DGP with no buffer level mitigations */
#define DGP_LEVEL0	0

/*
 * DGP with buffer level mitigation where BL:4 caps fillcw and BL:5
 * turns off fillcw.
 */
#define DGP_LEVEL1	1

/*
 * DGP with buffer level mitigation where BL:3 caps fillcw, BL:4 turns off
 * fillcw and BL:5 reduces by 10%.
 */
#define DGP_LEVEL2	2

/*
 * DGP with buffer level mitigation where BL:2 caps fillcw, BL:3 turns off
 * fillcw, BL:4 reduces by 10% and BL:5 reduces by 20%.
 */
#define DGP_LEVEL3	3

/* Hybrid pacing log defines */
#define HYBRID_LOG_NO_ROOM	0	/* No room for the client's request */
#define HYBRID_LOG_TURNED_OFF	1	/* Turned off hybrid pacing */
#define HYBRID_LOG_NO_PACING	2	/* Failed to set pacing on */
#define HYBRID_LOG_RULES_SET	3	/* Hybrid pacing for this chunk is set */
#define HYBRID_LOG_NO_RANGE	4	/* In DGP mode, no range found */
#define HYBRID_LOG_RULES_APP	5	/* The specified rules were applied */
#define HYBRID_LOG_REQ_COMP	6	/* The request completed */
#define HYBRID_LOG_BW_MEASURE	7	/* Follow up b/w measurements to the previous completed log */
#define HYBRID_LOG_RATE_CAP	8	/* We had a rate cap apply */
#define HYBRID_LOG_CAP_CALC	9	/* How we calculate the cap */
#define HYBRID_LOG_ISSAME	10	/* Same as before -- temp */
#define HYBRID_LOG_ALLSENT	11	/* We sent it all, no more rate-cap */
#define HYBRID_LOG_OUTOFTIME	12	/* We are past the DGP deadline */
#define HYBRID_LOG_CAPERROR	13	/* Hit one of the TSNH cases */
#define HYBRID_LOG_EXTEND	14	/* We extended the end */
#define HYBRID_LOG_SENT_LOST	15	/* A closing sent/lost report */

#define LOST_ZERO	1	/* Zero it out */
#define LOST_ADD	2	/* Add to it */
#define LOST_SUB	3	/* Sub from it */

#define RACK_TIMELY_CNT_BOOST 5	/* At 5th increase boost */
#define RACK_MINRTT_FILTER_TIM 10	/* Seconds */

#define RACK_HYSTART_OFF	0
#define RACK_HYSTART_ON		1	/* hystart++ on */
#define RACK_HYSTART_ON_W_SC	2	/* hystart++ on +Slam Cwnd */
#define RACK_HYSTART_ON_W_SC_C	3	/* hystart++ on,
					 * Conservative ssthresh and
					 * +Slam cwnd
					 */

#define MAX_USER_SET_SEG 0x3f	/* The max we can set is 63 which is probably too many */
#define RACK_FREE_CNT_MAX 0x2f	/* Max our counter can do */

#ifdef _KERNEL

struct tcp_rack {
	/* First cache line 0x00 */
	TAILQ_ENTRY(tcp_rack) r_hpts;	/* hptsi queue next Lock(b) */
	int32_t(*r_substate) (struct mbuf *, struct tcphdr *,
	    struct socket *, struct tcpcb *, struct tcpopt *,
	    int32_t, int32_t, uint32_t, int, int, uint8_t);	/* Lock(a) */
	struct tcpcb *rc_tp;	/* The tcpcb Lock(a) */
	struct inpcb *rc_inp;	/* The inpcb Lock(a) */
	uint8_t rc_free_cnt : 6,
		rc_skip_timely : 1,
		pcm_enabled : 1;	/* Is PCM enabled */
	uint8_t client_bufferlvl : 3,	/* Expected range [0,5]: 0=unset, 1=low/empty */
		rack_deferred_inited : 1,
		/* ******************************************************************** */
		/* Note for details of next two fields see rack_init_retransmit_rate()  */
		/* ******************************************************************** */
		full_size_rxt : 1,
		shape_rxt_to_pacing_min : 1,
		/* ******************************************************************** */
		rc_ack_required : 1,
		r_use_hpts_min : 1;
	uint8_t no_prr_addback : 1,
		gp_ready : 1,
		defer_options : 1,
		dis_lt_bw : 1,
		rc_ack_can_sendout_data : 1, /*
					     * If set it will override pacing restrictions on not sending
					     * data when the pacing timer is running. I.e. you set this
					     * and an ACK will send data. Default is off and it's only used
					     * without pacing when we are doing 5G speed up for their
					     * ack filtering.
					     */
		rc_pacing_cc_set : 1,	     /*
					      * If we are pacing (pace_always=1) and we have reached the
					      * point where we start pacing (fixed or gp has reached its
					      * magic gp_ready state) this flag indicates we have set in
					      * the values that affect CC's backoff. If pacing is turned off
					      * then we must restore the values saved in rc_saved_beta;
					      * if it's going to gp_ready we need to copy the values into
					      * the CC module and set our flags.
					      *
					      * Note this only happens if the cc name is newreno (CCALGONAME_NEWRENO).
					      */

		rc_rack_tmr_std_based : 1,
		rc_rack_use_dsack : 1;
	uint8_t rc_dsack_round_seen : 1,
		rc_last_tlp_acked_set : 1,
		rc_last_tlp_past_cumack : 1,
		rc_last_sent_tlp_seq_valid : 1,
		rc_last_sent_tlp_past_cumack : 1,
		probe_not_answered : 1,
		rack_hibeta : 1,
		lt_bw_up : 1;
	uint32_t rc_rack_rtt;	/* RACK-RTT Lock(a) */
	uint16_t r_mbuf_queue : 1,	/* Do we do mbuf queueing for non-paced */
		 rtt_limit_mul : 4,	/* Multiply this by low rtt */
		 r_limit_scw : 1,
		 r_must_retran : 1,	/* For non-sack customers: we hit an RTO and must retransmit before new data */
		 r_use_cmp_ack : 1,	/* Do we use compressed acks */
		 r_ent_rec_ns : 1,	/* We entered recovery and have not sent */
		 r_might_revert : 1,	/* Flag to find out if we might need to revert */
		 r_fast_output : 1,	/* Fast output is in progress, we can skip the bulk of rack_output */
		 r_fsb_inited : 1,
		 r_rack_hw_rate_caps : 1,
		 r_up_only : 1,
		 r_via_fill_cw : 1,
		 r_rcvpath_rtt_up : 1;

	uint8_t rc_user_set_max_segs : 7,	/* Socket option value Lock(a) */
		rc_fillcw_apply_discount;
	uint8_t rc_labc;		/* Appropriate Byte Counting value */
	uint16_t forced_ack : 1,
		rc_gp_incr : 1,
		rc_gp_bwred : 1,
		rc_gp_timely_inc_cnt : 3,
		rc_gp_timely_dec_cnt : 3,
		r_use_labc_for_rec : 1,
		rc_highly_buffered : 1,		/* The path is highly buffered */
		rc_dragged_bottom : 1,
		rc_pace_dnd : 1,		/* The pace do-not-disturb bit */
		rc_initial_ss_comp : 1,
		rc_gp_filled : 1,
		rc_hw_nobuf : 1;
	uint8_t r_state : 4,	/* Current rack state Lock(a) */
		rc_catch_up : 1,	/* Catch up mode in dgp */
		rc_hybrid_mode : 1,	/* We are in hybrid mode */
		rc_suspicious : 1,	/* Suspect sacks have been given */
		rc_new_rnd_needed : 1;
	uint8_t rc_tmr_stopped : 7,
		t_timers_stopped : 1;
	uint8_t rc_enobuf : 7,	/* Count of enobufs on the connection */
		rc_on_min_to : 1;
	uint8_t r_timer_override : 1,	/* hpts override Lock(a) */
		r_is_v6 : 1,	/* V6 pcb Lock(a) */
		rc_in_persist : 1,
		rc_tlp_in_progress : 1,
		rc_always_pace : 1,	/* Socket option value Lock(a) */
		rc_pace_to_cwnd : 1,
		rc_pace_fill_if_rttin_range : 1,
		rc_srtt_measure_made : 1;
	uint8_t app_limited_needs_set : 1,
		use_fixed_rate : 1,
		rc_has_collapsed : 1,
		use_lesser_lt_bw : 1,
		cspr_is_fcc : 1,
		rack_hdrw_pacing : 1,	/* We are doing hardware pacing */
		rack_hdw_pace_ena : 1,	/* Is hardware pacing enabled? */
		rack_attempt_hdwr_pace : 1;	/* Did we attempt hdwr pacing (if allowed) */
	uint8_t rack_tlp_threshold_use : 3,	/* Only 1, 2 and 3 used so far */
		rack_rec_nonrxt_use_cr : 1,
		rack_enable_scwnd : 1,
		rack_attempted_scwnd : 1,
		rack_no_prr : 1,
		rack_scwnd_is_idle : 1;
	uint8_t rc_allow_data_af_clo : 1,
		delayed_ack : 1,
		set_pacing_done_a_iw : 1,
		use_rack_rr : 1,
		alloc_limit_reported : 1,
		rack_avail : 2,
		rc_force_max_seg : 1;
	uint8_t r_early : 1,
		r_late : 1,
		r_wanted_output : 1,
		r_rr_config : 2,
		r_persist_lt_bw_off : 1,
		r_collapse_point_valid : 1,
		dgp_on : 1;
	uint16_t rto_from_rec : 1,
		avail_bit : 1,
		pcm_in_progress : 1,
		pcm_needed : 1,
		policer_detect_on : 1,	/* Are we detecting policers? */
		rc_policer_detected : 1,	/* We are being policed */
		rc_policer_should_pace : 1,	/* The sizing algo thinks we should pace */
		rc_sendvars_notset : 1,		/* Inside rack_init send variables (snd_max/una etc) were not set */
		rc_gp_rtt_set : 1,
		rc_gp_dyn_mul : 1,
		rc_gp_saw_rec : 1,
		rc_gp_saw_ca : 1,
		rc_gp_saw_ss : 1,
		rc_gp_no_rec_chg : 1,
		in_probe_rtt : 1,
		measure_saw_probe_rtt : 1;
	/* Cache line 2 0x40 */
	struct rack_control r_ctl;
}        __aligned(CACHE_LINE_SIZE);


void rack_update_pcm_ack(struct tcp_rack *rack, int was_cumack,
	uint32_t ss, uint32_t es);

#endif	/* _KERNEL */
#endif	/* _NETINET_TCP_RACK_H_ */