Lines Matching refs:tp

7 const struct tcp_sock *tp = tcp_sk(sk);
9 if (!tp->reord_seen) {
16 if (tp->sacked_out >= tp->reordering &&
28 return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
29 tp->srtt_us >> 3);
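
The return on lines 28-29 is the whole RACK reordering window: a quarter of the path's minimum RTT, scaled by rack.reo_wnd_steps and capped at the smoothed RTT (tp->srtt_us is stored left-shifted by 3, hence the >> 3). A minimal user-space restatement of that arithmetic, with the field names borrowed from the kernel struct and everything else assumed:

#include <stdint.h>

/* Hypothetical stand-ins for the kernel fields used by the formula. */
struct rack_wnd_sketch {
	uint32_t min_rtt_us;      /* tcp_min_rtt(tp): windowed minimum RTT           */
	uint32_t srtt_us_shifted; /* tp->srtt_us: smoothed RTT, stored << 3          */
	uint32_t reo_wnd_steps;   /* tp->rack.reo_wnd_steps: DSACK-driven multiplier */
};

static uint32_t rack_reo_wnd_sketch(const struct rack_wnd_sketch *r)
{
	uint32_t wnd = (r->min_rtt_us >> 2) * r->reo_wnd_steps; /* min_rtt/4 * steps  */
	uint32_t cap = r->srtt_us_shifted >> 3;                 /* smoothed RTT in us */

	return wnd < cap ? wnd : cap;
}
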
32 s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd)
34 return tp->rack.rtt_us + reo_wnd -
35 tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
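
tcp_rack_skb_timeout() (lines 32-35) answers how much longer to wait before declaring a given skb lost: the RTT of the most recently delivered packet, plus the reordering window, minus how long ago the skb was sent; a result <= 0 means it is already overdue. A rough sketch under the assumption that all timestamps are in microseconds:

#include <stdint.h>

/* Remaining wait time in microseconds; <= 0 means the packet should be marked
 * lost now.  rack_rtt_us ~ tp->rack.rtt_us, now_us ~ tp->tcp_mstamp,
 * sent_us ~ tcp_skb_timestamp_us(skb) (all assumed names).
 */
static int64_t rack_skb_timeout_sketch(uint64_t now_us, uint64_t sent_us,
				       uint32_t rack_rtt_us, uint32_t reo_wnd_us)
{
	int64_t elapsed_us = (int64_t)(now_us - sent_us);

	return (int64_t)rack_rtt_us + reo_wnd_us - elapsed_us;
}
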
60 struct tcp_sock *tp = tcp_sk(sk);
66 list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
76 if (!tcp_skb_sent_after(tp->rack.mstamp,
78 tp->rack.end_seq, scb->end_seq))
84 remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
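
Lines 60-84 come from the loss-detection walk: it iterates the send-time-ordered list, stops once it reaches packets sent at or after the most recently delivered one (tcp_skb_sent_after(), with end_seq as the tie-breaker), and uses tcp_rack_skb_timeout() to decide whether each earlier packet is overdue. A simplified sketch of that walk over a plain array; everything except the comparison logic is assumed, and the kernel additionally derives a re-check timer from the packets that are not yet overdue:

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

struct sent_pkt {            /* hypothetical stand-in for one tracked skb */
	uint64_t sent_us;
	uint32_t end_seq;
	bool     lost;
};

/* pkts[] is ordered by send time, oldest first, like tp->tsorted_sent_queue.
 * rack_mstamp/rack_end_seq mirror tp->rack.mstamp / tp->rack.end_seq.
 * Returns how many packets were marked lost.
 */
static size_t rack_detect_loss_sketch(struct sent_pkt *pkts, size_t n,
				      uint64_t now_us, uint64_t rack_mstamp,
				      uint32_t rack_end_seq,
				      uint32_t rack_rtt_us, uint32_t reo_wnd_us)
{
	size_t marked = 0;

	for (size_t i = 0; i < n; i++) {
		struct sent_pkt *p = &pkts[i];

		/* Stop at the first packet sent at or after the most recently
		 * delivered one: nothing from here on can be declared lost yet.
		 */
		if (p->sent_us > rack_mstamp ||
		    (p->sent_us == rack_mstamp && p->end_seq >= rack_end_seq))
			break;

		if ((int64_t)rack_rtt_us + reo_wnd_us -
		    (int64_t)(now_us - p->sent_us) <= 0) {
			p->lost = true;
			marked++;
		}
	}
	return marked;
}
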
97 struct tcp_sock *tp = tcp_sk(sk);
100 if (!tp->rack.advanced)
104 tp->rack.advanced = 0;
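
Lines 97-104 show the guard in tcp_rack_mark_lost(): detection only runs when tcp_rack_advance() has recorded new delivery state since the last pass, and the flag is cleared before detecting. A tiny sketch of that dirty-flag pattern, names assumed:

#include <stdbool.h>

struct rack_flag_sketch {
	bool advanced;    /* ~ tp->rack.advanced */
};

/* Only run detection when delivery state advanced since the last ACK,
 * and clear the flag before doing so.
 */
static bool rack_should_detect(struct rack_flag_sketch *r)
{
	if (!r->advanced)
		return false;
	r->advanced = false;
	return true;
}
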
118 void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
123 rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
124 if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
137 tp->rack.advanced = 1;
138 tp->rack.rtt_us = rtt_us;
139 if (tcp_skb_sent_after(xmit_time, tp->rack.mstamp,
140 end_seq, tp->rack.end_seq)) {
141 tp->rack.mstamp = xmit_time;
142 tp->rack.end_seq = end_seq;
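
tcp_rack_advance() (lines 118-142) runs for each newly ACKed or SACKed packet: it discards RTT samples from retransmitted packets that come back faster than the path's minimum RTT (those ACKs almost certainly belong to the original transmission), then records the send time and end_seq of the most recently sent packet known to be delivered. A condensed sketch of that state update, with struct and helper names assumed and sequence wrap-around ignored for brevity:

#include <stdint.h>
#include <stdbool.h>

struct rack_adv_sketch {     /* mirrors the tp->rack fields touched here */
	uint64_t mstamp;     /* send time of most recently sent delivered pkt */
	uint32_t end_seq;
	uint32_t rtt_us;
	bool     advanced;
};

static void rack_advance_sketch(struct rack_adv_sketch *rack, bool retransmitted,
				uint32_t end_seq, uint64_t xmit_time_us,
				uint64_t now_us, uint32_t min_rtt_us)
{
	uint32_t rtt_us = (uint32_t)(now_us - xmit_time_us);

	/* A retransmitted packet ACKed in less than the minimum RTT was very
	 * likely ACKed for its original copy; using it would shrink the RACK
	 * RTT spuriously, so skip the sample.
	 */
	if (retransmitted && rtt_us < min_rtt_us)
		return;

	rack->advanced = true;
	rack->rtt_us = rtt_us;
	if (xmit_time_us > rack->mstamp ||
	    (xmit_time_us == rack->mstamp && end_seq > rack->end_seq)) {
		rack->mstamp = xmit_time_us;
		rack->end_seq = end_seq;
	}
}
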
151 struct tcp_sock *tp = tcp_sk(sk);
153 u32 lost = tp->lost;
155 prior_inflight = tcp_packets_in_flight(tp);
157 if (prior_inflight != tcp_packets_in_flight(tp)) {
161 tcp_cwnd_reduction(sk, 1, tp->lost - lost, 0);
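
Lines 151-161 are from the reordering-timer handler tcp_rack_reo_timeout(): it snapshots tp->lost and the in-flight count, reruns loss detection, and if the in-flight count changed it enters recovery and feeds the number of newly lost packets into the congestion-window reduction. A skeletal restatement of that bookkeeping; the detection stub and all names here are stand-ins, not the kernel helpers:

#include <stdint.h>
#include <stdbool.h>

struct conn_sketch {
	uint32_t lost;        /* total packets ever marked lost (~ tp->lost) */
	uint32_t in_flight;   /* ~ tcp_packets_in_flight(tp)                 */
	bool     in_recovery;
};

/* Stand-in for tcp_rack_detect_loss(): pretend one packet timed out. */
static void detect_loss_stub(struct conn_sketch *c)
{
	c->lost += 1;
	c->in_flight -= 1;
}

/* Returns the count of newly lost packets to feed into cwnd reduction when
 * recovery is entered from this timer, 0 otherwise.
 */
static uint32_t rack_reo_timeout_sketch(struct conn_sketch *c)
{
	uint32_t prior_lost = c->lost;
	uint32_t prior_inflight = c->in_flight;

	detect_loss_stub(c);
	if (prior_inflight == c->in_flight)
		return 0;                 /* detection marked nothing lost */

	if (!c->in_recovery) {
		c->in_recovery = true;    /* ~ tcp_enter_recovery()        */
		return c->lost - prior_lost;
	}
	return 0;
}
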
189 struct tcp_sock *tp = tcp_sk(sk);
197 if (before(rs->prior_delivered, tp->rack.last_delivered))
198 tp->rack.dsack_seen = 0;
201 if (tp->rack.dsack_seen) {
202 tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
203 tp->rack.reo_wnd_steps + 1);
204 tp->rack.dsack_seen = 0;
205 tp->rack.last_delivered = tp->delivered;
206 tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
207 } else if (!tp->rack.reo_wnd_persist) {
208 tp->rack.reo_wnd_steps = 1;
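
Lines 189-208 adapt the reordering window from DSACK feedback: a DSACK for data delivered before the last adjustment is disregarded, otherwise reo_wnd_steps grows by one (capped at 0xFF), the larger window is kept while reo_wnd_persist (reset to TCP_RACK_RECOVERY_THRESH) is non-zero, and it decays back to a single step once that persistence runs out. A condensed sketch of that state machine; the threshold value and all names are assumptions:

#include <stdint.h>
#include <stdbool.h>

/* Placeholder persistence value; the kernel uses TCP_RACK_RECOVERY_THRESH. */
#define RACK_PERSIST_SKETCH 16

struct reo_wnd_sketch {
	uint8_t  steps;           /* ~ tp->rack.reo_wnd_steps   */
	uint8_t  persist;         /* ~ tp->rack.reo_wnd_persist */
	bool     dsack_seen;      /* ~ tp->rack.dsack_seen      */
	uint32_t last_delivered;  /* ~ tp->rack.last_delivered  */
};

static void rack_update_reo_wnd_sketch(struct reo_wnd_sketch *r,
				       uint32_t prior_delivered,
				       uint32_t delivered_now)
{
	/* Ignore a DSACK for data delivered before the last adjustment:
	 * less than a round trip has passed since the window was grown.
	 */
	if ((int32_t)(prior_delivered - r->last_delivered) < 0)
		r->dsack_seen = false;

	if (r->dsack_seen) {
		if (r->steps < 0xFF)
			r->steps++;                   /* widen the window */
		r->dsack_seen = false;
		r->last_delivered = delivered_now;
		r->persist = RACK_PERSIST_SKETCH;
	} else if (!r->persist) {
		r->steps = 1;                         /* decay back to 1  */
	}
}
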
220 struct tcp_sock *tp = tcp_sk(sk);
222 if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
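
The final fragment (line 222) is the NewReno, non-SACK fallback: before recovery has begun, the classic duplicate-ACK threshold (sacked_out >= reordering) triggers marking; the other arm of the || is not captured by this listing. A sketch of just the visible arm, with an assumed stand-in for the kernel's ca_state enum:

#include <stdint.h>
#include <stdbool.h>

enum ca_state_sketch { CA_OPEN, CA_DISORDER, CA_CWR, CA_RECOVERY, CA_LOSS };

/* True when the duplicate-ACK threshold fires before recovery has started. */
static bool newreno_dupthresh_fires(enum ca_state_sketch state,
				    uint32_t sacked_out, uint32_t reordering)
{
	return state < CA_RECOVERY && sacked_out >= reordering;
}
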