Lines matching refs: icsk

120 void clean_acked_data_enable(struct inet_connection_sock *icsk,
123 icsk->icsk_clean_acked = cad;
128 void clean_acked_data_disable(struct inet_connection_sock *icsk)
131 icsk->icsk_clean_acked = NULL;
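
The pair of hits at lines 120-131 registers and clears a per-socket callback (icsk_clean_acked) that an upper-layer protocol can use to release buffered data once the peer has ACKed it; the hits at lines 3892-3893 show the ACK path NULL-checking and invoking it. Below is a minimal user-space sketch of the same register/check/invoke pattern. All names here (fake_sock, free_acked_records, on_ack) are illustrative stand-ins, not the kernel API.

/* User-space sketch of the register/check/invoke pattern behind
 * icsk_clean_acked. All names are illustrative, not the kernel's. */
#include <stdio.h>

struct fake_sock {
    void (*clean_acked)(struct fake_sock *sk, unsigned int acked_seq);
};

/* Plays the role of clean_acked_data_enable() at line 120. */
static void clean_acked_enable(struct fake_sock *sk,
                               void (*cad)(struct fake_sock *, unsigned int))
{
    sk->clean_acked = cad;
}

/* Plays the role of clean_acked_data_disable() at line 128. */
static void clean_acked_disable(struct fake_sock *sk)
{
    sk->clean_acked = NULL;
}

static void free_acked_records(struct fake_sock *sk, unsigned int acked_seq)
{
    printf("release buffered data up to seq %u\n", acked_seq);
}

/* Mirrors the check-then-call at lines 3892-3893 of the ACK path. */
static void on_ack(struct fake_sock *sk, unsigned int ack)
{
    if (sk->clean_acked)
        sk->clean_acked(sk, ack);
}

int main(void)
{
    struct fake_sock sk = { 0 };

    clean_acked_enable(&sk, free_acked_records);
    on_ack(&sk, 4321);
    clean_acked_disable(&sk);
    on_ack(&sk, 8642); /* no-op: callback cleared */
    return 0;
}
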
223 struct inet_connection_sock *icsk = inet_csk(sk);
224 const unsigned int lss = icsk->icsk_ack.last_seg_size;
227 icsk->icsk_ack.last_seg_size = 0;
233 if (len >= icsk->icsk_ack.rcv_mss) {
238 if (unlikely(len != icsk->icsk_ack.rcv_mss)) {
244 icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
247 DO_ONCE_LITE_IF(len > icsk->icsk_ack.rcv_mss + MAX_TCP_OPTION_SPACE,
261 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
282 icsk->icsk_ack.last_seg_size = len;
284 icsk->icsk_ack.rcv_mss = len;
288 if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
289 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
290 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
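
Lines 223-290 are hits inside the receiver's MSS estimator: incoming segment sizes are compared against icsk_ack.rcv_mss, full-sized segments raise the estimate, and last_seg_size remembers the most recent candidate. The sketch below models only that core idea in user space; the real function also handles option space, pushed segments, and delayed-ACK hints (lines 238-290), so treat the names and the simplified rule as assumptions.

/* Simplified user-space model of receive-MSS estimation: remember
 * the most recent candidate segment size and adopt any segment at
 * least as large as the current estimate. Omits the option-space
 * and push heuristics visible at lines 238-290. */
#include <stdio.h>

struct rcv_mss_est {
    unsigned int rcv_mss;       /* current estimate of the peer's MSS */
    unsigned int last_seg_size; /* size of the last full-sized candidate */
};

static void measure_rcv_mss(struct rcv_mss_est *e, unsigned int len)
{
    e->last_seg_size = 0;
    if (len >= e->rcv_mss) {
        /* A segment as large as the estimate: treat it as full-sized. */
        e->rcv_mss = len;
        e->last_seg_size = len;
    }
    /* Shorter segments leave the estimate untouched. */
}

int main(void)
{
    struct rcv_mss_est e = { .rcv_mss = 536, .last_seg_size = 0 };
    unsigned int lens[] = { 536, 1448, 512, 1448 };

    for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++) {
        measure_rcv_mss(&e, lens[i]);
        printf("len=%u -> rcv_mss=%u\n", lens[i], e.rcv_mss);
    }
    return 0;
}
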
296 struct inet_connection_sock *icsk = inet_csk(sk);
297 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
302 if (quickacks > icsk->icsk_ack.quick)
303 icsk->icsk_ack.quick = quickacks;
308 struct inet_connection_sock *icsk = inet_csk(sk);
312 icsk->icsk_ack.ato = TCP_ATO_MIN;
321 const struct inet_connection_sock *icsk = inet_csk(sk);
325 (icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk));
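
The expression at line 297 sizes the quickack budget as rcv_wnd / (2 * rcv_mss): enough immediate ACKs to cover half the receive window at one ACK per two full segments, kept as the larger of the new and existing icsk_ack.quick (lines 302-303). A worked example with illustrative numbers:

/* Worked example of the quickack budget formula from line 297.
 * The window and MSS values are illustrative. */
#include <stdio.h>

int main(void)
{
    unsigned int rcv_wnd = 65535;  /* advertised receive window, bytes */
    unsigned int rcv_mss = 1448;   /* estimated sender MSS, bytes */
    unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

    printf("quickacks = %u\n", quickacks); /* 65535 / 2896 = 22 */
    return 0;
}
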
590 struct inet_connection_sock *icsk = inet_csk(sk);
594 icsk->icsk_ack.quick = 0;
789 struct inet_connection_sock *icsk = inet_csk(sk);
792 icsk->icsk_ack.lrcv_flowlabel = ntohl(ip6_flowlabel(ipv6_hdr(skb)));
809 struct inet_connection_sock *icsk = inet_csk(sk);
820 if (!icsk->icsk_ack.ato) {
825 icsk->icsk_ack.ato = TCP_ATO_MIN;
827 int m = now - icsk->icsk_ack.lrcvtime;
831 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
832 } else if (m < icsk->icsk_ack.ato) {
833 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
834 if (icsk->icsk_ack.ato > icsk->icsk_rto)
835 icsk->icsk_ack.ato = icsk->icsk_rto;
836 } else if (m > icsk->icsk_rto) {
843 icsk->icsk_ack.lrcvtime = now;
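
Lines 809-843 update the delayed-ACK timeout (icsk_ack.ato) from the gap m between data arrivals: a dense stream decays ato toward TCP_ATO_MIN, a gap shorter than the current ato is folded in EWMA-style and capped at the RTO, and longer gaps leave it to the slow path. A user-space model of just those branches, with made-up millisecond constants standing in for TCP_ATO_MIN and icsk_rto (which the kernel keeps in jiffies):

/* Model of the ato update branches at lines 820-841. ATO_MIN and
 * RTO are illustrative millisecond stand-ins for TCP_ATO_MIN and
 * icsk_rto. */
#include <stdio.h>

#define ATO_MIN 40
#define RTO 200

static unsigned int update_ato(unsigned int ato, unsigned int m)
{
    if (!ato)
        return ATO_MIN;                  /* first data packet: line 825 */
    if (m <= ATO_MIN / 2)
        return (ato >> 1) + ATO_MIN / 2; /* dense stream: decay, line 831 */
    if (m < ato) {
        ato = (ato >> 1) + m;            /* fold the gap in, line 833 */
        return ato > RTO ? RTO : ato;    /* cap at RTO, lines 834-835 */
    }
    /* m >= ato: unchanged here; for m > RTO the real code takes the
     * "too long gap" branch at line 836 instead. */
    return ato;
}

int main(void)
{
    unsigned int ato = 0, gaps[] = { 5, 35, 35, 120 };

    for (unsigned int i = 0; i < sizeof(gaps) / sizeof(gaps[0]); i++) {
        ato = update_ato(ato, gaps[i]);
        printf("m=%u -> ato=%u\n", gaps[i], ato);
    }
    return 0;
}
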
2176 const struct inet_connection_sock *icsk = inet_csk(sk);
2179 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
2185 if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
2187 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
2190 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
2202 if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
2216 (new_recovery || icsk->icsk_retransmits) &&
2536 const struct inet_connection_sock *icsk = inet_csk(sk);
2538 tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk));
2752 struct inet_connection_sock *icsk = inet_csk(sk);
2754 icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
2755 icsk->icsk_mtup.probe_size = 0;
2762 struct inet_connection_sock *icsk = inet_csk(sk);
2768 do_div(val, icsk->icsk_mtup.probe_size);
2776 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
2777 icsk->icsk_mtup.probe_size = 0;
2778 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
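
Lines 2752-2778 show the two outcomes of an MTU probe: on failure the search ceiling drops to probe_size - 1, and on success the floor rises to probe_size before tcp_sync_mss() re-derives the MSS. Together the two updates binary-search the usable path MTU. A self-contained model follows, under the assumption of a simple midpoint probe rule; the kernel's actual probe sizing differs.

/* User-space model of the MTU-probe search window from lines
 * 2752-2778. The midpoint probe rule and all names are
 * illustrative assumptions. */
#include <stdio.h>

struct mtup {
    int search_low;  /* largest size known to work */
    int search_high; /* upper bound still worth probing */
};

static int next_probe(const struct mtup *m)
{
    return (m->search_low + m->search_high + 1) / 2;
}

int main(void)
{
    struct mtup m = { .search_low = 1024, .search_high = 1500 };

    while (m.search_low < m.search_high) {
        int probe = next_probe(&m);
        int delivered = probe <= 1400; /* pretend the path MTU is 1400 */

        if (delivered)
            m.search_low = probe;      /* mirrors line 2776 */
        else
            m.search_high = probe - 1; /* mirrors line 2754 */
        printf("probe=%d %s -> [%d, %d]\n", probe,
               delivered ? "ok" : "lost", m.search_low, m.search_high);
    }
    return 0;
}
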
2788 const struct inet_connection_sock *icsk = inet_csk(sk);
2828 if (icsk->icsk_ca_state != TCP_CA_Loss) {
3002 struct inet_connection_sock *icsk = inet_csk(sk);
3026 if (icsk->icsk_ca_state == TCP_CA_Open) {
3030 switch (icsk->icsk_ca_state) {
3051 switch (icsk->icsk_ca_state) {
3063 if (icsk->icsk_ca_state != TCP_CA_Recovery) {
3074 if (icsk->icsk_ca_state != TCP_CA_Loss)
3077 if (!(icsk->icsk_ca_state == TCP_CA_Open ||
3089 if (icsk->icsk_ca_state <= TCP_CA_Disorder)
3099 if (icsk->icsk_ca_state < TCP_CA_CWR &&
3100 icsk->icsk_mtup.probe_size &&
3191 const struct inet_connection_sock *icsk = inet_csk(sk);
3193 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
3202 const struct inet_connection_sock *icsk = inet_csk(sk);
3216 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
3217 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
3283 const struct inet_connection_sock *icsk = inet_csk(sk);
3414 if (unlikely(icsk->icsk_mtup.probe_size &&
3450 if (icsk->icsk_ca_ops->pkts_acked) {
3456 icsk->icsk_ca_ops->pkts_acked(sk, &sample);
3464 icsk = inet_csk(sk);
3467 tp->lost_out, icsk->icsk_ca_state);
3472 tp->sacked_out, icsk->icsk_ca_state);
3477 tp->retrans_out, icsk->icsk_ca_state);
3487 struct inet_connection_sock *icsk = inet_csk(sk);
3495 icsk->icsk_backoff = 0;
3496 icsk->icsk_probes_tstamp = 0;
3539 const struct inet_connection_sock *icsk = inet_csk(sk);
3541 if (icsk->icsk_ca_ops->cong_control) {
3542 icsk->icsk_ca_ops->cong_control(sk, rs);
3796 const struct inet_connection_sock *icsk = inet_csk(sk);
3798 if (icsk->icsk_ca_ops->in_ack_event)
3799 icsk->icsk_ca_ops->in_ack_event(sk, flags);
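
Many of the surrounding hits (lines 2190, 2538, 3193, 3450-3456, 3541-3542, 3798-3799) go through icsk_ca_ops, the per-socket congestion-control vtable: mandatory hooks such as ssthresh and cong_avoid are called directly, while optional ones like in_ack_event and cong_control are NULL-checked first. A toy user-space rendering of that pattern; the hook signatures and the "toy" policy are simplifications, not the kernel's struct tcp_congestion_ops.

/* Toy model of the icsk_ca_ops vtable pattern. Signatures are
 * simplified; the toy policy is not a real congestion control. */
#include <stdio.h>

struct sock;

struct ca_ops {
    unsigned int (*ssthresh)(struct sock *sk);        /* always provided */
    void (*cong_avoid)(struct sock *sk, int acked);   /* always provided */
    void (*in_ack_event)(struct sock *sk, int flags); /* optional */
};

struct sock {
    const struct ca_ops *ca_ops;
    unsigned int cwnd;
};

static unsigned int toy_ssthresh(struct sock *sk)
{
    return sk->cwnd / 2; /* halve on loss, Reno-style */
}

static void toy_cong_avoid(struct sock *sk, int acked)
{
    sk->cwnd += acked;   /* grow with each newly ACKed packet */
}

static const struct ca_ops toy = {
    .ssthresh   = toy_ssthresh,
    .cong_avoid = toy_cong_avoid,
    /* .in_ack_event left NULL: callers must check, as at line 3798 */
};

int main(void)
{
    struct sock sk = { .ca_ops = &toy, .cwnd = 10 };

    sk.ca_ops->cong_avoid(&sk, 2);       /* mirrors line 3193 */
    if (sk.ca_ops->in_ack_event)         /* mirrors lines 3798-3799 */
        sk.ca_ops->in_ack_event(&sk, 0);
    printf("cwnd=%u, ssthresh on loss=%u\n",
           sk.cwnd, sk.ca_ops->ssthresh(&sk)); /* ssthresh as at line 2190 */
    return 0;
}
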
3841 struct inet_connection_sock *icsk = inet_csk(sk);
3888 icsk->icsk_retransmits = 0;
3892 if (icsk->icsk_clean_acked)
3893 icsk->icsk_clean_acked(sk, ack);
3961 icsk->icsk_probes_out = 0;
6189 struct inet_connection_sock *icsk = inet_csk(sk);
6193 icsk->icsk_af_ops->rebuild_header(sk);
6210 if (!icsk->icsk_ca_initialized)
6218 struct inet_connection_sock *icsk = inet_csk(sk);
6222 icsk->icsk_ack.lrcvtime = tcp_jiffies32;
6225 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
6337 struct inet_connection_sock *icsk = inet_csk(sk);
6360 if (icsk->icsk_retransmits == 0)
6442 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
6466 READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept) ||
6543 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
6626 struct inet_connection_sock *icsk = inet_csk(sk);
6655 icsk->icsk_af_ops->conn_request(sk, skb);