Lines matching defs:tp: accesses to struct tcp_sock state, apparently from net/ipv4/tcp.c (the leading numbers are that file's source line numbers)

401 static u64 tcp_compute_delivery_rate(const struct tcp_sock *tp)
403 u32 rate = READ_ONCE(tp->rate_delivered);
404 u32 intv = READ_ONCE(tp->rate_interval_us);
408 rate64 = (u64)rate * tp->mss_cache * USEC_PER_SEC;
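
The three lines above are the heart of tcp_compute_delivery_rate(): the latest rate sample (segments delivered over an interval in microseconds) is scaled by the cached MSS into bytes per second. A minimal standalone restatement of the arithmetic, with my own variable names:

#include <stdint.h>

/* Sketch of the same computation: segs * bytes/segment * 1e6 usec/sec,
 * divided by the sample interval in usec, gives bytes per second. */
static uint64_t delivery_rate_Bps(uint32_t segs, uint32_t mss, uint32_t intv_us)
{
        if (!segs || !intv_us)
                return 0;
        return (uint64_t)segs * mss * 1000000ULL / intv_us;
}
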
422 struct tcp_sock *tp = tcp_sk(sk);
424 tp->out_of_order_queue = RB_ROOT;
427 INIT_LIST_HEAD(&tp->tsq_node);
428 INIT_LIST_HEAD(&tp->tsorted_sent_queue);
433 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
434 minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);
441 tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
444 tp->app_limited = ~0U;
445 tp->rate_app_limited = 1;
450 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
451 tp->snd_cwnd_clamp = ~0;
452 tp->mss_cache = TCP_MSS_DEFAULT;
454 tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
457 tp->tsoffset = 0;
458 tp->rack.reo_wnd_steps = 1;
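
This block is tcp_init_sock() seeding a fresh socket: an empty out-of-order rbtree and TSQ lists, an RTT estimator primed from the 1-second initial RTO, the RFC 6928 initial cwnd, an effectively infinite ssthresh and cwnd clamp, the 536-byte fallback MSS, and app_limited = ~0U so the very first rate sample counts as application-limited. For reference, the constants involved (a hedged summary; definitions live in include/net/tcp.h, values as of recent kernels):

#define TCP_TIMEOUT_INIT      ((unsigned)(1 * HZ))   /* RFC 6298 initial RTO: 1 s */
#define TCP_INIT_CWND         10                     /* RFC 6928 initial window   */
#define TCP_MSS_DEFAULT       536                    /* RFC 1122 fallback MSS     */
#define TCP_INFINITE_SSTHRESH 0x7fffffff             /* "no ssthresh learned yet" */
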
508 const struct tcp_sock *tp = tcp_sk(sk);
560 (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
562 u16 urg_data = READ_ONCE(tp->urg_data);
565 READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
613 struct tcp_sock *tp = tcp_sk(sk);
627 answ = READ_ONCE(tp->urg_data) &&
628 READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
637 answ = READ_ONCE(tp->write_seq) - tp->snd_una;
646 answ = READ_ONCE(tp->write_seq) -
647 READ_ONCE(tp->snd_nxt);
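
These branches are from tcp_ioctl(): SIOCATMARK tests whether copied_seq sits exactly at the urgent mark, SIOCOUTQ reports write_seq - snd_una (queued but not yet acknowledged), and SIOCOUTQNSD reports write_seq - snd_nxt (queued but not yet sent). A hedged userspace counterpart:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>

/* Query both send-queue counters the lines above compute. */
static void print_sendq(int fd)
{
        int unacked = 0, unsent = 0;

        if (!ioctl(fd, SIOCOUTQ, &unacked) && !ioctl(fd, SIOCOUTQNSD, &unsent))
                printf("unacked=%d unsent=%d\n", unacked, unsent);
}
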
658 void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
661 tp->pushed_seq = tp->write_seq;
664 static inline bool forced_push(const struct tcp_sock *tp)
666 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
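
tcp_mark_push() records pushed_seq = write_seq; forced_push() then fires once more than half of the largest window the peer has ever advertised has been queued since that mark. Restated outside the kernel's after() macro, roughly:

#include <stdbool.h>
#include <stdint.h>

/* after(a, b) in the kernel is wrap-safe sequence comparison,
 * (int32_t)(a - b) > 0. Push once > max_window/2 bytes are unpushed. */
static bool forced_push_sketch(uint32_t write_seq, uint32_t pushed_seq,
                               uint32_t max_window)
{
        return (int32_t)(write_seq - (pushed_seq + (max_window >> 1))) > 0;
}
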
671 struct tcp_sock *tp = tcp_sk(sk);
674 tcb->seq = tcb->end_seq = tp->write_seq;
680 if (tp->nonagle & TCP_NAGLE_PUSH)
681 tp->nonagle &= ~TCP_NAGLE_PUSH;
686 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
689 tp->snd_up = tp->write_seq;
715 struct tcp_sock *tp = tcp_sk(sk);
721 if (!(flags & MSG_MORE) || forced_push(tp))
722 tcp_mark_push(tp, skb);
724 tcp_mark_urg(tp, flags);
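
tcp_push() ties these together: the last queued skb gets the PSH mark unless MSG_MORE promises more data (or forced_push() trips anyway), and MSG_OOB routes through tcp_mark_urg() to move snd_up to the end of the written data.
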
907 struct tcp_sock *tp = tcp_sk(sk);
914 new_size_goal = tcp_bound_to_half_wnd(tp, sk->sk_gso_max_size);
917 size_goal = tp->gso_segs * mss_now;
920 tp->gso_segs = min_t(u16, new_size_goal / mss_now,
922 size_goal = tp->gso_segs * mss_now;
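
This is tcp_xmit_size_goal(): the goal starts from the GSO maximum, is bounded to half the send window, and is then snapped to a whole number of MSS-sized segments via tp->gso_segs. A hedged restatement of the rounding (max_segs stands in for sk_gso_max_segs):

#include <stdint.h>

/* Snap a byte goal to whole MSS multiples, capped at what one GSO
 * super-packet may carry. */
static uint32_t size_goal_sketch(uint32_t mss_now, uint32_t new_goal,
                                 uint16_t max_segs)
{
        uint16_t segs = new_goal / mss_now;

        if (segs > max_segs)
                segs = max_segs;
        return (uint32_t)segs * mss_now;
}
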
990 void tcp_free_fastopen_req(struct tcp_sock *tp)
992 if (tp->fastopen_req) {
993 kfree(tp->fastopen_req);
994 tp->fastopen_req = NULL;
1001 struct tcp_sock *tp = tcp_sk(sk);
1011 if (tp->fastopen_req)
1014 tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1016 if (unlikely(!tp->fastopen_req))
1018 tp->fastopen_req->data = msg;
1019 tp->fastopen_req->size = size;
1020 tp->fastopen_req->uarg = uarg;
1037 if (tp->fastopen_req) {
1038 *copied = tp->fastopen_req->copied;
1039 tcp_free_fastopen_req(tp);
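
tcp_sendmsg_fastopen() parks the user's message in tp->fastopen_req so it can ride in the SYN; after the handshake the request is freed and the byte count copied back out. From userspace this path is reached, for example, with MSG_FASTOPEN on a not-yet-connected socket:

#include <netinet/in.h>
#include <sys/socket.h>

/* Send data in the SYN of a TCP Fast Open connection (Linux 3.7+). */
static ssize_t tfo_sendto(int fd, const void *buf, size_t len,
                          const struct sockaddr_in *dst)
{
        return sendto(fd, buf, len, MSG_FASTOPEN,
                      (const struct sockaddr *)dst, sizeof(*dst));
}
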
1047 struct tcp_sock *tp = tcp_sk(sk);
1083 !tp->repair) {
1106 if (unlikely(tp->repair)) {
1107 if (tp->repair_queue == TCP_RECV_QUEUE) {
1113 if (tp->repair_queue == TCP_NO_QUEUE)
1178 if (tp->repair)
1197 tcp_mark_push(tp, skb);
1246 tcp_mark_push(tp, skb);
1264 tcp_mark_push(tp, skb);
1281 WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
1292 if (skb->len < size_goal || (flags & MSG_OOB) || unlikely(tp->repair))
1295 if (forced_push(tp)) {
1296 tcp_mark_push(tp, skb);
1319 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
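
The tcp_sendmsg_locked() fragment above shows the steady-state send loop: data is copied into the tail skb, write_seq advances under WRITE_ONCE (lockless readers such as tcp_ioctl() load it with READ_ONCE), a push is marked whenever forced_push() trips or the skb reaches size_goal, and the final tcp_push() hands the stored nonagle state to the output engine.
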
1361 struct tcp_sock *tp = tcp_sk(sk);
1369 tcp_push(sk, 0, mss_now, tp->nonagle, size_goal);
1381 struct tcp_sock *tp = tcp_sk(sk);
1384 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1385 tp->urg_data == TCP_URG_READ)
1391 if (tp->urg_data & TCP_URG_VALID) {
1393 char c = tp->urg_data;
1396 WRITE_ONCE(tp->urg_data, TCP_URG_READ);
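
tcp_recv_urg() delivers the single out-of-band byte that urg_data tracks: TCP_URG_VALID flags a byte present, and consuming it overwrites the word with TCP_URG_READ. The corresponding userspace read (unless SO_OOBINLINE keeps the byte in the normal stream):

#include <sys/socket.h>

/* Fetch the pending OOB byte; fails with EINVAL once it has been
 * consumed (the TCP_URG_READ state above). */
static int read_oob(int fd, char *c)
{
        return recv(fd, c, 1, MSG_OOB);
}
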
1454 struct tcp_sock *tp = tcp_sk(sk);
1461 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1483 __u32 rcv_window_now = tcp_receive_window(tp);
1486 if (2*rcv_window_now <= tp->window_clamp) {
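
This is tcp_cleanup_rbuf() deciding whether the just-finished read deserves an immediate window-update ACK: only when the currently advertised window has shrunk to half the clamp or less is the relatively expensive __tcp_select_window() consulted, and the ACK is sent only if the freed space would at least double the window.
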
1505 struct tcp_sock *tp = tcp_sk(sk);
1507 WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1509 tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1565 struct tcp_sock *tp = tcp_sk(sk);
1566 u32 seq = tp->copied_seq;
1579 if (unlikely(tp->urg_data)) {
1580 u32 urg_offset = tp->urg_seq - seq;
1620 WRITE_ONCE(tp->copied_seq, seq);
1622 WRITE_ONCE(tp->copied_seq, seq);
1667 struct tcp_sock *tp = tcp_sk(sk);
1668 u32 seq = tp->copied_seq;
1694 WRITE_ONCE(tp->copied_seq, seq);
2093 struct tcp_sock *tp = tcp_sk(sk);
2098 u32 seq = tp->copied_seq;
2212 WRITE_ONCE(tp->copied_seq, seq);
2298 const struct tcp_sock *tp = tcp_sk(sk);
2299 u32 copied_seq = READ_ONCE(tp->copied_seq);
2300 u32 rcv_nxt = READ_ONCE(tp->rcv_nxt);
2304 if (unlikely(inq < 0 || copied_seq != READ_ONCE(tp->copied_seq))) {
2306 inq = tp->rcv_nxt - tp->copied_seq;
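
tcp_inq_hint() computes rcv_nxt - copied_seq locklessly, which is why every writer of copied_seq in the read paths above uses WRITE_ONCE; when copied_seq moved underneath it, the hint falls back to a locked recomputation. Userspace receives the hint as a control message once TCP_INQ is enabled (Linux 4.18+); a hedged consumer:

#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

/* Pull the bytes-still-queued hint out of recvmsg() ancillary data.
 * Enable it first with: setsockopt(fd, SOL_TCP, TCP_INQ, &one, sizeof(one)). */
static int tcp_inq_from_cmsg(struct msghdr *msg)
{
        struct cmsghdr *cm;
        int inq = -1;

        for (cm = CMSG_FIRSTHDR(msg); cm; cm = CMSG_NXTHDR(msg, cm))
                if (cm->cmsg_level == SOL_TCP && cm->cmsg_type == TCP_CM_INQ)
                        memcpy(&inq, CMSG_DATA(cm), sizeof(inq));
        return inq;
}
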
2329 struct tcp_sock *tp = tcp_sk(sk);
2345 if (tp->recvmsg_inq) {
2355 if (unlikely(tp->repair)) {
2360 if (tp->repair_queue == TCP_SEND_QUEUE)
2364 if (tp->repair_queue == TCP_NO_QUEUE)
2370 seq = &tp->copied_seq;
2373 peek_seq = tp->copied_seq + peek_offset;
2383 if (unlikely(tp->urg_data) && tp->urg_seq == *seq) {
2402 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
2417 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
2476 (peek_seq - peek_offset - copied - urg_hole != tp->copied_seq)) {
2480 peek_seq = tp->copied_seq + peek_offset;
2491 if (unlikely(tp->urg_data)) {
2492 u32 urg_offset = tp->urg_seq - *seq;
2528 if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) {
2529 WRITE_ONCE(tp->urg_data, 0);
2904 struct tcp_sock *tp = tcp_sk(sk);
2905 if (READ_ONCE(tp->linger2) < 0) {
3013 struct tcp_sock *tp = tcp_sk(sk);
3023 } else if (unlikely(tp->repair)) {
3026 (tp->snd_nxt != tp->write_seq &&
3038 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
3039 WRITE_ONCE(tp->urg_data, 0);
3043 skb_rbtree_purge(&tp->out_of_order_queue);
3051 tp->srtt_us = 0;
3052 tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
3053 tp->rcv_rtt_last_tsecr = 0;
3055 seq = tp->write_seq + tp->max_window + 2;
3058 WRITE_ONCE(tp->write_seq, seq);
3066 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
3067 tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
3068 tp->snd_cwnd_cnt = 0;
3069 tp->is_cwnd_limited = 0;
3070 tp->max_packets_out = 0;
3071 tp->window_clamp = 0;
3072 tp->delivered = 0;
3073 tp->delivered_ce = 0;
3079 tp->is_sack_reneg = 0;
3080 tcp_clear_retrans(tp);
3081 tp->total_retrans = 0;
3087 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
3090 tcp_saved_syn_free(tp);
3091 tp->compressed_ack = 0;
3092 tp->segs_in = 0;
3093 tp->segs_out = 0;
3094 tp->bytes_sent = 0;
3095 tp->bytes_acked = 0;
3096 tp->bytes_received = 0;
3097 tp->bytes_retrans = 0;
3098 tp->data_segs_in = 0;
3099 tp->data_segs_out = 0;
3100 tp->duplicate_sack[0].start_seq = 0;
3101 tp->duplicate_sack[0].end_seq = 0;
3102 tp->dsack_dups = 0;
3103 tp->reord_seen = 0;
3104 tp->retrans_out = 0;
3105 tp->sacked_out = 0;
3106 tp->tlp_high_seq = 0;
3107 tp->last_oow_ack_time = 0;
3108 tp->plb_rehash = 0;
3110 tp->app_limited = ~0U;
3111 tp->rate_app_limited = 1;
3112 tp->rack.mstamp = 0;
3113 tp->rack.advanced = 0;
3114 tp->rack.reo_wnd_steps = 1;
3115 tp->rack.last_delivered = 0;
3116 tp->rack.reo_wnd_persist = 0;
3117 tp->rack.dsack_seen = 0;
3118 tp->syn_data_acked = 0;
3119 tp->rx_opt.saw_tstamp = 0;
3120 tp->rx_opt.dsack = 0;
3121 tp->rx_opt.num_sacks = 0;
3122 tp->rcv_ooopack = 0;
3126 tcp_free_fastopen_req(tp);
3128 tp->fastopen_client_fail = 0;
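
The linger2 test is tcp_close() deciding whether the socket may linger in FIN_WAIT2; the long run of assignments after it is tcp_disconnect() returning a socket to a pristine CLOSE state so it can be reused. Userspace reaches it by "connecting" to AF_UNSPEC:

#include <string.h>
#include <sys/socket.h>

/* connect(2) with AF_UNSPEC dissolves the association and runs
 * tcp_disconnect(), resetting all of the state listed above. */
static int tcp_dissolve(int fd)
{
        struct sockaddr sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_family = AF_UNSPEC;
        return connect(fd, &sa, sizeof(sa));
}
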
3148 static int tcp_repair_set_window(struct tcp_sock *tp, sockptr_t optbuf, int len)
3152 if (!tp->repair)
3164 if (after(opt.snd_wl1, tp->rcv_nxt + opt.rcv_wnd))
3167 if (after(opt.rcv_wup, tp->rcv_nxt))
3170 tp->snd_wl1 = opt.snd_wl1;
3171 tp->snd_wnd = opt.snd_wnd;
3172 tp->max_window = opt.max_window;
3174 tp->rcv_wnd = opt.rcv_wnd;
3175 tp->rcv_wup = opt.rcv_wup;
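
tcp_repair_set_window() only accepts a snapshot while the socket is in repair mode, and it sanity-checks that the restored snd_wl1 and rcv_wup stay consistent with rcv_nxt. The checkpoint/restore caller looks roughly like this (a CRIU-style sketch; requires CAP_NET_ADMIN):

#include <linux/tcp.h>          /* TCP_REPAIR, TCP_REPAIR_WINDOW, struct tcp_repair_window */
#include <netinet/in.h>
#include <sys/socket.h>

static int restore_window(int fd, const struct tcp_repair_window *w)
{
        int on = 1;

        if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on)))
                return -1;
        return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_WINDOW, w, sizeof(*w));
}
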
3183 struct tcp_sock *tp = tcp_sk(sk);
3196 tp->rx_opt.mss_clamp = opt.opt_val;
3207 tp->rx_opt.snd_wscale = snd_wscale;
3208 tp->rx_opt.rcv_wscale = rcv_wscale;
3209 tp->rx_opt.wscale_ok = 1;
3216 tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
3222 tp->rx_opt.tstamp_ok = 1;
3256 struct tcp_sock *tp = tcp_sk(sk);
3259 tp->nonagle |= TCP_NAGLE_CORK;
3261 tp->nonagle &= ~TCP_NAGLE_CORK;
3262 if (tp->nonagle & TCP_NAGLE_OFF)
3263 tp->nonagle |= TCP_NAGLE_PUSH;
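
__tcp_sock_set_cork(): corking sets TCP_NAGLE_CORK so partial segments are held back; uncorking clears it and, if Nagle is already disabled (TCP_NAGLE_OFF), sets the one-shot TCP_NAGLE_PUSH so queued data leaves immediately. The userspace knob:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Cork before assembling a multi-part response; uncork to flush. */
static int tcp_cork(int fd, int on)
{
        return setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
}
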
3350 struct tcp_sock *tp = tcp_sk(sk);
3356 WRITE_ONCE(tp->keepalive_time, val * HZ);
3359 u32 elapsed = keepalive_time_elapsed(tp);
3361 if (tp->keepalive_time > elapsed)
3362 elapsed = tp->keepalive_time - elapsed;
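
tcp_sock_set_keepidle_locked(): the new idle time is stored as val * HZ, and if keepalives are already running the timer is re-armed with credit for idle time that has already elapsed. Typical userspace setup:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int enable_keepalive(int fd, int idle_s, int intvl_s, int cnt)
{
        int on = 1;

        if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) ||
            setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle_s, sizeof(idle_s)) ||
            setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl_s, sizeof(intvl_s)) ||
            setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)))
                return -1;
        return 0;
}
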
3405 struct tcp_sock *tp = tcp_sk(sk);
3410 WRITE_ONCE(tp->window_clamp, 0);
3412 u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp;
3419 WRITE_ONCE(tp->window_clamp, new_window_clamp);
3424 __tcp_adjust_rcv_ssthresh(sk, tp->window_clamp);
3427 new_rcv_ssthresh = min(tp->rcv_wnd, tp->window_clamp);
3428 tp->rcv_ssthresh = max(new_rcv_ssthresh,
3429 tp->rcv_ssthresh);
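
__tcp_set_window_clamp(): zero removes the clamp entirely, while a nonzero value also pulls rcv_ssthresh into line so the advertised window tracks the new limit. Set from userspace with:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int clamp_window(int fd, int bytes)
{
        return setsockopt(fd, IPPROTO_TCP, TCP_WINDOW_CLAMP, &bytes, sizeof(bytes));
}
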
3441 struct tcp_sock *tp = tcp_sk(sk);
3528 WRITE_ONCE(tp->linger2, -1);
3530 WRITE_ONCE(tp->linger2, TCP_FIN_TIMEOUT_MAX);
3532 WRITE_ONCE(tp->linger2, val * HZ);
3554 tp->rx_opt.user_mss = val;
3565 tp->thin_lto = val;
3577 tp->repair = 1;
3579 tp->repair_queue = TCP_NO_QUEUE;
3581 tp->repair = 0;
3585 tp->repair = 0;
3593 if (!tp->repair)
3596 tp->repair_queue = val;
3604 } else if (tp->repair_queue == TCP_SEND_QUEUE) {
3608 WRITE_ONCE(tp->write_seq, val);
3609 } else if (tp->repair_queue == TCP_RECV_QUEUE) {
3610 if (tp->rcv_nxt != tp->copied_seq) {
3613 WRITE_ONCE(tp->rcv_nxt, val);
3614 WRITE_ONCE(tp->copied_seq, val);
3622 if (!tp->repair)
3624 else if (sk->sk_state == TCP_ESTABLISHED && !tp->bytes_sent)
3642 tp->save_syn = val;
3673 if (tp->repair)
3678 err = tp->af_specific->ao_parse(sk, optname, optval, optlen);
3685 err = tp->af_specific->md5_parse(sk, optname, optval, optlen);
3704 tp->fastopen_connect = val;
3717 tp->fastopen_no_cookie = val;
3720 if (!tp->repair) {
3728 tp->tcp_usec_ts = val & 1;
3729 WRITE_ONCE(tp->tsoffset, val - tcp_clock_ts(tp->tcp_usec_ts));
3732 err = tcp_repair_set_window(tp, optval, optlen);
3735 WRITE_ONCE(tp->notsent_lowat, val);
3742 tp->recvmsg_inq = val;
3747 WRITE_ONCE(tp->tcp_tx_delay, val);
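
The do_tcp_setsockopt() cases above cover the remaining per-socket knobs: FIN timeout (linger2), user MSS, thin-stream timeouts, the repair interfaces and queue-sequence writes, saved-SYN capture, MD5/AO keys, the Fast Open variants, the timestamp offset, notsent_lowat, receive-queue hints, and pacing delay. One representative example, TCP_NOTSENT_LOWAT, which pairs with the write_seq - snd_nxt accounting seen earlier:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Keep at most `bytes` of unsent data buffered before poll() stops
 * reporting the socket writable. */
static int limit_unsent(int fd, int bytes)
{
        return setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
                          &bytes, sizeof(bytes));
}
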
3771 static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
3778 stats[i] = tp->chrono_stat[i - 1];
3779 if (i == tp->chrono_type)
3780 stats[i] += tcp_jiffies32 - tp->chrono_start;
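
tcp_get_info_chrono_stats() sums the jiffies the connection has spent in each tcp_chrono state (busy, receive-window-limited, send-buffer-limited); the state currently in force gets the still-running interval, tcp_jiffies32 - chrono_start, added on top.
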
3793 const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */
3815 info->tcpi_reordering = tp->reordering;
3816 info->tcpi_snd_cwnd = tcp_snd_cwnd(tp);
3835 if (tp->rx_opt.tstamp_ok)
3837 if (tcp_is_sack(tp))
3839 if (tp->rx_opt.wscale_ok) {
3841 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
3842 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
3845 if (tp->ecn_flags & TCP_ECN_OK)
3847 if (tp->ecn_flags & TCP_ECN_SEEN)
3849 if (tp->syn_data_acked)
3851 if (tp->tcp_usec_ts)
3857 info->tcpi_snd_mss = tp->mss_cache;
3860 info->tcpi_unacked = tp->packets_out;
3861 info->tcpi_sacked = tp->sacked_out;
3863 info->tcpi_lost = tp->lost_out;
3864 info->tcpi_retrans = tp->retrans_out;
3867 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
3869 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
3872 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
3873 info->tcpi_rtt = tp->srtt_us >> 3;
3874 info->tcpi_rttvar = tp->mdev_us >> 2;
3875 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
3876 info->tcpi_advmss = tp->advmss;
3878 info->tcpi_rcv_rtt = tp->rcv_rtt_est.rtt_us >> 3;
3879 info->tcpi_rcv_space = tp->rcvq_space.space;
3881 info->tcpi_total_retrans = tp->total_retrans;
3883 info->tcpi_bytes_acked = tp->bytes_acked;
3884 info->tcpi_bytes_received = tp->bytes_received;
3885 info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt);
3886 tcp_get_info_chrono_stats(tp, info);
3888 info->tcpi_segs_out = tp->segs_out;
3891 info->tcpi_segs_in = READ_ONCE(tp->segs_in);
3892 info->tcpi_data_segs_in = READ_ONCE(tp->data_segs_in);
3894 info->tcpi_min_rtt = tcp_min_rtt(tp);
3895 info->tcpi_data_segs_out = tp->data_segs_out;
3897 info->tcpi_delivery_rate_app_limited = tp->rate_app_limited ? 1 : 0;
3898 rate64 = tcp_compute_delivery_rate(tp);
3901 info->tcpi_delivered = tp->delivered;
3902 info->tcpi_delivered_ce = tp->delivered_ce;
3903 info->tcpi_bytes_sent = tp->bytes_sent;
3904 info->tcpi_bytes_retrans = tp->bytes_retrans;
3905 info->tcpi_dsack_dups = tp->dsack_dups;
3906 info->tcpi_reord_seen = tp->reord_seen;
3907 info->tcpi_rcv_ooopack = tp->rcv_ooopack;
3908 info->tcpi_snd_wnd = tp->snd_wnd;
3909 info->tcpi_rcv_wnd = tp->rcv_wnd;
3910 info->tcpi_rehash = tp->plb_rehash + tp->timeout_rehash;
3911 info->tcpi_fastopen_client_fail = tp->fastopen_client_fail;
3913 info->tcpi_total_rto = tp->total_rto;
3914 info->tcpi_total_rto_recoveries = tp->total_rto_recoveries;
3915 info->tcpi_total_rto_time = tp->total_rto_time;
3916 if (tp->rto_stamp)
3917 info->tcpi_total_rto_time += tcp_clock_ms() - tp->rto_stamp;
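
tcp_get_info() is the big exporter behind getsockopt(TCP_INFO). Note the fixed-point storage it unwinds: srtt_us holds eight times the smoothed RTT and mdev_us four times the mean deviation, so the >>3 and >>2 above yield plain microseconds. A hedged reader:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

static void dump_tcp_info(int fd)
{
        struct tcp_info ti;
        socklen_t len = sizeof(ti);

        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
                printf("rtt=%u us rttvar=%u us cwnd=%u retrans=%u\n",
                       ti.tcpi_rtt, ti.tcpi_rttvar,
                       ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
}
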
3971 const struct tcp_sock *tp = tcp_sk(sk);
3981 tcp_get_info_chrono_stats(tp, &info);
3989 tp->data_segs_out, TCP_NLA_PAD);
3991 tp->total_retrans, TCP_NLA_PAD);
3997 rate64 = tcp_compute_delivery_rate(tp);
4000 nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp));
4001 nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
4002 nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
4005 nla_put_u8(stats, TCP_NLA_DELIVERY_RATE_APP_LMT, !!tp->rate_app_limited);
4006 nla_put_u32(stats, TCP_NLA_SND_SSTHRESH, tp->snd_ssthresh);
4007 nla_put_u32(stats, TCP_NLA_DELIVERED, tp->delivered);
4008 nla_put_u32(stats, TCP_NLA_DELIVERED_CE, tp->delivered_ce);
4010 nla_put_u32(stats, TCP_NLA_SNDQ_SIZE, tp->write_seq - tp->snd_una);
4013 nla_put_u64_64bit(stats, TCP_NLA_BYTES_SENT, tp->bytes_sent,
4015 nla_put_u64_64bit(stats, TCP_NLA_BYTES_RETRANS, tp->bytes_retrans,
4017 nla_put_u32(stats, TCP_NLA_DSACK_DUPS, tp->dsack_dups);
4018 nla_put_u32(stats, TCP_NLA_REORD_SEEN, tp->reord_seen);
4019 nla_put_u32(stats, TCP_NLA_SRTT, tp->srtt_us >> 3);
4020 nla_put_u16(stats, TCP_NLA_TIMEOUT_REHASH, tp->timeout_rehash);
4022 max_t(int, 0, tp->write_seq - tp->snd_nxt));
4029 nla_put_u32(stats, TCP_NLA_REHASH, tp->plb_rehash + tp->timeout_rehash);
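
tcp_get_timestamping_opt_stats() re-exports much of the same state as netlink TCP_NLA_* attributes; these ride along as OPT_STATS on SO_TIMESTAMPING transmit timestamps, letting per-packet tooling sample sender state without an extra getsockopt() call.
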
4037 struct tcp_sock *tp = tcp_sk(sk);
4051 val = tp->mss_cache;
4052 if (tp->rx_opt.user_mss &&
4054 val = tp->rx_opt.user_mss;
4055 if (tp->repair)
4056 val = tp->rx_opt.mss_clamp;
4059 val = !!(tp->nonagle&TCP_NAGLE_OFF);
4062 val = !!(tp->nonagle&TCP_NAGLE_CORK);
4065 val = keepalive_time_when(tp) / HZ;
4068 val = keepalive_intvl_when(tp) / HZ;
4071 val = keepalive_probes(tp);
4078 val = READ_ONCE(tp->linger2);
4088 val = READ_ONCE(tp->window_clamp);
4172 val = tp->thin_lto;
4180 val = tp->repair;
4184 if (tp->repair)
4185 val = tp->repair_queue;
4199 if (!tp->repair)
4202 opt.snd_wl1 = tp->snd_wl1;
4203 opt.snd_wnd = tp->snd_wnd;
4204 opt.max_window = tp->max_window;
4205 opt.rcv_wnd = tp->rcv_wnd;
4206 opt.rcv_wup = tp->rcv_wup;
4213 if (tp->repair_queue == TCP_SEND_QUEUE)
4214 val = tp->write_seq;
4215 else if (tp->repair_queue == TCP_RECV_QUEUE)
4216 val = tp->rcv_nxt;
4230 val = tp->fastopen_connect;
4234 val = tp->fastopen_no_cookie;
4238 val = READ_ONCE(tp->tcp_tx_delay);
4242 val = tcp_clock_ts(tp->tcp_usec_ts) + READ_ONCE(tp->tsoffset);
4243 if (tp->tcp_usec_ts)
4249 val = READ_ONCE(tp->notsent_lowat);
4252 val = tp->recvmsg_inq;
4255 val = tp->save_syn;
4262 if (tp->saved_syn) {
4263 if (len < tcp_saved_syn_len(tp->saved_syn)) {
4264 len = tcp_saved_syn_len(tp->saved_syn);
4272 len = tcp_saved_syn_len(tp->saved_syn);
4277 if (copy_to_sockptr(optval, tp->saved_syn->data, len)) {
4281 tcp_saved_syn_free(tp);
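
The saved-SYN branch of do_tcp_getsockopt(): after TCP_SAVE_SYN is set on the listener, the headers of the connection-opening SYN are kept and handed out exactly once, then tcp_saved_syn_free() drops them (the copy happens only if the caller's buffer is large enough, as the length checks above show). The userspace side:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

/* Read the saved SYN headers of an accepted socket (one-shot). */
static int get_saved_syn(int conn_fd, char *buf, socklen_t *len)
{
        return getsockopt(conn_fd, IPPROTO_TCP, TCP_SAVED_SYN, buf, len);
}
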
4475 const struct tcp_sock *tp = tcp_sk(sk);
4495 genhash = tp->af_specific->calc_md5_hash(newhash, key,
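
Finally, tcp_inbound_md5_hash() recomputes the RFC 2385 TCP-MD5 signature of an incoming segment through the address-family hooks in tp->af_specific and compares it with the received option. The keys it checks against are installed per-peer, roughly like this (a sketch; mostly used for BGP sessions):

#include <linux/tcp.h>          /* struct tcp_md5sig, TCP_MD5SIG */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int set_md5_key(int fd, const struct sockaddr_in *peer,
                       const void *key, unsigned int keylen)
{
        struct tcp_md5sig md5;

        memset(&md5, 0, sizeof(md5));
        memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
        md5.tcpm_keylen = keylen;       /* must be <= TCP_MD5SIG_MAXKEYLEN (80) */
        memcpy(md5.tcpm_key, key, keylen);
        return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
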