/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP protocol.
 *
 * Version:	@(#)tcp.h	1.0.2	04/28/93
 *
 * Author:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 */
#ifndef _LINUX_TCP_H
#define _LINUX_TCP_H


#include <linux/skbuff.h>
#include <linux/win_minmax.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <uapi/linux/tcp.h>

static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
{
	return (struct tcphdr *)skb_transport_header(skb);
}

static inline unsigned int __tcp_hdrlen(const struct tcphdr *th)
{
	return th->doff * 4;
}

static inline unsigned int tcp_hdrlen(const struct sk_buff *skb)
{
	return __tcp_hdrlen(tcp_hdr(skb));
}

static inline struct tcphdr *inner_tcp_hdr(const struct sk_buff *skb)
{
	return (struct tcphdr *)skb_inner_transport_header(skb);
}

static inline unsigned int inner_tcp_hdrlen(const struct sk_buff *skb)
{
	return inner_tcp_hdr(skb)->doff * 4;
}

/**
 * skb_tcp_all_headers - Returns size of all headers for a TCP packet
 * @skb: buffer
 *
 * Used in TX path, for a packet known to be a TCP one.
 *
 * if (skb_is_gso(skb)) {
 *         int hlen = skb_tcp_all_headers(skb);
 *         ...
 */
static inline int skb_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_transport_offset(skb) + tcp_hdrlen(skb);
}

/**
 * skb_inner_tcp_all_headers - Returns size of all headers for an encap TCP packet
 * @skb: buffer
 *
 * Used in TX path, for a packet known to be a TCP one.
 *
 * if (skb_is_gso(skb) && skb->encapsulation) {
 *         int hlen = skb_inner_tcp_all_headers(skb);
 *         ...
 */
static inline int skb_inner_tcp_all_headers(const struct sk_buff *skb)
{
	return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}

static inline unsigned int tcp_optlen(const struct sk_buff *skb)
{
	return (tcp_hdr(skb)->doff - 5) * 4;
}
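
/*
 * How the helpers above relate (illustration only, nothing new is defined
 * here): th->doff counts 32-bit words, so tcp_hdrlen() is the 20-byte fixed
 * header plus options, and tcp_optlen() is just the options part.  A
 * hypothetical option walk could start as:
 *
 *	const struct tcphdr *th = tcp_hdr(skb);
 *	const u8 *opt = (const u8 *)(th + 1);
 *	unsigned int optlen = tcp_optlen(skb);
 *
 * where opt points just past the fixed header and optlen equals
 * tcp_hdrlen(skb) - sizeof(*th).  Since doff is a 4-bit field, tcp_hdrlen()
 * is at most 60 bytes and tcp_optlen() at most 40.
 */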

/* TCP Fast Open */
#define TCP_FASTOPEN_COOKIE_MIN	4	/* Min Fast Open Cookie size in bytes */
#define TCP_FASTOPEN_COOKIE_MAX	16	/* Max Fast Open Cookie size in bytes */
#define TCP_FASTOPEN_COOKIE_SIZE 8	/* the size employed by this impl. */

/* TCP Fast Open Cookie as stored in memory */
struct tcp_fastopen_cookie {
	__le64	val[DIV_ROUND_UP(TCP_FASTOPEN_COOKIE_MAX, sizeof(u64))];
	s8	len;
	bool	exp;	/* In RFC6994 experimental option format */
};

/* This defines a selective acknowledgement block. */
struct tcp_sack_block_wire {
	__be32	start_seq;
	__be32	end_seq;
};

struct tcp_sack_block {
	u32	start_seq;
	u32	end_seq;
};

/*These are used to set the sack_ok field in struct tcp_options_received */
#define TCP_SACK_SEEN     (1 << 0)   /*1 = peer is SACK capable, */
#define TCP_DSACK_SEEN    (1 << 2)   /*1 = DSACK was received from peer*/

struct tcp_options_received {
/*	PAWS/RTTM data	*/
	int	ts_recent_stamp;/* Time we stored ts_recent (for aging) */
	u32	ts_recent;	/* Time stamp to echo next		*/
	u32	rcv_tsval;	/* Time stamp value			*/
	u32	rcv_tsecr;	/* Time stamp echo reply		*/
	u16	saw_tstamp : 1,	/* Saw TIMESTAMP on last packet		*/
		tstamp_ok : 1,	/* TIMESTAMP seen on SYN packet		*/
		dsack : 1,	/* D-SACK is scheduled			*/
		wscale_ok : 1,	/* Wscale seen on SYN packet		*/
		sack_ok : 3,	/* SACK seen on SYN packet		*/
		smc_ok : 1,	/* SMC seen on SYN packet		*/
		snd_wscale : 4,	/* Window scaling received from sender	*/
		rcv_wscale : 4;	/* Window scaling to send to receiver	*/
	u8	saw_unknown:1,	/* Received unknown option		*/
		unused:7;
	u8	num_sacks;	/* Number of SACK blocks		*/
	u16	user_mss;	/* mss requested by user in ioctl	*/
	u16	mss_clamp;	/* Maximal mss, negotiated at connection setup */
};

static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
{
	rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
	rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
#if IS_ENABLED(CONFIG_SMC)
	rx_opt->smc_ok = 0;
#endif
}

/* This is the max number of SACKS that we'll generate and process. It's safe
 * to increase this, although since:
 * size = TCPOLEN_SACK_BASE_ALIGNED (4) + n * TCPOLEN_SACK_PERBLOCK (8)
 * only four options will fit in a standard TCP header */
#define TCP_NUM_SACKS 4
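
/*
 * Worked example for the bound above (illustration only): a TCP header can
 * carry at most 60 - 20 = 40 bytes of options.  Four SACK blocks need
 * 4 + 4 * 8 = 36 bytes and still fit, while five would need 44 and do not.
 * With the 12-byte (aligned) timestamp option also present only 28 bytes
 * remain, so at most 4 + 3 * 8 = 28 bytes, i.e. three blocks, go on the wire.
 */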

struct tcp_request_sock_ops;

struct tcp_request_sock {
	struct inet_request_sock	req;
	const struct tcp_request_sock_ops *af_specific;
	u64				snt_synack; /* first SYNACK sent time */
	bool				tfo_listener;
	bool				is_mptcp;
	bool				req_usec_ts;
#if IS_ENABLED(CONFIG_MPTCP)
	bool				drop_req;
#endif
	u32				txhash;
	u32				rcv_isn;
	u32				snt_isn;
	u32				ts_off;
	u32				last_oow_ack_time; /* last SYNACK */
	u32				rcv_nxt; /* the ack # by SYNACK. For
						  * FastOpen it's the seq#
						  * after data-in-SYN.
						  */
	u8				syn_tos;
#ifdef CONFIG_TCP_AO
	u8				ao_keyid;
	u8				ao_rcv_next;
	bool				used_tcp_ao;
#endif
};

static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
{
	return (struct tcp_request_sock *)req;
}

static inline bool tcp_rsk_used_ao(const struct request_sock *req)
{
#ifndef CONFIG_TCP_AO
	return false;
#else
	return tcp_rsk(req)->used_tcp_ao;
#endif
}

#define TCP_RMEM_TO_WIN_SCALE 8

struct tcp_sock {
	/* Cacheline organization can be found documented in
	 * Documentation/networking/net_cachelines/tcp_sock.rst.
	 * Please update the document when adding new fields.
	 */

	/* inet_connection_sock has to be the first member of tcp_sock */
	struct inet_connection_sock	inet_conn;

	/* TX read-mostly hotpath cache lines */
	__cacheline_group_begin(tcp_sock_read_tx);
	u32	max_window;	/* Maximal window ever seen from peer	*/
	u32	rcv_ssthresh;	/* Current window clamp			*/
	u32	reordering;	/* Packet reordering metric.		*/
	u32	notsent_lowat;	/* TCP_NOTSENT_LOWAT */
	u16	gso_segs;	/* Max number of segs per GSO packet	*/
	/* from STCP, retrans queue hinting */
	struct sk_buff *lost_skb_hint;
	struct sk_buff *retransmit_skb_hint;
	__cacheline_group_end(tcp_sock_read_tx);

	/* TXRX read-mostly hotpath cache lines */
	__cacheline_group_begin(tcp_sock_read_txrx);
	u32	tsoffset;	/* timestamp offset */
	u32	snd_wnd;	/* The window we expect to receive	*/
	u32	mss_cache;	/* Cached effective mss, not including SACKS */
	u32	snd_cwnd;	/* Sending congestion window		*/
	u32	prr_out;	/* Total number of pkts sent during Recovery. */
	u32	lost_out;	/* Lost packets				*/
	u32	sacked_out;	/* SACK'd packets			*/
	u16	tcp_header_len;	/* Bytes of tcp header to send		*/
	u8	scaling_ratio;	/* see tcp_win_from_space() */
	u8	chrono_type : 2,	/* current chronograph type */
		repair      : 1,
		tcp_usec_ts : 1, /* TSval values in usec */
		is_sack_reneg:1,    /* in recovery from loss with SACK reneg? */
		is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
	__cacheline_group_end(tcp_sock_read_txrx);

	/* RX read-mostly hotpath cache lines */
	__cacheline_group_begin(tcp_sock_read_rx);
	u32	copied_seq;	/* Head of yet unread data */
	u32	rcv_tstamp;	/* timestamp of last received ACK (for keepalives) */
	u32	snd_wl1;	/* Sequence for window update		*/
	u32	tlp_high_seq;	/* snd_nxt at the time of TLP */
	u32	rttvar_us;	/* smoothed mdev_max			*/
	u32	retrans_out;	/* Retransmitted packets out		*/
	u16	advmss;		/* Advertised MSS			*/
	u16	urg_data;	/* Saved octet of OOB data and control flags */
	u32	lost;		/* Total data packets lost incl. rexmits */
	struct  minmax rtt_min;
	/* OOO segments go in this rbtree. Socket lock must be held. */
	struct rb_root	out_of_order_queue;
	u32	snd_ssthresh;	/* Slow start size threshold		*/
	__cacheline_group_end(tcp_sock_read_rx);

	/* TX read-write hotpath cache lines */
	__cacheline_group_begin(tcp_sock_write_tx) ____cacheline_aligned;
	u32	segs_out;	/* RFC4898 tcpEStatsPerfSegsOut
				 * The total number of segments sent.
				 */
	u32	data_segs_out;	/* RFC4898 tcpEStatsPerfDataSegsOut
				 * total number of data segments sent.
				 */
	u64	bytes_sent;	/* RFC4898 tcpEStatsPerfHCDataOctetsOut
				 * total number of data bytes sent.
				 */
	u32	snd_sml;	/* Last byte of the most recently transmitted small packet */
	u32	chrono_start;	/* Start time in jiffies of a TCP chrono */
	u32	chrono_stat[3];	/* Time in jiffies for chrono_stat stats */
	u32	write_seq;	/* Tail(+1) of data held in tcp send buffer */
	u32	pushed_seq;	/* Last pushed seq, required to talk to windows */
	u32	lsndtime;	/* timestamp of last sent data packet (for restart window) */
	u32	mdev_us;	/* medium deviation			*/
	u32	rtt_seq;	/* sequence number to update rttvar	*/
	u64	tcp_wstamp_ns;	/* departure time for next sent data packet */
	u64	tcp_clock_cache; /* cache last tcp_clock_ns() (see tcp_mstamp_refresh()) */
	u64	tcp_mstamp;	/* most recent packet received/sent */
	struct list_head tsorted_sent_queue; /* time-sorted sent but un-SACKed skbs */
	struct sk_buff *highest_sack;	/* skb just after the highest
					 * skb with SACKed bit set
					 * (validity guaranteed only if
					 * sacked_out > 0)
					 */
	u8	ecn_flags;	/* ECN status bits.			*/
	__cacheline_group_end(tcp_sock_write_tx);

	/* TXRX read-write hotpath cache lines */
	__cacheline_group_begin(tcp_sock_write_txrx);
/*
 *	Header prediction flags
 *	0x5?10 << 16 + snd_wnd in net byte order
 */
	__be32	pred_flags;
	u32	rcv_nxt;	/* What we want to receive next		*/
	u32	snd_nxt;	/* Next sequence we send		*/
	u32	snd_una;	/* First byte we want an ack for	*/
	u32	window_clamp;	/* Maximal window to advertise		*/
	u32	srtt_us;	/* smoothed round trip time << 3 in usecs */
	u32	packets_out;	/* Packets which are "in flight"	*/
	u32	snd_up;		/* Urgent pointer */
	u32	delivered;	/* Total data packets delivered incl. rexmits */
	u32	delivered_ce;	/* Like the above but only ECE marked packets */
	u32	app_limited;	/* limited until "delivered" reaches this val */
	u32	rcv_wnd;	/* Current receiver window		*/
/*
 *	Options received (usually on last packet, some only on SYN packets).
 */
	struct tcp_options_received rx_opt;
	u8	nonagle     : 4,/* Disable Nagle algorithm?             */
		rate_app_limited:1;  /* rate_{delivered,interval_us} limited? */
	__cacheline_group_end(tcp_sock_write_txrx);

	/* RX read-write hotpath cache lines */
	__cacheline_group_begin(tcp_sock_write_rx) __aligned(8);
	u64	bytes_received;
			/* RFC4898 tcpEStatsAppHCThruOctetsReceived
			 * sum(delta(rcv_nxt)), or how many bytes
			 * were acked.
			 */
	u32	segs_in;	/* RFC4898 tcpEStatsPerfSegsIn
				 * total number of segments in.
				 */
	u32	data_segs_in;	/* RFC4898 tcpEStatsPerfDataSegsIn
				 * total number of data segments in.
				 */
	u32	rcv_wup;	/* rcv_nxt on last window update sent	*/
	u32	max_packets_out;  /* max packets_out in last window */
	u32	cwnd_usage_seq;   /* right edge of cwnd usage tracking flight */
	u32	rate_delivered;   /* saved rate sample: packets delivered */
	u32	rate_interval_us; /* saved rate sample: time elapsed */
	u32	rcv_rtt_last_tsecr;
	u64	first_tx_mstamp;  /* start of window send phase */
	u64	delivered_mstamp; /* time we reached "delivered" */
	u64	bytes_acked;	/* RFC4898 tcpEStatsAppHCThruOctetsAcked
				 * sum(delta(snd_una)), or how many bytes
				 * were acked.
				 */
	struct {
		u32	rtt_us;
		u32	seq;
		u64	time;
	} rcv_rtt_est;
/* Receiver queue space */
	struct {
		u32	space;
		u32	seq;
		u64	time;
	} rcvq_space;
	__cacheline_group_end(tcp_sock_write_rx);
	/* End of Hot Path */

/*
 *	RFC793 variables by their proper names. This means you can
 *	read the code and the spec side by side (and laugh ...)
 *	See RFC793 and RFC1122. The RFC writes these in capitals.
 */
	u32	dsack_dups;	/* RFC4898 tcpEStatsStackDSACKDups
				 * total number of DSACK blocks received
				 */
	u32	compressed_ack_rcv_nxt;
	struct list_head tsq_node; /* anchor in tsq_tasklet.head list */

	/* Information of the most recently (s)acked skb */
	struct tcp_rack {
		u64 mstamp; /* (Re)sent time of the skb */
		u32 rtt_us;  /* Associated RTT */
		u32 end_seq; /* Ending TCP sequence of the skb */
		u32 last_delivered; /* tp->delivered at last reo_wnd adj */
		u8 reo_wnd_steps;   /* Allowed reordering window */
#define TCP_RACK_RECOVERY_THRESH 16
		u8 reo_wnd_persist:5, /* No. of recovery since last adj */
		   dsack_seen:1, /* Whether DSACK seen after last adj */
		   advanced:1;	 /* mstamp advanced since last lost marking */
	} rack;
	u8	compressed_ack;
	u8	dup_ack_counter:2,
		tlp_retrans:1,	/* TLP is a retransmission */
		unused:5;
	u8	thin_lto    : 1,/* Use linear timeouts for thin streams */
		recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
		fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
		fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
		fastopen_client_fail:2, /* reason why fastopen failed */
		frto        : 1;/* F-RTO (RFC5682) activated in CA_Loss */
	u8	repair_queue;
	u8	save_syn:2,	/* Save headers of SYN packet */
		syn_data:1,	/* SYN includes data */
		syn_fastopen:1,	/* SYN includes Fast Open option */
		syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
		syn_fastopen_ch:1, /* Active TFO re-enabling probe */
		syn_data_acked:1;/* data in SYN is acked by SYN-ACK */

	u8	keepalive_probes; /* num of allowed keep alive probes	*/
	u32	tcp_tx_delay;	/* delay (in usec) added to TX packets */

/* RTT measurement */
	u32	mdev_max_us;	/* maximal mdev for the last rtt period	*/

	u32	reord_seen;	/* number of data packet reordering events */

/*
 *	Slow start and congestion control (see also Nagle, and Karn & Partridge)
 */
	u32	snd_cwnd_cnt;	/* Linear increase counter		*/
	u32	snd_cwnd_clamp; /* Do not allow snd_cwnd to grow above this */
	u32	snd_cwnd_used;
	u32	snd_cwnd_stamp;
	u32	prior_cwnd;	/* cwnd right before starting loss recovery */
	u32	prr_delivered;	/* Number of newly delivered packets to
				 * receiver in Recovery. */
	u32	last_oow_ack_time;  /* timestamp of last out-of-window ACK */

	struct hrtimer	pacing_timer;
	struct hrtimer	compressed_ack_timer;

	struct sk_buff	*ooo_last_skb; /* cache rb_last(out_of_order_queue) */

	/* SACKs data, these 2 need to be together (see tcp_options_write) */
	struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
	struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/

	struct tcp_sack_block recv_sack_cache[4];

	int	lost_cnt_hint;

	u32	prior_ssthresh; /* ssthresh saved at recovery start	*/
	u32	high_seq;	/* snd_nxt at onset of congestion	*/

	u32	retrans_stamp;	/* Timestamp of the last retransmit,
				 * also used in SYN-SENT to remember stamp of
				 * the first SYN. */
	u32	undo_marker;	/* snd_una upon a new recovery episode. */
	int	undo_retrans;	/* number of undoable retransmissions. */
	u64	bytes_retrans;	/* RFC4898 tcpEStatsPerfOctetsRetrans
				 * Total data bytes retransmitted
				 */
	u32	total_retrans;	/* Total retransmits for entire connection */
	u32	rto_stamp;	/* Start time (ms) of last CA_Loss recovery */
	u16	total_rto;	/* Total number of RTO timeouts, including
				 * SYN/SYN-ACK and recurring timeouts.
				 */
	u16	total_rto_recoveries;	/* Total number of RTO recoveries,
					 * including any unfinished recovery.
					 */
	u32	total_rto_time;	/* ms spent in (completed) RTO recoveries. */

	u32	urg_seq;	/* Seq of received urgent pointer */
	unsigned int		keepalive_time;	  /* time before keep alive takes place */
	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */

	int			linger2;


/* Sock_ops bpf program related variables */
#ifdef CONFIG_BPF
	u8	bpf_sock_ops_cb_flags;  /* Control calling BPF programs
					 * values defined in uapi/linux/tcp.h
					 */
	u8	bpf_chg_cc_inprogress:1; /* In the middle of
					  * bpf_setsockopt(TCP_CONGESTION),
					  * it is to avoid the bpf_tcp_cc->init()
					  * to recur itself by calling
					  * bpf_setsockopt(TCP_CONGESTION, "itself").
					  */
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG)
#else
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
#endif

	u16	timeout_rehash;	/* Timeout-triggered rehash attempts */

	u32	rcv_ooopack;	/* Received out-of-order packets, for tcpinfo */

/* TCP-specific MTU probe information. */
	struct {
		u32		  probe_seq_start;
		u32		  probe_seq_end;
	} mtu_probe;
	u32	plb_rehash;	/* PLB-triggered rehash attempts */
	u32	mtu_info;	/* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
				 * while socket was owned by user.
				 */
#if IS_ENABLED(CONFIG_MPTCP)
	bool	is_mptcp;
#endif
#if IS_ENABLED(CONFIG_SMC)
	bool	syn_smc;	/* SYN includes SMC */
	bool	(*smc_hs_congested)(const struct sock *sk);
#endif

#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
/* TCP AF-Specific parts; only used by TCP-AO/MD5 Signature support so far */
	const struct tcp_sock_af_ops	*af_specific;

#ifdef CONFIG_TCP_MD5SIG
/* TCP MD5 Signature Option information */
	struct tcp_md5sig_info	__rcu *md5sig_info;
#endif
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info	__rcu *ao_info;
#endif
#endif

/* TCP fastopen related information */
	struct tcp_fastopen_request *fastopen_req;
	/* fastopen_rsk points to request_sock that resulted in this big
	 * socket. Used to retransmit SYNACKs etc.
	 */
	struct request_sock __rcu *fastopen_rsk;
	struct saved_syn *saved_syn;
};
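
/*
 * Illustrative sketch (not a new interface): call sites guard optional BPF
 * sock_ops callbacks with BPF_SOCK_OPS_TEST_FLAG() before invoking them,
 * e.g. with a callback flag from the BPF uapi headers:
 *
 *	if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
 *		...
 *
 * When CONFIG_BPF is disabled the macro expands to 0, so such branches are
 * compiled out entirely.
 */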

enum tsq_enum {
	TSQ_THROTTLED,
	TSQ_QUEUED,
	TCP_TSQ_DEFERRED,	   /* tcp_tasklet_func() found socket was owned */
	TCP_WRITE_TIMER_DEFERRED,  /* tcp_write_timer() found socket was owned */
	TCP_DELACK_TIMER_DEFERRED, /* tcp_delack_timer() found socket was owned */
	TCP_MTU_REDUCED_DEFERRED,  /* tcp_v{4|6}_err() could not call
				    * tcp_v{4|6}_mtu_reduced()
				    */
	TCP_ACK_DEFERRED,	   /* TX pure ack is deferred */
};

enum tsq_flags {
	TSQF_THROTTLED			= BIT(TSQ_THROTTLED),
	TSQF_QUEUED			= BIT(TSQ_QUEUED),
	TCPF_TSQ_DEFERRED		= BIT(TCP_TSQ_DEFERRED),
	TCPF_WRITE_TIMER_DEFERRED	= BIT(TCP_WRITE_TIMER_DEFERRED),
	TCPF_DELACK_TIMER_DEFERRED	= BIT(TCP_DELACK_TIMER_DEFERRED),
	TCPF_MTU_REDUCED_DEFERRED	= BIT(TCP_MTU_REDUCED_DEFERRED),
	TCPF_ACK_DEFERRED		= BIT(TCP_ACK_DEFERRED),
};

#define tcp_sk(ptr) container_of_const(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
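
/*
 * Typical use of the accessor above (illustration only, no new API): a
 * struct sock pointer belonging to a TCP socket is mapped back to its
 * containing tcp_sock, preserving const-ness via container_of_const():
 *
 *	const struct tcp_sock *tp = tcp_sk(sk);
 *
 *	if (tp->rcv_nxt != tp->copied_seq)
 *		...
 *
 * Here rcv_nxt != copied_seq means received data has not been read yet.
 * tcp_sk_rw() below is the non-const variant for lockless listener contexts.
 */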

/* Variant of tcp_sk() upgrading a const sock to a read/write tcp socket.
 * Used in context of (lockless) tcp listeners.
 */
#define tcp_sk_rw(ptr) container_of(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)

struct tcp_timewait_sock {
	struct inet_timewait_sock tw_sk;
#define tw_rcv_nxt tw_sk.__tw_common.skc_tw_rcv_nxt
#define tw_snd_nxt tw_sk.__tw_common.skc_tw_snd_nxt
	u32			  tw_rcv_wnd;
	u32			  tw_ts_offset;
	u32			  tw_ts_recent;

	/* The time we sent the last out-of-window ACK: */
	u32			  tw_last_oow_ack_time;

	int			  tw_ts_recent_stamp;
	u32			  tw_tx_delay;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	  *tw_md5_key;
#endif
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info	__rcu *ao_info;
#endif
};

static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
{
	return (struct tcp_timewait_sock *)sk;
}

static inline bool tcp_passive_fastopen(const struct sock *sk)
{
	return sk->sk_state == TCP_SYN_RECV &&
	       rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL;
}

static inline void fastopen_queue_tune(struct sock *sk, int backlog)
{
	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
	int somaxconn = READ_ONCE(sock_net(sk)->core.sysctl_somaxconn);

	WRITE_ONCE(queue->fastopenq.max_qlen, min_t(unsigned int, backlog, somaxconn));
}

static inline void tcp_move_syn(struct tcp_sock *tp,
				struct request_sock *req)
{
	tp->saved_syn = req->saved_syn;
	req->saved_syn = NULL;
}

static inline void tcp_saved_syn_free(struct tcp_sock *tp)
{
	kfree(tp->saved_syn);
	tp->saved_syn = NULL;
}

static inline u32 tcp_saved_syn_len(const struct saved_syn *saved_syn)
{
	return saved_syn->mac_hdrlen + saved_syn->network_hdrlen +
	       saved_syn->tcp_hdrlen;
}

struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
					       const struct sk_buff *orig_skb,
					       const struct sk_buff *ack_skb);

static inline u16 tcp_mss_clamp(const struct tcp_sock *tp, u16 mss)
{
	/* We use READ_ONCE() here because socket might not be locked.
	 * This happens for listeners.
	 */
	u16 user_mss = READ_ONCE(tp->rx_opt.user_mss);

	return (user_mss && user_mss < mss) ? user_mss : mss;
}

int tcp_skb_shift(struct sk_buff *to, struct sk_buff *from, int pcount,
		  int shiftlen);

void __tcp_sock_set_cork(struct sock *sk, bool on);
void tcp_sock_set_cork(struct sock *sk, bool on);
int tcp_sock_set_keepcnt(struct sock *sk, int val);
int tcp_sock_set_keepidle_locked(struct sock *sk, int val);
int tcp_sock_set_keepidle(struct sock *sk, int val);
int tcp_sock_set_keepintvl(struct sock *sk, int val);
void __tcp_sock_set_nodelay(struct sock *sk, bool on);
void tcp_sock_set_nodelay(struct sock *sk);
void tcp_sock_set_quickack(struct sock *sk, int val);
int tcp_sock_set_syncnt(struct sock *sk, int val);
int tcp_sock_set_user_timeout(struct sock *sk, int val);

static inline bool dst_tcp_usec_ts(const struct dst_entry *dst)
{
	return dst_feature(dst, RTAX_FEATURE_TCP_USEC_TS);
}

#endif	/* _LINUX_TCP_H */