tcp_input.c revision 327520
1/*- 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995 3 * The Regents of the University of California. All rights reserved. 4 * Copyright (c) 2007-2008,2010 5 * Swinburne University of Technology, Melbourne, Australia. 6 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org> 7 * Copyright (c) 2010 The FreeBSD Foundation 8 * Copyright (c) 2010-2011 Juniper Networks, Inc. 9 * All rights reserved. 10 * 11 * Portions of this software were developed at the Centre for Advanced Internet 12 * Architectures, Swinburne University of Technology, by Lawrence Stewart, 13 * James Healy and David Hayes, made possible in part by a grant from the Cisco 14 * University Research Program Fund at Community Foundation Silicon Valley. 15 * 16 * Portions of this software were developed at the Centre for Advanced 17 * Internet Architectures, Swinburne University of Technology, Melbourne, 18 * Australia by David Hayes under sponsorship from the FreeBSD Foundation. 19 * 20 * Portions of this software were developed by Robert N. M. Watson under 21 * contract to Juniper Networks, Inc. 22 * 23 * Redistribution and use in source and binary forms, with or without 24 * modification, are permitted provided that the following conditions 25 * are met: 26 * 1. Redistributions of source code must retain the above copyright 27 * notice, this list of conditions and the following disclaimer. 28 * 2. Redistributions in binary form must reproduce the above copyright 29 * notice, this list of conditions and the following disclaimer in the 30 * documentation and/or other materials provided with the distribution. 31 * 4. Neither the name of the University nor the names of its contributors 32 * may be used to endorse or promote products derived from this software 33 * without specific prior written permission. 34 * 35 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 36 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 37 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 38 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 39 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 40 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 41 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 42 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 43 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 44 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 45 * SUCH DAMAGE. 46 * 47 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95 48 */ 49 50#include <sys/cdefs.h> 51__FBSDID("$FreeBSD: stable/10/sys/netinet/tcp_input.c 327520 2018-01-03 16:16:20Z smh $"); 52 53#include "opt_ipfw.h" /* for ipfw_fwd */ 54#include "opt_inet.h" 55#include "opt_inet6.h" 56#include "opt_ipsec.h" 57#include "opt_kdtrace.h" 58#include "opt_tcpdebug.h" 59 60#include <sys/param.h> 61#include <sys/kernel.h> 62#include <sys/hhook.h> 63#include <sys/malloc.h> 64#include <sys/mbuf.h> 65#include <sys/proc.h> /* for proc0 declaration */ 66#include <sys/protosw.h> 67#include <sys/sdt.h> 68#include <sys/signalvar.h> 69#include <sys/socket.h> 70#include <sys/socketvar.h> 71#include <sys/sysctl.h> 72#include <sys/syslog.h> 73#include <sys/systm.h> 74 75#include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */ 76 77#include <vm/uma.h> 78 79#include <net/if.h> 80#include <net/route.h> 81#include <net/vnet.h> 82 83#define TCPSTATES /* for logging */ 84 85#include <netinet/cc.h> 86#include <netinet/in.h> 87#include <netinet/in_kdtrace.h> 88#include <netinet/in_pcb.h> 89#include <netinet/in_systm.h> 90#include <netinet/in_var.h> 91#include <netinet/ip.h> 92#include <netinet/ip_icmp.h> /* required for icmp_var.h */ 93#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 94#include <netinet/ip_var.h> 95#include <netinet/ip_options.h> 96#include <netinet/ip6.h> 97#include <netinet/icmp6.h> 98#include <netinet6/in6_pcb.h> 99#include <netinet6/ip6_var.h> 100#include <netinet6/nd6.h> 101#ifdef TCP_RFC7413 102#include <netinet/tcp_fastopen.h> 103#endif 104#include <netinet/tcp_fsm.h> 105#include <netinet/tcp_seq.h> 106#include <netinet/tcp_timer.h> 107#include <netinet/tcp_var.h> 108#include <netinet6/tcp6_var.h> 109#include <netinet/tcpip.h> 110#include <netinet/tcp_syncache.h> 111#ifdef TCPDEBUG 112#include <netinet/tcp_debug.h> 113#endif /* TCPDEBUG */ 114#ifdef TCP_OFFLOAD 115#include <netinet/tcp_offload.h> 116#endif 117 118#ifdef IPSEC 119#include <netipsec/ipsec.h> 120#include <netipsec/ipsec6.h> 121#endif /*IPSEC*/ 122 123#include <machine/in_cksum.h> 124 125#include <security/mac/mac_framework.h> 126 127const int tcprexmtthresh = 3; 128 129int tcp_log_in_vain = 0; 130SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW, 131 &tcp_log_in_vain, 0, 132 "Log all incoming TCP segments to closed ports"); 133 134VNET_DEFINE(int, blackhole) = 0; 135#define V_blackhole VNET(blackhole) 136SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW, 137 &VNET_NAME(blackhole), 0, 138 "Do not send RST on segments to closed ports"); 139 140VNET_DEFINE(int, tcp_delack_enabled) = 1; 141SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW, 142 &VNET_NAME(tcp_delack_enabled), 0, 143 "Delay ACK to try and piggyback it onto a data packet"); 144 145VNET_DEFINE(int, drop_synfin) = 0; 146#define V_drop_synfin VNET(drop_synfin) 147SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW, 148 &VNET_NAME(drop_synfin), 0, 149 "Drop TCP packets with SYN+FIN set"); 150 151VNET_DEFINE(int, tcp_do_rfc6675_pipe) = 0; 152SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_pipe, CTLFLAG_VNET | CTLFLAG_RW, 153 &VNET_NAME(tcp_do_rfc6675_pipe), 0, 154 "Use calculated pipe/in-flight bytes per RFC 6675"); 155 156VNET_DEFINE(int, tcp_do_rfc3042) = 1; 157#define V_tcp_do_rfc3042 VNET(tcp_do_rfc3042) 158SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW, 159 &VNET_NAME(tcp_do_rfc3042), 0, 160 "Enable RFC 3042 (Limited Transmit)"); 161 162VNET_DEFINE(int, tcp_do_rfc3390) = 1; 163SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW, 164 &VNET_NAME(tcp_do_rfc3390), 0, 165 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)"); 166 167SYSCTL_NODE(_net_inet_tcp, OID_AUTO, experimental, CTLFLAG_RW, 0, 168 "Experimental TCP extensions"); 169 170VNET_DEFINE(int, tcp_do_initcwnd10) = 1; 171SYSCTL_VNET_INT(_net_inet_tcp_experimental, OID_AUTO, initcwnd10, CTLFLAG_RW, 172 &VNET_NAME(tcp_do_initcwnd10), 0, 173 "Enable RFC 6928 (Increasing initial CWND to 10)"); 174 175VNET_DEFINE(int, tcp_do_rfc3465) = 1; 176SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_RW, 177 &VNET_NAME(tcp_do_rfc3465), 0, 178 "Enable RFC 3465 (Appropriate Byte Counting)"); 179 180VNET_DEFINE(int, tcp_abc_l_var) = 2; 181SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_RW, 182 &VNET_NAME(tcp_abc_l_var), 2, 183 "Cap the max cwnd increment during slow-start to this number of segments"); 184 185static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN"); 186 187VNET_DEFINE(int, tcp_do_ecn) = 2; 188SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_RW, 189 &VNET_NAME(tcp_do_ecn), 0, 190 "TCP ECN support"); 191 192VNET_DEFINE(int, tcp_ecn_maxretries) = 1; 193SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_RW, 194 &VNET_NAME(tcp_ecn_maxretries), 0, 195 "Max retries before giving up on ECN"); 196 197VNET_DEFINE(int, tcp_insecure_rst) = 0; 198#define V_tcp_insecure_rst VNET(tcp_insecure_rst) 199SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW, 200 &VNET_NAME(tcp_insecure_rst), 0, 201 "Follow the old (insecure) criteria for accepting RST packets"); 202 203VNET_DEFINE(int, tcp_recvspace) = 1024*64; 204#define V_tcp_recvspace VNET(tcp_recvspace) 205SYSCTL_VNET_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW, 206 &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size"); 207 208VNET_DEFINE(int, tcp_do_autorcvbuf) = 1; 209#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf) 210SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW, 211 &VNET_NAME(tcp_do_autorcvbuf), 0, 212 "Enable automatic receive buffer sizing"); 213 214VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024; 215#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc) 216SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW, 217 &VNET_NAME(tcp_autorcvbuf_inc), 0, 218 "Incrementor step size of automatic receive buffer"); 219 220VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024; 221#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max) 222SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW, 223 &VNET_NAME(tcp_autorcvbuf_max), 0, 224 "Max size of automatic receive buffer"); 225 226VNET_DEFINE(struct inpcbhead, tcb); 227#define tcb6 tcb /* for KAME src sync over BSD*'s */ 228VNET_DEFINE(struct inpcbinfo, tcbinfo); 229 230static void tcp_dooptions(struct tcpopt *, u_char *, int, int); 231static void tcp_do_segment(struct mbuf *, struct tcphdr *, 232 struct socket *, struct tcpcb *, int, int, uint8_t, 233 int); 234static void tcp_dropwithreset(struct mbuf *, struct tcphdr *, 235 struct tcpcb *, int, int); 236static void tcp_pulloutofband(struct socket *, 237 struct tcphdr *, struct mbuf *, int); 238static void tcp_xmit_timer(struct tcpcb *, int); 239static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *); 240static void inline cc_ack_received(struct tcpcb *tp, struct tcphdr *th, 241 uint16_t type); 242static void inline cc_conn_init(struct tcpcb *tp); 243static void inline cc_post_recovery(struct tcpcb *tp, struct tcphdr *th); 244static void inline hhook_run_tcp_est_in(struct tcpcb *tp, 245 struct tcphdr *th, struct tcpopt *to); 246 247/* 248 * TCP statistics are stored in an "array" of counter(9)s. 249 */ 250VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat); 251VNET_PCPUSTAT_SYSINIT(tcpstat); 252SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat, 253 tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)"); 254 255#ifdef VIMAGE 256VNET_PCPUSTAT_SYSUNINIT(tcpstat); 257#endif /* VIMAGE */ 258/* 259 * Kernel module interface for updating tcpstat. The argument is an index 260 * into tcpstat treated as an array. 261 */ 262void 263kmod_tcpstat_inc(int statnum) 264{ 265 266 counter_u64_add(VNET(tcpstat)[statnum], 1); 267} 268 269/* 270 * Wrapper for the TCP established input helper hook. 271 */ 272static void inline 273hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to) 274{ 275 struct tcp_hhook_data hhook_data; 276 277 if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) { 278 hhook_data.tp = tp; 279 hhook_data.th = th; 280 hhook_data.to = to; 281 282 hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data, 283 tp->osd); 284 } 285} 286 287/* 288 * CC wrapper hook functions 289 */ 290static void inline 291cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type) 292{ 293 INP_WLOCK_ASSERT(tp->t_inpcb); 294 295 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th); 296 if (tp->snd_cwnd <= tp->snd_wnd) 297 tp->ccv->flags |= CCF_CWND_LIMITED; 298 else 299 tp->ccv->flags &= ~CCF_CWND_LIMITED; 300 301 if (type == CC_ACK) { 302 if (tp->snd_cwnd > tp->snd_ssthresh) { 303 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 304 V_tcp_abc_l_var * tp->t_maxseg); 305 if (tp->t_bytes_acked >= tp->snd_cwnd) { 306 tp->t_bytes_acked -= tp->snd_cwnd; 307 tp->ccv->flags |= CCF_ABC_SENTAWND; 308 } 309 } else { 310 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 311 tp->t_bytes_acked = 0; 312 } 313 } 314 315 if (CC_ALGO(tp)->ack_received != NULL) { 316 /* XXXLAS: Find a way to live without this */ 317 tp->ccv->curack = th->th_ack; 318 CC_ALGO(tp)->ack_received(tp->ccv, type); 319 } 320} 321 322static void inline 323cc_conn_init(struct tcpcb *tp) 324{ 325 struct hc_metrics_lite metrics; 326 struct inpcb *inp = tp->t_inpcb; 327 int rtt; 328 329 INP_WLOCK_ASSERT(tp->t_inpcb); 330 331 tcp_hc_get(&inp->inp_inc, &metrics); 332 333 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) { 334 tp->t_srtt = rtt; 335 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE; 336 TCPSTAT_INC(tcps_usedrtt); 337 if (metrics.rmx_rttvar) { 338 tp->t_rttvar = metrics.rmx_rttvar; 339 TCPSTAT_INC(tcps_usedrttvar); 340 } else { 341 /* default variation is +- 1 rtt */ 342 tp->t_rttvar = 343 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; 344 } 345 TCPT_RANGESET(tp->t_rxtcur, 346 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, 347 tp->t_rttmin, TCPTV_REXMTMAX); 348 } 349 if (metrics.rmx_ssthresh) { 350 /* 351 * There's some sort of gateway or interface 352 * buffer limit on the path. Use this to set 353 * the slow start threshhold, but set the 354 * threshold to no less than 2*mss. 355 */ 356 tp->snd_ssthresh = max(2 * tp->t_maxseg, metrics.rmx_ssthresh); 357 TCPSTAT_INC(tcps_usedssthresh); 358 } 359 360 /* 361 * Set the initial slow-start flight size. 362 * 363 * RFC5681 Section 3.1 specifies the default conservative values. 364 * RFC3390 specifies slightly more aggressive values. 365 * RFC6928 increases it to ten segments. 366 * 367 * If a SYN or SYN/ACK was lost and retransmitted, we have to 368 * reduce the initial CWND to one segment as congestion is likely 369 * requiring us to be cautious. 370 */ 371 if (tp->snd_cwnd == 1) 372 tp->snd_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 373 else if (V_tcp_do_initcwnd10) 374 tp->snd_cwnd = min(10 * tp->t_maxseg, 375 max(2 * tp->t_maxseg, 14600)); 376 else if (V_tcp_do_rfc3390) 377 tp->snd_cwnd = min(4 * tp->t_maxseg, 378 max(2 * tp->t_maxseg, 4380)); 379 else { 380 /* Per RFC5681 Section 3.1 */ 381 if (tp->t_maxseg > 2190) 382 tp->snd_cwnd = 2 * tp->t_maxseg; 383 else if (tp->t_maxseg > 1095) 384 tp->snd_cwnd = 3 * tp->t_maxseg; 385 else 386 tp->snd_cwnd = 4 * tp->t_maxseg; 387 } 388 389 if (CC_ALGO(tp)->conn_init != NULL) 390 CC_ALGO(tp)->conn_init(tp->ccv); 391} 392 393void inline 394cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type) 395{ 396 INP_WLOCK_ASSERT(tp->t_inpcb); 397 398 switch(type) { 399 case CC_NDUPACK: 400 if (!IN_FASTRECOVERY(tp->t_flags)) { 401 tp->snd_recover = tp->snd_max; 402 if (tp->t_flags & TF_ECN_PERMIT) 403 tp->t_flags |= TF_ECN_SND_CWR; 404 } 405 break; 406 case CC_ECN: 407 if (!IN_CONGRECOVERY(tp->t_flags)) { 408 TCPSTAT_INC(tcps_ecn_rcwnd); 409 tp->snd_recover = tp->snd_max; 410 if (tp->t_flags & TF_ECN_PERMIT) 411 tp->t_flags |= TF_ECN_SND_CWR; 412 } 413 break; 414 case CC_RTO: 415 tp->t_dupacks = 0; 416 tp->t_bytes_acked = 0; 417 EXIT_RECOVERY(tp->t_flags); 418 tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 / 419 tp->t_maxseg) * tp->t_maxseg; 420 tp->snd_cwnd = tp->t_maxseg; 421 break; 422 case CC_RTO_ERR: 423 TCPSTAT_INC(tcps_sndrexmitbad); 424 /* RTO was unnecessary, so reset everything. */ 425 tp->snd_cwnd = tp->snd_cwnd_prev; 426 tp->snd_ssthresh = tp->snd_ssthresh_prev; 427 tp->snd_recover = tp->snd_recover_prev; 428 if (tp->t_flags & TF_WASFRECOVERY) 429 ENTER_FASTRECOVERY(tp->t_flags); 430 if (tp->t_flags & TF_WASCRECOVERY) 431 ENTER_CONGRECOVERY(tp->t_flags); 432 tp->snd_nxt = tp->snd_max; 433 tp->t_flags &= ~TF_PREVVALID; 434 tp->t_badrxtwin = 0; 435 break; 436 } 437 438 if (CC_ALGO(tp)->cong_signal != NULL) { 439 if (th != NULL) 440 tp->ccv->curack = th->th_ack; 441 CC_ALGO(tp)->cong_signal(tp->ccv, type); 442 } 443} 444 445static void inline 446cc_post_recovery(struct tcpcb *tp, struct tcphdr *th) 447{ 448 INP_WLOCK_ASSERT(tp->t_inpcb); 449 450 /* XXXLAS: KASSERT that we're in recovery? */ 451 452 if (CC_ALGO(tp)->post_recovery != NULL) { 453 tp->ccv->curack = th->th_ack; 454 CC_ALGO(tp)->post_recovery(tp->ccv); 455 } 456 /* XXXLAS: EXIT_RECOVERY ? */ 457 tp->t_bytes_acked = 0; 458} 459 460#ifdef TCP_SIGNATURE 461static inline int 462tcp_signature_verify_input(struct mbuf *m, int off0, int tlen, int optlen, 463 struct tcpopt *to, struct tcphdr *th, u_int tcpbflag) 464{ 465 int ret; 466 467 tcp_fields_to_net(th); 468 ret = tcp_signature_verify(m, off0, tlen, optlen, to, th, tcpbflag); 469 tcp_fields_to_host(th); 470 return (ret); 471} 472#endif 473 474/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */ 475#ifdef INET6 476#define ND6_HINT(tp) \ 477do { \ 478 if ((tp) && (tp)->t_inpcb && \ 479 ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \ 480 nd6_nud_hint(NULL, NULL, 0); \ 481} while (0) 482#else 483#define ND6_HINT(tp) 484#endif 485 486/* 487 * Indicate whether this ack should be delayed. We can delay the ack if 488 * - there is no delayed ack timer in progress and 489 * - our last ack wasn't a 0-sized window. We never want to delay 490 * the ack that opens up a 0-sized window and 491 * - delayed acks are enabled or 492 * - this is a half-synchronized T/TCP connection. 493 * - the segment size is not larger than the MSS and LRO wasn't used 494 * for this segment. 495 */ 496#define DELAY_ACK(tp, tlen) \ 497 ((!tcp_timer_active(tp, TT_DELACK) && \ 498 (tp->t_flags & TF_RXWIN0SENT) == 0) && \ 499 (tlen <= tp->t_maxopd) && \ 500 (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN))) 501 502/* 503 * TCP input handling is split into multiple parts: 504 * tcp6_input is a thin wrapper around tcp_input for the extended 505 * ip6_protox[] call format in ip6_input 506 * tcp_input handles primary segment validation, inpcb lookup and 507 * SYN processing on listen sockets 508 * tcp_do_segment processes the ACK and text of the segment for 509 * establishing, established and closing connections 510 */ 511#ifdef INET6 512int 513tcp6_input(struct mbuf **mp, int *offp, int proto) 514{ 515 struct mbuf *m = *mp; 516 struct in6_ifaddr *ia6; 517 518 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE); 519 520 /* 521 * draft-itojun-ipv6-tcp-to-anycast 522 * better place to put this in? 523 */ 524 ia6 = ip6_getdstifaddr(m); 525 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) { 526 struct ip6_hdr *ip6; 527 528 ifa_free(&ia6->ia_ifa); 529 ip6 = mtod(m, struct ip6_hdr *); 530 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR, 531 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6); 532 return IPPROTO_DONE; 533 } 534 if (ia6) 535 ifa_free(&ia6->ia_ifa); 536 537 tcp_input(m, *offp); 538 return IPPROTO_DONE; 539} 540#endif /* INET6 */ 541 542void 543tcp_input(struct mbuf *m, int off0) 544{ 545 struct tcphdr *th = NULL; 546 struct ip *ip = NULL; 547 struct inpcb *inp = NULL; 548 struct tcpcb *tp = NULL; 549 struct socket *so = NULL; 550 u_char *optp = NULL; 551 int optlen = 0; 552#ifdef INET 553 int len; 554#endif 555 int tlen = 0, off; 556 int drop_hdrlen; 557 int thflags; 558 int rstreason = 0; /* For badport_bandlim accounting purposes */ 559#ifdef TCP_SIGNATURE 560 uint8_t sig_checked = 0; 561#endif 562 uint8_t iptos = 0; 563 struct m_tag *fwd_tag = NULL; 564#ifdef INET6 565 struct ip6_hdr *ip6 = NULL; 566 int isipv6; 567#else 568 const void *ip6 = NULL; 569#endif /* INET6 */ 570 struct tcpopt to; /* options in this segment */ 571 char *s = NULL; /* address and port logging */ 572 int ti_locked; 573#define TI_UNLOCKED 1 574#define TI_RLOCKED 2 575 576#ifdef TCPDEBUG 577 /* 578 * The size of tcp_saveipgen must be the size of the max ip header, 579 * now IPv6. 580 */ 581 u_char tcp_saveipgen[IP6_HDR_LEN]; 582 struct tcphdr tcp_savetcp; 583 short ostate = 0; 584#endif 585 586#ifdef INET6 587 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0; 588#endif 589 590 to.to_flags = 0; 591 TCPSTAT_INC(tcps_rcvtotal); 592 593#ifdef INET6 594 if (isipv6) { 595 /* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */ 596 597 if (m->m_len < (sizeof(*ip6) + sizeof(*th))) { 598 m = m_pullup(m, sizeof(*ip6) + sizeof(*th)); 599 if (m == NULL) { 600 TCPSTAT_INC(tcps_rcvshort); 601 return; 602 } 603 } 604 605 ip6 = mtod(m, struct ip6_hdr *); 606 th = (struct tcphdr *)((caddr_t)ip6 + off0); 607 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0; 608 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) { 609 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 610 th->th_sum = m->m_pkthdr.csum_data; 611 else 612 th->th_sum = in6_cksum_pseudo(ip6, tlen, 613 IPPROTO_TCP, m->m_pkthdr.csum_data); 614 th->th_sum ^= 0xffff; 615 } else 616 th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen); 617 if (th->th_sum) { 618 TCPSTAT_INC(tcps_rcvbadsum); 619 goto drop; 620 } 621 622 /* 623 * Be proactive about unspecified IPv6 address in source. 624 * As we use all-zero to indicate unbounded/unconnected pcb, 625 * unspecified IPv6 address can be used to confuse us. 626 * 627 * Note that packets with unspecified IPv6 destination is 628 * already dropped in ip6_input. 629 */ 630 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { 631 /* XXX stat */ 632 goto drop; 633 } 634 } 635#endif 636#if defined(INET) && defined(INET6) 637 else 638#endif 639#ifdef INET 640 { 641 /* 642 * Get IP and TCP header together in first mbuf. 643 * Note: IP leaves IP header in first mbuf. 644 */ 645 if (off0 > sizeof (struct ip)) { 646 ip_stripoptions(m); 647 off0 = sizeof(struct ip); 648 } 649 if (m->m_len < sizeof (struct tcpiphdr)) { 650 if ((m = m_pullup(m, sizeof (struct tcpiphdr))) 651 == NULL) { 652 TCPSTAT_INC(tcps_rcvshort); 653 return; 654 } 655 } 656 ip = mtod(m, struct ip *); 657 th = (struct tcphdr *)((caddr_t)ip + off0); 658 tlen = ntohs(ip->ip_len) - off0; 659 660 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 661 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 662 th->th_sum = m->m_pkthdr.csum_data; 663 else 664 th->th_sum = in_pseudo(ip->ip_src.s_addr, 665 ip->ip_dst.s_addr, 666 htonl(m->m_pkthdr.csum_data + tlen + 667 IPPROTO_TCP)); 668 th->th_sum ^= 0xffff; 669 } else { 670 struct ipovly *ipov = (struct ipovly *)ip; 671 672 /* 673 * Checksum extended TCP header and data. 674 */ 675 len = off0 + tlen; 676 bzero(ipov->ih_x1, sizeof(ipov->ih_x1)); 677 ipov->ih_len = htons(tlen); 678 th->th_sum = in_cksum(m, len); 679 /* Reset length for SDT probes. */ 680 ip->ip_len = htons(tlen + off0); 681 } 682 683 if (th->th_sum) { 684 TCPSTAT_INC(tcps_rcvbadsum); 685 goto drop; 686 } 687 /* Re-initialization for later version check */ 688 ip->ip_v = IPVERSION; 689 } 690#endif /* INET */ 691 692#ifdef INET6 693 if (isipv6) 694 iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; 695#endif 696#if defined(INET) && defined(INET6) 697 else 698#endif 699#ifdef INET 700 iptos = ip->ip_tos; 701#endif 702 703 /* 704 * Check that TCP offset makes sense, 705 * pull out TCP options and adjust length. XXX 706 */ 707 off = th->th_off << 2; 708 if (off < sizeof (struct tcphdr) || off > tlen) { 709 TCPSTAT_INC(tcps_rcvbadoff); 710 goto drop; 711 } 712 tlen -= off; /* tlen is used instead of ti->ti_len */ 713 if (off > sizeof (struct tcphdr)) { 714#ifdef INET6 715 if (isipv6) { 716 IP6_EXTHDR_CHECK(m, off0, off, ); 717 ip6 = mtod(m, struct ip6_hdr *); 718 th = (struct tcphdr *)((caddr_t)ip6 + off0); 719 } 720#endif 721#if defined(INET) && defined(INET6) 722 else 723#endif 724#ifdef INET 725 { 726 if (m->m_len < sizeof(struct ip) + off) { 727 if ((m = m_pullup(m, sizeof (struct ip) + off)) 728 == NULL) { 729 TCPSTAT_INC(tcps_rcvshort); 730 return; 731 } 732 ip = mtod(m, struct ip *); 733 th = (struct tcphdr *)((caddr_t)ip + off0); 734 } 735 } 736#endif 737 optlen = off - sizeof (struct tcphdr); 738 optp = (u_char *)(th + 1); 739 } 740 thflags = th->th_flags; 741 742 /* 743 * Convert TCP protocol specific fields to host format. 744 */ 745 tcp_fields_to_host(th); 746 747 /* 748 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options. 749 */ 750 drop_hdrlen = off0 + off; 751 752 /* 753 * Locate pcb for segment; if we're likely to add or remove a 754 * connection then first acquire pcbinfo lock. There are three cases 755 * where we might discover later we need a write lock despite the 756 * flags: ACKs moving a connection out of the syncache, ACKs for a 757 * connection in TIMEWAIT and SYNs not targeting a listening socket. 758 */ 759 if ((thflags & (TH_FIN | TH_RST)) != 0) { 760 INP_INFO_RLOCK(&V_tcbinfo); 761 ti_locked = TI_RLOCKED; 762 } else 763 ti_locked = TI_UNLOCKED; 764 765 /* 766 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. 767 */ 768 if ( 769#ifdef INET6 770 (isipv6 && (m->m_flags & M_IP6_NEXTHOP)) 771#ifdef INET 772 || (!isipv6 && (m->m_flags & M_IP_NEXTHOP)) 773#endif 774#endif 775#if defined(INET) && !defined(INET6) 776 (m->m_flags & M_IP_NEXTHOP) 777#endif 778 ) 779 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL); 780 781findpcb: 782#ifdef INVARIANTS 783 if (ti_locked == TI_RLOCKED) { 784 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 785 } else { 786 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 787 } 788#endif 789#ifdef INET6 790 if (isipv6 && fwd_tag != NULL) { 791 struct sockaddr_in6 *next_hop6; 792 793 next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1); 794 /* 795 * Transparently forwarded. Pretend to be the destination. 796 * Already got one like this? 797 */ 798 inp = in6_pcblookup_mbuf(&V_tcbinfo, 799 &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport, 800 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m); 801 if (!inp) { 802 /* 803 * It's new. Try to find the ambushing socket. 804 * Because we've rewritten the destination address, 805 * any hardware-generated hash is ignored. 806 */ 807 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src, 808 th->th_sport, &next_hop6->sin6_addr, 809 next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) : 810 th->th_dport, INPLOOKUP_WILDCARD | 811 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif); 812 } 813 } else if (isipv6) { 814 inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src, 815 th->th_sport, &ip6->ip6_dst, th->th_dport, 816 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB, 817 m->m_pkthdr.rcvif, m); 818 } 819#endif /* INET6 */ 820#if defined(INET6) && defined(INET) 821 else 822#endif 823#ifdef INET 824 if (fwd_tag != NULL) { 825 struct sockaddr_in *next_hop; 826 827 next_hop = (struct sockaddr_in *)(fwd_tag+1); 828 /* 829 * Transparently forwarded. Pretend to be the destination. 830 * already got one like this? 831 */ 832 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport, 833 ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB, 834 m->m_pkthdr.rcvif, m); 835 if (!inp) { 836 /* 837 * It's new. Try to find the ambushing socket. 838 * Because we've rewritten the destination address, 839 * any hardware-generated hash is ignored. 840 */ 841 inp = in_pcblookup(&V_tcbinfo, ip->ip_src, 842 th->th_sport, next_hop->sin_addr, 843 next_hop->sin_port ? ntohs(next_hop->sin_port) : 844 th->th_dport, INPLOOKUP_WILDCARD | 845 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif); 846 } 847 } else 848 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, 849 th->th_sport, ip->ip_dst, th->th_dport, 850 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB, 851 m->m_pkthdr.rcvif, m); 852#endif /* INET */ 853 854 /* 855 * If the INPCB does not exist then all data in the incoming 856 * segment is discarded and an appropriate RST is sent back. 857 * XXX MRT Send RST using which routing table? 858 */ 859 if (inp == NULL) { 860 /* 861 * Log communication attempts to ports that are not 862 * in use. 863 */ 864 if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) || 865 tcp_log_in_vain == 2) { 866 if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6))) 867 log(LOG_INFO, "%s; %s: Connection attempt " 868 "to closed port\n", s, __func__); 869 } 870 /* 871 * When blackholing do not respond with a RST but 872 * completely ignore the segment and drop it. 873 */ 874 if ((V_blackhole == 1 && (thflags & TH_SYN)) || 875 V_blackhole == 2) 876 goto dropunlock; 877 878 rstreason = BANDLIM_RST_CLOSEDPORT; 879 goto dropwithreset; 880 } 881 INP_WLOCK_ASSERT(inp); 882 /* 883 * While waiting for inp lock during the lookup, another thread 884 * can have dropped the inpcb, in which case we need to loop back 885 * and try to find a new inpcb to deliver to. 886 */ 887 if (inp->inp_flags & INP_DROPPED) { 888 INP_WUNLOCK(inp); 889 inp = NULL; 890 goto findpcb; 891 } 892 if ((inp->inp_flowtype == M_HASHTYPE_NONE) && 893 (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) && 894 ((inp->inp_socket == NULL) || 895 (inp->inp_socket->so_options & SO_ACCEPTCONN) == 0)) { 896 inp->inp_flowid = m->m_pkthdr.flowid; 897 inp->inp_flowtype = M_HASHTYPE_GET(m); 898 } 899#ifdef IPSEC 900#ifdef INET6 901 if (isipv6 && ipsec6_in_reject(m, inp)) { 902 IPSEC6STAT_INC(ips_in_polvio); 903 goto dropunlock; 904 } else 905#endif /* INET6 */ 906 if (ipsec4_in_reject(m, inp) != 0) { 907 IPSECSTAT_INC(ips_in_polvio); 908 goto dropunlock; 909 } 910#endif /* IPSEC */ 911 912 /* 913 * Check the minimum TTL for socket. 914 */ 915 if (inp->inp_ip_minttl != 0) { 916#ifdef INET6 917 if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim) 918 goto dropunlock; 919 else 920#endif 921 if (inp->inp_ip_minttl > ip->ip_ttl) 922 goto dropunlock; 923 } 924 925 /* 926 * A previous connection in TIMEWAIT state is supposed to catch stray 927 * or duplicate segments arriving late. If this segment was a 928 * legitimate new connection attempt, the old INPCB gets removed and 929 * we can try again to find a listening socket. 930 * 931 * At this point, due to earlier optimism, we may hold only an inpcb 932 * lock, and not the inpcbinfo write lock. If so, we need to try to 933 * acquire it, or if that fails, acquire a reference on the inpcb, 934 * drop all locks, acquire a global write lock, and then re-acquire 935 * the inpcb lock. We may at that point discover that another thread 936 * has tried to free the inpcb, in which case we need to loop back 937 * and try to find a new inpcb to deliver to. 938 * 939 * XXXRW: It may be time to rethink timewait locking. 940 */ 941relocked: 942 if (inp->inp_flags & INP_TIMEWAIT) { 943 if (ti_locked == TI_UNLOCKED) { 944 if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) { 945 in_pcbref(inp); 946 INP_WUNLOCK(inp); 947 INP_INFO_RLOCK(&V_tcbinfo); 948 ti_locked = TI_RLOCKED; 949 INP_WLOCK(inp); 950 if (in_pcbrele_wlocked(inp)) { 951 inp = NULL; 952 goto findpcb; 953 } else if (inp->inp_flags & INP_DROPPED) { 954 INP_WUNLOCK(inp); 955 inp = NULL; 956 goto findpcb; 957 } 958 } else 959 ti_locked = TI_RLOCKED; 960 } 961 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 962 963 if (thflags & TH_SYN) 964 tcp_dooptions(&to, optp, optlen, TO_SYN); 965 /* 966 * NB: tcp_twcheck unlocks the INP and frees the mbuf. 967 */ 968 if (tcp_twcheck(inp, &to, th, m, tlen)) 969 goto findpcb; 970 INP_INFO_RUNLOCK(&V_tcbinfo); 971 return; 972 } 973 /* 974 * The TCPCB may no longer exist if the connection is winding 975 * down or it is in the CLOSED state. Either way we drop the 976 * segment and send an appropriate response. 977 */ 978 tp = intotcpcb(inp); 979 if (tp == NULL || tp->t_state == TCPS_CLOSED) { 980 rstreason = BANDLIM_RST_CLOSEDPORT; 981 goto dropwithreset; 982 } 983 984#ifdef TCP_OFFLOAD 985 if (tp->t_flags & TF_TOE) { 986 tcp_offload_input(tp, m); 987 m = NULL; /* consumed by the TOE driver */ 988 goto dropunlock; 989 } 990#endif 991 992 /* 993 * We've identified a valid inpcb, but it could be that we need an 994 * inpcbinfo write lock but don't hold it. In this case, attempt to 995 * acquire using the same strategy as the TIMEWAIT case above. If we 996 * relock, we have to jump back to 'relocked' as the connection might 997 * now be in TIMEWAIT. 998 */ 999#ifdef INVARIANTS 1000 if ((thflags & (TH_FIN | TH_RST)) != 0) 1001 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 1002#endif 1003 if (!((tp->t_state == TCPS_ESTABLISHED && (thflags & TH_SYN) == 0) || 1004 (tp->t_state == TCPS_LISTEN && (thflags & TH_SYN) && 1005 !(tp->t_flags & TF_FASTOPEN)))) { 1006 if (ti_locked == TI_UNLOCKED) { 1007 if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) { 1008 in_pcbref(inp); 1009 INP_WUNLOCK(inp); 1010 INP_INFO_RLOCK(&V_tcbinfo); 1011 ti_locked = TI_RLOCKED; 1012 INP_WLOCK(inp); 1013 if (in_pcbrele_wlocked(inp)) { 1014 inp = NULL; 1015 goto findpcb; 1016 } else if (inp->inp_flags & INP_DROPPED) { 1017 INP_WUNLOCK(inp); 1018 inp = NULL; 1019 goto findpcb; 1020 } 1021 goto relocked; 1022 } else 1023 ti_locked = TI_RLOCKED; 1024 } 1025 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 1026 } 1027 1028#ifdef MAC 1029 INP_WLOCK_ASSERT(inp); 1030 if (mac_inpcb_check_deliver(inp, m)) 1031 goto dropunlock; 1032#endif 1033 so = inp->inp_socket; 1034 KASSERT(so != NULL, ("%s: so == NULL", __func__)); 1035#ifdef TCPDEBUG 1036 if (so->so_options & SO_DEBUG) { 1037 ostate = tp->t_state; 1038#ifdef INET6 1039 if (isipv6) { 1040 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6)); 1041 } else 1042#endif 1043 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip)); 1044 tcp_savetcp = *th; 1045 } 1046#endif /* TCPDEBUG */ 1047 /* 1048 * When the socket is accepting connections (the INPCB is in LISTEN 1049 * state) we look into the SYN cache if this is a new connection 1050 * attempt or the completion of a previous one. 1051 */ 1052 if (so->so_options & SO_ACCEPTCONN) { 1053 struct in_conninfo inc; 1054 1055 KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but " 1056 "tp not listening", __func__)); 1057 bzero(&inc, sizeof(inc)); 1058#ifdef INET6 1059 if (isipv6) { 1060 inc.inc_flags |= INC_ISIPV6; 1061 inc.inc6_faddr = ip6->ip6_src; 1062 inc.inc6_laddr = ip6->ip6_dst; 1063 } else 1064#endif 1065 { 1066 inc.inc_faddr = ip->ip_src; 1067 inc.inc_laddr = ip->ip_dst; 1068 } 1069 inc.inc_fport = th->th_sport; 1070 inc.inc_lport = th->th_dport; 1071 inc.inc_fibnum = so->so_fibnum; 1072 1073 /* 1074 * Check for an existing connection attempt in syncache if 1075 * the flag is only ACK. A successful lookup creates a new 1076 * socket appended to the listen queue in SYN_RECEIVED state. 1077 */ 1078 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) { 1079 1080 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 1081 /* 1082 * Parse the TCP options here because 1083 * syncookies need access to the reflected 1084 * timestamp. 1085 */ 1086 tcp_dooptions(&to, optp, optlen, 0); 1087 /* 1088 * NB: syncache_expand() doesn't unlock 1089 * inp and tcpinfo locks. 1090 */ 1091 if (!syncache_expand(&inc, &to, th, &so, m)) { 1092 /* 1093 * No syncache entry or ACK was not 1094 * for our SYN/ACK. Send a RST. 1095 * NB: syncache did its own logging 1096 * of the failure cause. 1097 */ 1098 rstreason = BANDLIM_RST_OPENPORT; 1099 goto dropwithreset; 1100 } 1101#ifdef TCP_RFC7413 1102new_tfo_socket: 1103#endif 1104 if (so == NULL) { 1105 /* 1106 * We completed the 3-way handshake 1107 * but could not allocate a socket 1108 * either due to memory shortage, 1109 * listen queue length limits or 1110 * global socket limits. Send RST 1111 * or wait and have the remote end 1112 * retransmit the ACK for another 1113 * try. 1114 */ 1115 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1116 log(LOG_DEBUG, "%s; %s: Listen socket: " 1117 "Socket allocation failed due to " 1118 "limits or memory shortage, %s\n", 1119 s, __func__, 1120 V_tcp_sc_rst_sock_fail ? 1121 "sending RST" : "try again"); 1122 if (V_tcp_sc_rst_sock_fail) { 1123 rstreason = BANDLIM_UNLIMITED; 1124 goto dropwithreset; 1125 } else 1126 goto dropunlock; 1127 } 1128 /* 1129 * Socket is created in state SYN_RECEIVED. 1130 * Unlock the listen socket, lock the newly 1131 * created socket and update the tp variable. 1132 */ 1133 INP_WUNLOCK(inp); /* listen socket */ 1134 inp = sotoinpcb(so); 1135 /* 1136 * New connection inpcb is already locked by 1137 * syncache_expand(). 1138 */ 1139 INP_WLOCK_ASSERT(inp); 1140 tp = intotcpcb(inp); 1141 KASSERT(tp->t_state == TCPS_SYN_RECEIVED, 1142 ("%s: ", __func__)); 1143#ifdef TCP_SIGNATURE 1144 if (sig_checked == 0) { 1145 tcp_dooptions(&to, optp, optlen, 1146 (thflags & TH_SYN) ? TO_SYN : 0); 1147 if (!tcp_signature_verify_input(m, off0, tlen, 1148 optlen, &to, th, tp->t_flags)) { 1149 1150 /* 1151 * In SYN_SENT state if it receives an 1152 * RST, it is allowed for further 1153 * processing. 1154 */ 1155 if ((thflags & TH_RST) == 0 || 1156 (tp->t_state == TCPS_SYN_SENT) == 0) 1157 goto dropunlock; 1158 } 1159 sig_checked = 1; 1160 } 1161#endif 1162 1163 /* 1164 * Process the segment and the data it 1165 * contains. tcp_do_segment() consumes 1166 * the mbuf chain and unlocks the inpcb. 1167 */ 1168 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, 1169 iptos, ti_locked); 1170 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1171 return; 1172 } 1173 /* 1174 * Segment flag validation for new connection attempts: 1175 * 1176 * Our (SYN|ACK) response was rejected. 1177 * Check with syncache and remove entry to prevent 1178 * retransmits. 1179 * 1180 * NB: syncache_chkrst does its own logging of failure 1181 * causes. 1182 */ 1183 if (thflags & TH_RST) { 1184 syncache_chkrst(&inc, th); 1185 goto dropunlock; 1186 } 1187 /* 1188 * We can't do anything without SYN. 1189 */ 1190 if ((thflags & TH_SYN) == 0) { 1191 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1192 log(LOG_DEBUG, "%s; %s: Listen socket: " 1193 "SYN is missing, segment ignored\n", 1194 s, __func__); 1195 TCPSTAT_INC(tcps_badsyn); 1196 goto dropunlock; 1197 } 1198 /* 1199 * (SYN|ACK) is bogus on a listen socket. 1200 */ 1201 if (thflags & TH_ACK) { 1202 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1203 log(LOG_DEBUG, "%s; %s: Listen socket: " 1204 "SYN|ACK invalid, segment rejected\n", 1205 s, __func__); 1206 syncache_badack(&inc); /* XXX: Not needed! */ 1207 TCPSTAT_INC(tcps_badsyn); 1208 rstreason = BANDLIM_RST_OPENPORT; 1209 goto dropwithreset; 1210 } 1211 /* 1212 * If the drop_synfin option is enabled, drop all 1213 * segments with both the SYN and FIN bits set. 1214 * This prevents e.g. nmap from identifying the 1215 * TCP/IP stack. 1216 * XXX: Poor reasoning. nmap has other methods 1217 * and is constantly refining its stack detection 1218 * strategies. 1219 * XXX: This is a violation of the TCP specification 1220 * and was used by RFC1644. 1221 */ 1222 if ((thflags & TH_FIN) && V_drop_synfin) { 1223 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1224 log(LOG_DEBUG, "%s; %s: Listen socket: " 1225 "SYN|FIN segment ignored (based on " 1226 "sysctl setting)\n", s, __func__); 1227 TCPSTAT_INC(tcps_badsyn); 1228 goto dropunlock; 1229 } 1230 /* 1231 * Segment's flags are (SYN) or (SYN|FIN). 1232 * 1233 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored 1234 * as they do not affect the state of the TCP FSM. 1235 * The data pointed to by TH_URG and th_urp is ignored. 1236 */ 1237 KASSERT((thflags & (TH_RST|TH_ACK)) == 0, 1238 ("%s: Listen socket: TH_RST or TH_ACK set", __func__)); 1239 KASSERT(thflags & (TH_SYN), 1240 ("%s: Listen socket: TH_SYN not set", __func__)); 1241#ifdef INET6 1242 /* 1243 * If deprecated address is forbidden, 1244 * we do not accept SYN to deprecated interface 1245 * address to prevent any new inbound connection from 1246 * getting established. 1247 * When we do not accept SYN, we send a TCP RST, 1248 * with deprecated source address (instead of dropping 1249 * it). We compromise it as it is much better for peer 1250 * to send a RST, and RST will be the final packet 1251 * for the exchange. 1252 * 1253 * If we do not forbid deprecated addresses, we accept 1254 * the SYN packet. RFC2462 does not suggest dropping 1255 * SYN in this case. 1256 * If we decipher RFC2462 5.5.4, it says like this: 1257 * 1. use of deprecated addr with existing 1258 * communication is okay - "SHOULD continue to be 1259 * used" 1260 * 2. use of it with new communication: 1261 * (2a) "SHOULD NOT be used if alternate address 1262 * with sufficient scope is available" 1263 * (2b) nothing mentioned otherwise. 1264 * Here we fall into (2b) case as we have no choice in 1265 * our source address selection - we must obey the peer. 1266 * 1267 * The wording in RFC2462 is confusing, and there are 1268 * multiple description text for deprecated address 1269 * handling - worse, they are not exactly the same. 1270 * I believe 5.5.4 is the best one, so we follow 5.5.4. 1271 */ 1272 if (isipv6 && !V_ip6_use_deprecated) { 1273 struct in6_ifaddr *ia6; 1274 1275 ia6 = ip6_getdstifaddr(m); 1276 if (ia6 != NULL && 1277 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) { 1278 ifa_free(&ia6->ia_ifa); 1279 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1280 log(LOG_DEBUG, "%s; %s: Listen socket: " 1281 "Connection attempt to deprecated " 1282 "IPv6 address rejected\n", 1283 s, __func__); 1284 rstreason = BANDLIM_RST_OPENPORT; 1285 goto dropwithreset; 1286 } 1287 if (ia6) 1288 ifa_free(&ia6->ia_ifa); 1289 } 1290#endif /* INET6 */ 1291 /* 1292 * Basic sanity checks on incoming SYN requests: 1293 * Don't respond if the destination is a link layer 1294 * broadcast according to RFC1122 4.2.3.10, p. 104. 1295 * If it is from this socket it must be forged. 1296 * Don't respond if the source or destination is a 1297 * global or subnet broad- or multicast address. 1298 * Note that it is quite possible to receive unicast 1299 * link-layer packets with a broadcast IP address. Use 1300 * in_broadcast() to find them. 1301 */ 1302 if (m->m_flags & (M_BCAST|M_MCAST)) { 1303 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1304 log(LOG_DEBUG, "%s; %s: Listen socket: " 1305 "Connection attempt from broad- or multicast " 1306 "link layer address ignored\n", s, __func__); 1307 goto dropunlock; 1308 } 1309#ifdef INET6 1310 if (isipv6) { 1311 if (th->th_dport == th->th_sport && 1312 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) { 1313 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1314 log(LOG_DEBUG, "%s; %s: Listen socket: " 1315 "Connection attempt to/from self " 1316 "ignored\n", s, __func__); 1317 goto dropunlock; 1318 } 1319 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 1320 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) { 1321 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1322 log(LOG_DEBUG, "%s; %s: Listen socket: " 1323 "Connection attempt from/to multicast " 1324 "address ignored\n", s, __func__); 1325 goto dropunlock; 1326 } 1327 } 1328#endif 1329#if defined(INET) && defined(INET6) 1330 else 1331#endif 1332#ifdef INET 1333 { 1334 if (th->th_dport == th->th_sport && 1335 ip->ip_dst.s_addr == ip->ip_src.s_addr) { 1336 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1337 log(LOG_DEBUG, "%s; %s: Listen socket: " 1338 "Connection attempt from/to self " 1339 "ignored\n", s, __func__); 1340 goto dropunlock; 1341 } 1342 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 1343 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 1344 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 1345 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) { 1346 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1347 log(LOG_DEBUG, "%s; %s: Listen socket: " 1348 "Connection attempt from/to broad- " 1349 "or multicast address ignored\n", 1350 s, __func__); 1351 goto dropunlock; 1352 } 1353 } 1354#endif 1355 /* 1356 * SYN appears to be valid. Create compressed TCP state 1357 * for syncache. 1358 */ 1359#ifdef TCPDEBUG 1360 if (so->so_options & SO_DEBUG) 1361 tcp_trace(TA_INPUT, ostate, tp, 1362 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1363#endif 1364 tcp_dooptions(&to, optp, optlen, TO_SYN); 1365#ifdef TCP_RFC7413 1366 if (syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL)) 1367 goto new_tfo_socket; 1368#else 1369 syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL); 1370#endif 1371 /* 1372 * Entry added to syncache and mbuf consumed. 1373 * Only the listen socket is unlocked by syncache_add(). 1374 */ 1375 if (ti_locked == TI_RLOCKED) { 1376 INP_INFO_RUNLOCK(&V_tcbinfo); 1377 ti_locked = TI_UNLOCKED; 1378 } 1379 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1380 return; 1381 } else if (tp->t_state == TCPS_LISTEN) { 1382 /* 1383 * When a listen socket is torn down the SO_ACCEPTCONN 1384 * flag is removed first while connections are drained 1385 * from the accept queue in a unlock/lock cycle of the 1386 * ACCEPT_LOCK, opening a race condition allowing a SYN 1387 * attempt go through unhandled. 1388 */ 1389 goto dropunlock; 1390 } 1391 1392#ifdef TCP_SIGNATURE 1393 if (sig_checked == 0) { 1394 tcp_dooptions(&to, optp, optlen, 1395 (thflags & TH_SYN) ? TO_SYN : 0); 1396 if (!tcp_signature_verify_input(m, off0, tlen, optlen, &to, 1397 th, tp->t_flags)) { 1398 1399 /* 1400 * In SYN_SENT state if it receives an RST, it is 1401 * allowed for further processing. 1402 */ 1403 if ((thflags & TH_RST) == 0 || 1404 (tp->t_state == TCPS_SYN_SENT) == 0) 1405 goto dropunlock; 1406 } 1407 sig_checked = 1; 1408 } 1409#endif 1410 1411 TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th); 1412 1413 /* 1414 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later 1415 * state. tcp_do_segment() always consumes the mbuf chain, unlocks 1416 * the inpcb, and unlocks pcbinfo. 1417 */ 1418 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked); 1419 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1420 return; 1421 1422dropwithreset: 1423 TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th); 1424 1425 if (ti_locked == TI_RLOCKED) { 1426 INP_INFO_RUNLOCK(&V_tcbinfo); 1427 ti_locked = TI_UNLOCKED; 1428 } 1429#ifdef INVARIANTS 1430 else { 1431 KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset " 1432 "ti_locked: %d", __func__, ti_locked)); 1433 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1434 } 1435#endif 1436 1437 if (inp != NULL) { 1438 tcp_dropwithreset(m, th, tp, tlen, rstreason); 1439 INP_WUNLOCK(inp); 1440 } else 1441 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 1442 m = NULL; /* mbuf chain got consumed. */ 1443 goto drop; 1444 1445dropunlock: 1446 if (m != NULL) 1447 TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th); 1448 1449 if (ti_locked == TI_RLOCKED) { 1450 INP_INFO_RUNLOCK(&V_tcbinfo); 1451 ti_locked = TI_UNLOCKED; 1452 } 1453#ifdef INVARIANTS 1454 else { 1455 KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock " 1456 "ti_locked: %d", __func__, ti_locked)); 1457 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1458 } 1459#endif 1460 1461 if (inp != NULL) 1462 INP_WUNLOCK(inp); 1463 1464drop: 1465 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1466 if (s != NULL) 1467 free(s, M_TCPLOG); 1468 if (m != NULL) 1469 m_freem(m); 1470} 1471 1472/* 1473 * Automatic sizing of receive socket buffer. Often the send 1474 * buffer size is not optimally adjusted to the actual network 1475 * conditions at hand (delay bandwidth product). Setting the 1476 * buffer size too small limits throughput on links with high 1477 * bandwidth and high delay (eg. trans-continental/oceanic links). 1478 * 1479 * On the receive side the socket buffer memory is only rarely 1480 * used to any significant extent. This allows us to be much 1481 * more aggressive in scaling the receive socket buffer. For 1482 * the case that the buffer space is actually used to a large 1483 * extent and we run out of kernel memory we can simply drop 1484 * the new segments; TCP on the sender will just retransmit it 1485 * later. Setting the buffer size too big may only consume too 1486 * much kernel memory if the application doesn't read() from 1487 * the socket or packet loss or reordering makes use of the 1488 * reassembly queue. 1489 * 1490 * The criteria to step up the receive buffer one notch are: 1491 * 1. Application has not set receive buffer size with 1492 * SO_RCVBUF. Setting SO_RCVBUF clears SB_AUTOSIZE. 1493 * 2. the number of bytes received during the time it takes 1494 * one timestamp to be reflected back to us (the RTT); 1495 * 3. received bytes per RTT is within seven eighth of the 1496 * current socket buffer size; 1497 * 4. receive buffer size has not hit maximal automatic size; 1498 * 1499 * This algorithm does one step per RTT at most and only if 1500 * we receive a bulk stream w/o packet losses or reorderings. 1501 * Shrinking the buffer during idle times is not necessary as 1502 * it doesn't consume any memory when idle. 1503 * 1504 * TODO: Only step up if the application is actually serving 1505 * the buffer to better manage the socket buffer resources. 1506 */ 1507int 1508tcp_autorcvbuf(struct mbuf *m, struct tcphdr *th, struct socket *so, 1509 struct tcpcb *tp, int tlen) 1510{ 1511 int newsize = 0; 1512 1513 if (V_tcp_do_autorcvbuf && (so->so_rcv.sb_flags & SB_AUTOSIZE) && 1514 tp->t_srtt != 0 && tp->rfbuf_ts != 0 && 1515 TCP_TS_TO_TICKS(tcp_ts_getticks() - tp->rfbuf_ts) > 1516 (tp->t_srtt >> TCP_RTT_SHIFT)) { 1517 if (tp->rfbuf_cnt > (so->so_rcv.sb_hiwat / 8 * 7) && 1518 so->so_rcv.sb_hiwat < V_tcp_autorcvbuf_max) { 1519 newsize = min(so->so_rcv.sb_hiwat + 1520 V_tcp_autorcvbuf_inc, V_tcp_autorcvbuf_max); 1521 } 1522 TCP_PROBE6(receive__autoresize, NULL, tp, mtod(m, const char *), 1523 tp, th, newsize); 1524 1525 /* Start over with next RTT. */ 1526 tp->rfbuf_ts = 0; 1527 tp->rfbuf_cnt = 0; 1528 } else { 1529 tp->rfbuf_cnt += tlen; /* add up */ 1530 } 1531 1532 return (newsize); 1533} 1534 1535static void 1536tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 1537 struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos, 1538 int ti_locked) 1539{ 1540 int thflags, acked, ourfinisacked, needoutput = 0, sack_changed; 1541 int rstreason, todrop, win; 1542 u_long tiwin; 1543 char *s; 1544 struct in_conninfo *inc; 1545 struct mbuf *mfree; 1546 struct tcpopt to; 1547 int tfo_syn; 1548 1549#ifdef TCPDEBUG 1550 /* 1551 * The size of tcp_saveipgen must be the size of the max ip header, 1552 * now IPv6. 1553 */ 1554 u_char tcp_saveipgen[IP6_HDR_LEN]; 1555 struct tcphdr tcp_savetcp; 1556 short ostate = 0; 1557#endif 1558 thflags = th->th_flags; 1559 inc = &tp->t_inpcb->inp_inc; 1560 tp->sackhint.last_sack_ack = 0; 1561 sack_changed = 0; 1562 1563 /* 1564 * If this is either a state-changing packet or current state isn't 1565 * established, we require a write lock on tcbinfo. Otherwise, we 1566 * allow the tcbinfo to be in either alocked or unlocked, as the 1567 * caller may have unnecessarily acquired a write lock due to a race. 1568 */ 1569 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 || 1570 tp->t_state != TCPS_ESTABLISHED) { 1571 KASSERT(ti_locked == TI_RLOCKED, ("%s ti_locked %d for " 1572 "SYN/FIN/RST/!EST", __func__, ti_locked)); 1573 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 1574 } else { 1575#ifdef INVARIANTS 1576 if (ti_locked == TI_RLOCKED) 1577 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 1578 else { 1579 KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST " 1580 "ti_locked: %d", __func__, ti_locked)); 1581 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1582 } 1583#endif 1584 } 1585 INP_WLOCK_ASSERT(tp->t_inpcb); 1586 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 1587 __func__)); 1588 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 1589 __func__)); 1590 1591 /* 1592 * Segment received on connection. 1593 * Reset idle time and keep-alive timer. 1594 * XXX: This should be done after segment 1595 * validation to ignore broken/spoofed segs. 1596 */ 1597 tp->t_rcvtime = ticks; 1598 if (TCPS_HAVEESTABLISHED(tp->t_state)) 1599 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp)); 1600 1601 /* 1602 * Unscale the window into a 32-bit value. 1603 * For the SYN_SENT state the scale is zero. 1604 */ 1605 tiwin = th->th_win << tp->snd_scale; 1606 1607 /* 1608 * TCP ECN processing. 1609 */ 1610 if (tp->t_flags & TF_ECN_PERMIT) { 1611 if (thflags & TH_CWR) 1612 tp->t_flags &= ~TF_ECN_SND_ECE; 1613 switch (iptos & IPTOS_ECN_MASK) { 1614 case IPTOS_ECN_CE: 1615 tp->t_flags |= TF_ECN_SND_ECE; 1616 TCPSTAT_INC(tcps_ecn_ce); 1617 break; 1618 case IPTOS_ECN_ECT0: 1619 TCPSTAT_INC(tcps_ecn_ect0); 1620 break; 1621 case IPTOS_ECN_ECT1: 1622 TCPSTAT_INC(tcps_ecn_ect1); 1623 break; 1624 } 1625 /* Congestion experienced. */ 1626 if (thflags & TH_ECE) { 1627 cc_cong_signal(tp, th, CC_ECN); 1628 } 1629 } 1630 1631 /* 1632 * Parse options on any incoming segment. 1633 */ 1634 tcp_dooptions(&to, (u_char *)(th + 1), 1635 (th->th_off << 2) - sizeof(struct tcphdr), 1636 (thflags & TH_SYN) ? TO_SYN : 0); 1637 1638 /* 1639 * If echoed timestamp is later than the current time, 1640 * fall back to non RFC1323 RTT calculation. Normalize 1641 * timestamp if syncookies were used when this connection 1642 * was established. 1643 */ 1644 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 1645 to.to_tsecr -= tp->ts_offset; 1646 if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks())) 1647 to.to_tsecr = 0; 1648 } 1649 /* 1650 * Process options only when we get SYN/ACK back. The SYN case 1651 * for incoming connections is handled in tcp_syncache. 1652 * According to RFC1323 the window field in a SYN (i.e., a <SYN> 1653 * or <SYN,ACK>) segment itself is never scaled. 1654 * XXX this is traditional behavior, may need to be cleaned up. 1655 */ 1656 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 1657 if ((to.to_flags & TOF_SCALE) && 1658 (tp->t_flags & TF_REQ_SCALE)) { 1659 tp->t_flags |= TF_RCVD_SCALE; 1660 tp->snd_scale = to.to_wscale; 1661 } 1662 /* 1663 * Initial send window. It will be updated with 1664 * the next incoming segment to the scaled value. 1665 */ 1666 tp->snd_wnd = th->th_win; 1667 if (to.to_flags & TOF_TS) { 1668 tp->t_flags |= TF_RCVD_TSTMP; 1669 tp->ts_recent = to.to_tsval; 1670 tp->ts_recent_age = tcp_ts_getticks(); 1671 } 1672 if (to.to_flags & TOF_MSS) 1673 tcp_mss(tp, to.to_mss); 1674 if ((tp->t_flags & TF_SACK_PERMIT) && 1675 (to.to_flags & TOF_SACKPERM) == 0) 1676 tp->t_flags &= ~TF_SACK_PERMIT; 1677 } 1678 1679 /* 1680 * If timestamps were negotiated during SYN/ACK they should 1681 * appear on every segment during this session and vice versa. 1682 */ 1683 if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) { 1684 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 1685 log(LOG_DEBUG, "%s; %s: Timestamp missing, " 1686 "no action\n", s, __func__); 1687 free(s, M_TCPLOG); 1688 } 1689 } 1690 if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) { 1691 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 1692 log(LOG_DEBUG, "%s; %s: Timestamp not expected, " 1693 "no action\n", s, __func__); 1694 free(s, M_TCPLOG); 1695 } 1696 } 1697 1698 /* 1699 * Header prediction: check for the two common cases 1700 * of a uni-directional data xfer. If the packet has 1701 * no control flags, is in-sequence, the window didn't 1702 * change and we're not retransmitting, it's a 1703 * candidate. If the length is zero and the ack moved 1704 * forward, we're the sender side of the xfer. Just 1705 * free the data acked & wake any higher level process 1706 * that was blocked waiting for space. If the length 1707 * is non-zero and the ack didn't move, we're the 1708 * receiver side. If we're getting packets in-order 1709 * (the reassembly queue is empty), add the data to 1710 * the socket buffer and note that we need a delayed ack. 1711 * Make sure that the hidden state-flags are also off. 1712 * Since we check for TCPS_ESTABLISHED first, it can only 1713 * be TH_NEEDSYN. 1714 */ 1715 if (tp->t_state == TCPS_ESTABLISHED && 1716 th->th_seq == tp->rcv_nxt && 1717 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 1718 tp->snd_nxt == tp->snd_max && 1719 tiwin && tiwin == tp->snd_wnd && 1720 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) && 1721 LIST_EMPTY(&tp->t_segq) && 1722 ((to.to_flags & TOF_TS) == 0 || 1723 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) { 1724 1725 /* 1726 * If last ACK falls within this segment's sequence numbers, 1727 * record the timestamp. 1728 * NOTE that the test is modified according to the latest 1729 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1730 */ 1731 if ((to.to_flags & TOF_TS) != 0 && 1732 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 1733 tp->ts_recent_age = tcp_ts_getticks(); 1734 tp->ts_recent = to.to_tsval; 1735 } 1736 1737 if (tlen == 0) { 1738 if (SEQ_GT(th->th_ack, tp->snd_una) && 1739 SEQ_LEQ(th->th_ack, tp->snd_max) && 1740 !IN_RECOVERY(tp->t_flags) && 1741 (to.to_flags & TOF_SACK) == 0 && 1742 TAILQ_EMPTY(&tp->snd_holes)) { 1743 /* 1744 * This is a pure ack for outstanding data. 1745 */ 1746 if (ti_locked == TI_RLOCKED) 1747 INP_INFO_RUNLOCK(&V_tcbinfo); 1748 ti_locked = TI_UNLOCKED; 1749 1750 TCPSTAT_INC(tcps_predack); 1751 1752 /* 1753 * "bad retransmit" recovery. 1754 */ 1755 if (tp->t_rxtshift == 1 && 1756 tp->t_flags & TF_PREVVALID && 1757 (int)(ticks - tp->t_badrxtwin) < 0) { 1758 cc_cong_signal(tp, th, CC_RTO_ERR); 1759 } 1760 1761 /* 1762 * Recalculate the transmit timer / rtt. 1763 * 1764 * Some boxes send broken timestamp replies 1765 * during the SYN+ACK phase, ignore 1766 * timestamps of 0 or we could calculate a 1767 * huge RTT and blow up the retransmit timer. 1768 */ 1769 if ((to.to_flags & TOF_TS) != 0 && 1770 to.to_tsecr) { 1771 u_int t; 1772 1773 t = tcp_ts_getticks() - to.to_tsecr; 1774 if (!tp->t_rttlow || tp->t_rttlow > t) 1775 tp->t_rttlow = t; 1776 tcp_xmit_timer(tp, 1777 TCP_TS_TO_TICKS(t) + 1); 1778 } else if (tp->t_rtttime && 1779 SEQ_GT(th->th_ack, tp->t_rtseq)) { 1780 if (!tp->t_rttlow || 1781 tp->t_rttlow > ticks - tp->t_rtttime) 1782 tp->t_rttlow = ticks - tp->t_rtttime; 1783 tcp_xmit_timer(tp, 1784 ticks - tp->t_rtttime); 1785 } 1786 acked = BYTES_THIS_ACK(tp, th); 1787 1788 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 1789 hhook_run_tcp_est_in(tp, th, &to); 1790 1791 TCPSTAT_INC(tcps_rcvackpack); 1792 TCPSTAT_ADD(tcps_rcvackbyte, acked); 1793 sbdrop(&so->so_snd, acked); 1794 if (SEQ_GT(tp->snd_una, tp->snd_recover) && 1795 SEQ_LEQ(th->th_ack, tp->snd_recover)) 1796 tp->snd_recover = th->th_ack - 1; 1797 1798 /* 1799 * Let the congestion control algorithm update 1800 * congestion control related information. This 1801 * typically means increasing the congestion 1802 * window. 1803 */ 1804 cc_ack_received(tp, th, CC_ACK); 1805 1806 tp->snd_una = th->th_ack; 1807 /* 1808 * Pull snd_wl2 up to prevent seq wrap relative 1809 * to th_ack. 1810 */ 1811 tp->snd_wl2 = th->th_ack; 1812 tp->t_dupacks = 0; 1813 m_freem(m); 1814 ND6_HINT(tp); /* Some progress has been made. */ 1815 1816 /* 1817 * If all outstanding data are acked, stop 1818 * retransmit timer, otherwise restart timer 1819 * using current (possibly backed-off) value. 1820 * If process is waiting for space, 1821 * wakeup/selwakeup/signal. If data 1822 * are ready to send, let tcp_output 1823 * decide between more output or persist. 1824 */ 1825#ifdef TCPDEBUG 1826 if (so->so_options & SO_DEBUG) 1827 tcp_trace(TA_INPUT, ostate, tp, 1828 (void *)tcp_saveipgen, 1829 &tcp_savetcp, 0); 1830#endif 1831 if (tp->snd_una == tp->snd_max) 1832 tcp_timer_activate(tp, TT_REXMT, 0); 1833 else if (!tcp_timer_active(tp, TT_PERSIST)) 1834 tcp_timer_activate(tp, TT_REXMT, 1835 tp->t_rxtcur); 1836 sowwakeup(so); 1837 if (so->so_snd.sb_cc) 1838 (void) tcp_output(tp); 1839 goto check_delack; 1840 } 1841 } else if (th->th_ack == tp->snd_una && 1842 tlen <= sbspace(&so->so_rcv)) { 1843 int newsize = 0; /* automatic sockbuf scaling */ 1844 1845 /* 1846 * This is a pure, in-sequence data packet with 1847 * nothing on the reassembly queue and we have enough 1848 * buffer space to take it. 1849 */ 1850 if (ti_locked == TI_RLOCKED) 1851 INP_INFO_RUNLOCK(&V_tcbinfo); 1852 ti_locked = TI_UNLOCKED; 1853 1854 /* Clean receiver SACK report if present */ 1855 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks) 1856 tcp_clean_sackreport(tp); 1857 TCPSTAT_INC(tcps_preddat); 1858 tp->rcv_nxt += tlen; 1859 /* 1860 * Pull snd_wl1 up to prevent seq wrap relative to 1861 * th_seq. 1862 */ 1863 tp->snd_wl1 = th->th_seq; 1864 /* 1865 * Pull rcv_up up to prevent seq wrap relative to 1866 * rcv_nxt. 1867 */ 1868 tp->rcv_up = tp->rcv_nxt; 1869 TCPSTAT_INC(tcps_rcvpack); 1870 TCPSTAT_ADD(tcps_rcvbyte, tlen); 1871 ND6_HINT(tp); /* Some progress has been made */ 1872#ifdef TCPDEBUG 1873 if (so->so_options & SO_DEBUG) 1874 tcp_trace(TA_INPUT, ostate, tp, 1875 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1876#endif 1877 newsize = tcp_autorcvbuf(m, th, so, tp, tlen); 1878 1879 /* Add data to socket buffer. */ 1880 SOCKBUF_LOCK(&so->so_rcv); 1881 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1882 m_freem(m); 1883 } else { 1884 /* 1885 * Set new socket buffer size. 1886 * Give up when limit is reached. 1887 */ 1888 if (newsize) 1889 if (!sbreserve_locked(&so->so_rcv, 1890 newsize, so, NULL)) 1891 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1892 m_adj(m, drop_hdrlen); /* delayed header drop */ 1893 sbappendstream_locked(&so->so_rcv, m); 1894 } 1895 /* NB: sorwakeup_locked() does an implicit unlock. */ 1896 sorwakeup_locked(so); 1897 if (DELAY_ACK(tp, tlen)) { 1898 tp->t_flags |= TF_DELACK; 1899 } else { 1900 tp->t_flags |= TF_ACKNOW; 1901 tcp_output(tp); 1902 } 1903 goto check_delack; 1904 } 1905 } 1906 1907 /* 1908 * Calculate amount of space in receive window, 1909 * and then do TCP input processing. 1910 * Receive window is amount of space in rcv queue, 1911 * but not less than advertised window. 1912 */ 1913 win = sbspace(&so->so_rcv); 1914 if (win < 0) 1915 win = 0; 1916 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1917 1918 switch (tp->t_state) { 1919 1920 /* 1921 * If the state is SYN_RECEIVED: 1922 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1923 */ 1924 case TCPS_SYN_RECEIVED: 1925 if ((thflags & TH_ACK) && 1926 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1927 SEQ_GT(th->th_ack, tp->snd_max))) { 1928 rstreason = BANDLIM_RST_OPENPORT; 1929 goto dropwithreset; 1930 } 1931#ifdef TCP_RFC7413 1932 if (tp->t_flags & TF_FASTOPEN) { 1933 /* 1934 * When a TFO connection is in SYN_RECEIVED, the 1935 * only valid packets are the initial SYN, a 1936 * retransmit/copy of the initial SYN (possibly with 1937 * a subset of the original data), a valid ACK, a 1938 * FIN, or a RST. 1939 */ 1940 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) { 1941 rstreason = BANDLIM_RST_OPENPORT; 1942 goto dropwithreset; 1943 } else if (thflags & TH_SYN) { 1944 /* non-initial SYN is ignored */ 1945 if ((tcp_timer_active(tp, TT_DELACK) || 1946 tcp_timer_active(tp, TT_REXMT))) 1947 goto drop; 1948 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) { 1949 goto drop; 1950 } 1951 } 1952#endif 1953 break; 1954 1955 /* 1956 * If the state is SYN_SENT: 1957 * if seg contains an ACK, but not for our SYN, drop the input. 1958 * if seg contains a RST, then drop the connection. 1959 * if seg does not contain SYN, then drop it. 1960 * Otherwise this is an acceptable SYN segment 1961 * initialize tp->rcv_nxt and tp->irs 1962 * if seg contains ack then advance tp->snd_una 1963 * if seg contains an ECE and ECN support is enabled, the stream 1964 * is ECN capable. 1965 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1966 * arrange for segment to be acked (eventually) 1967 * continue processing rest of data/controls, beginning with URG 1968 */ 1969 case TCPS_SYN_SENT: 1970 if ((thflags & TH_ACK) && 1971 (SEQ_LEQ(th->th_ack, tp->iss) || 1972 SEQ_GT(th->th_ack, tp->snd_max))) { 1973 rstreason = BANDLIM_UNLIMITED; 1974 goto dropwithreset; 1975 } 1976 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) { 1977 TCP_PROBE5(connect__refused, NULL, tp, 1978 mtod(m, const char *), tp, th); 1979 tp = tcp_drop(tp, ECONNREFUSED); 1980 } 1981 if (thflags & TH_RST) 1982 goto drop; 1983 if (!(thflags & TH_SYN)) 1984 goto drop; 1985 1986 tp->irs = th->th_seq; 1987 tcp_rcvseqinit(tp); 1988 if (thflags & TH_ACK) { 1989 TCPSTAT_INC(tcps_connects); 1990 soisconnected(so); 1991#ifdef MAC 1992 mac_socketpeer_set_from_mbuf(m, so); 1993#endif 1994 /* Do window scaling on this connection? */ 1995 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1996 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1997 tp->rcv_scale = tp->request_r_scale; 1998 } 1999 tp->rcv_adv += imin(tp->rcv_wnd, 2000 TCP_MAXWIN << tp->rcv_scale); 2001 tp->snd_una++; /* SYN is acked */ 2002 /* 2003 * If there's data, delay ACK; if there's also a FIN 2004 * ACKNOW will be turned on later. 2005 */ 2006 if (DELAY_ACK(tp, tlen) && tlen != 0) 2007 tcp_timer_activate(tp, TT_DELACK, 2008 tcp_delacktime); 2009 else 2010 tp->t_flags |= TF_ACKNOW; 2011 2012 if ((thflags & TH_ECE) && V_tcp_do_ecn) { 2013 tp->t_flags |= TF_ECN_PERMIT; 2014 TCPSTAT_INC(tcps_ecn_shs); 2015 } 2016 2017 /* 2018 * Received <SYN,ACK> in SYN_SENT[*] state. 2019 * Transitions: 2020 * SYN_SENT --> ESTABLISHED 2021 * SYN_SENT* --> FIN_WAIT_1 2022 */ 2023 tp->t_starttime = ticks; 2024 if (tp->t_flags & TF_NEEDFIN) { 2025 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2026 tp->t_flags &= ~TF_NEEDFIN; 2027 thflags &= ~TH_SYN; 2028 } else { 2029 tcp_state_change(tp, TCPS_ESTABLISHED); 2030 TCP_PROBE5(connect__established, NULL, tp, 2031 mtod(m, const char *), tp, th); 2032 cc_conn_init(tp); 2033 tcp_timer_activate(tp, TT_KEEP, 2034 TP_KEEPIDLE(tp)); 2035 } 2036 } else { 2037 /* 2038 * Received initial SYN in SYN-SENT[*] state => 2039 * simultaneous open. If segment contains CC option 2040 * and there is a cached CC, apply TAO test. 2041 * If it succeeds, connection is * half-synchronized. 2042 * Otherwise, do 3-way handshake: 2043 * SYN-SENT -> SYN-RECEIVED 2044 * SYN-SENT* -> SYN-RECEIVED* 2045 * If there was no CC option, clear cached CC value. 2046 */ 2047 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 2048 tcp_timer_activate(tp, TT_REXMT, 0); 2049 tcp_state_change(tp, TCPS_SYN_RECEIVED); 2050 } 2051 2052 KASSERT(ti_locked == TI_RLOCKED, ("%s: trimthenstep6: " 2053 "ti_locked %d", __func__, ti_locked)); 2054 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2055 INP_WLOCK_ASSERT(tp->t_inpcb); 2056 2057 /* 2058 * Advance th->th_seq to correspond to first data byte. 2059 * If data, trim to stay within window, 2060 * dropping FIN if necessary. 2061 */ 2062 th->th_seq++; 2063 if (tlen > tp->rcv_wnd) { 2064 todrop = tlen - tp->rcv_wnd; 2065 m_adj(m, -todrop); 2066 tlen = tp->rcv_wnd; 2067 thflags &= ~TH_FIN; 2068 TCPSTAT_INC(tcps_rcvpackafterwin); 2069 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2070 } 2071 tp->snd_wl1 = th->th_seq - 1; 2072 tp->rcv_up = th->th_seq; 2073 /* 2074 * Client side of transaction: already sent SYN and data. 2075 * If the remote host used T/TCP to validate the SYN, 2076 * our data will be ACK'd; if so, enter normal data segment 2077 * processing in the middle of step 5, ack processing. 2078 * Otherwise, goto step 6. 2079 */ 2080 if (thflags & TH_ACK) 2081 goto process_ACK; 2082 2083 goto step6; 2084 2085 /* 2086 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 2087 * do normal processing. 2088 * 2089 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later. 2090 */ 2091 case TCPS_LAST_ACK: 2092 case TCPS_CLOSING: 2093 break; /* continue normal processing */ 2094 } 2095 2096 /* 2097 * States other than LISTEN or SYN_SENT. 2098 * First check the RST flag and sequence number since reset segments 2099 * are exempt from the timestamp and connection count tests. This 2100 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 2101 * below which allowed reset segments in half the sequence space 2102 * to fall though and be processed (which gives forged reset 2103 * segments with a random sequence number a 50 percent chance of 2104 * killing a connection). 2105 * Then check timestamp, if present. 2106 * Then check the connection count, if present. 2107 * Then check that at least some bytes of segment are within 2108 * receive window. If segment begins before rcv_nxt, 2109 * drop leading data (and SYN); if nothing left, just ack. 2110 * 2111 * 2112 * If the RST bit is set, check the sequence number to see 2113 * if this is a valid reset segment. 2114 * RFC 793 page 37: 2115 * In all states except SYN-SENT, all reset (RST) segments 2116 * are validated by checking their SEQ-fields. A reset is 2117 * valid if its sequence number is in the window. 2118 * Note: this does not take into account delayed ACKs, so 2119 * we should test against last_ack_sent instead of rcv_nxt. 2120 * The sequence number in the reset segment is normally an 2121 * echo of our outgoing acknowlegement numbers, but some hosts 2122 * send a reset with the sequence number at the rightmost edge 2123 * of our receive window, and we have to handle this case. 2124 * Note 2: Paul Watson's paper "Slipping in the Window" has shown 2125 * that brute force RST attacks are possible. To combat this, 2126 * we use a much stricter check while in the ESTABLISHED state, 2127 * only accepting RSTs where the sequence number is equal to 2128 * last_ack_sent. In all other states (the states in which a 2129 * RST is more likely), the more permissive check is used. 2130 * If we have multiple segments in flight, the initial reset 2131 * segment sequence numbers will be to the left of last_ack_sent, 2132 * but they will eventually catch up. 2133 * In any case, it never made sense to trim reset segments to 2134 * fit the receive window since RFC 1122 says: 2135 * 4.2.2.12 RST Segment: RFC-793 Section 3.4 2136 * 2137 * A TCP SHOULD allow a received RST segment to include data. 2138 * 2139 * DISCUSSION 2140 * It has been suggested that a RST segment could contain 2141 * ASCII text that encoded and explained the cause of the 2142 * RST. No standard has yet been established for such 2143 * data. 2144 * 2145 * If the reset segment passes the sequence number test examine 2146 * the state: 2147 * SYN_RECEIVED STATE: 2148 * If passive open, return to LISTEN state. 2149 * If active open, inform user that connection was refused. 2150 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES: 2151 * Inform user that connection was reset, and close tcb. 2152 * CLOSING, LAST_ACK STATES: 2153 * Close the tcb. 2154 * TIME_WAIT STATE: 2155 * Drop the segment - see Stevens, vol. 2, p. 964 and 2156 * RFC 1337. 2157 */ 2158 if (thflags & TH_RST) { 2159 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) && 2160 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 2161 switch (tp->t_state) { 2162 2163 case TCPS_SYN_RECEIVED: 2164 so->so_error = ECONNREFUSED; 2165 goto close; 2166 2167 case TCPS_ESTABLISHED: 2168 if (V_tcp_insecure_rst == 0 && 2169 !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) && 2170 SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) && 2171 !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) && 2172 SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) { 2173 TCPSTAT_INC(tcps_badrst); 2174 goto drop; 2175 } 2176 /* FALLTHROUGH */ 2177 case TCPS_FIN_WAIT_1: 2178 case TCPS_FIN_WAIT_2: 2179 case TCPS_CLOSE_WAIT: 2180 so->so_error = ECONNRESET; 2181 close: 2182 KASSERT(ti_locked == TI_RLOCKED, 2183 ("tcp_do_segment: TH_RST 1 ti_locked %d", 2184 ti_locked)); 2185 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2186 2187 tcp_state_change(tp, TCPS_CLOSED); 2188 TCPSTAT_INC(tcps_drops); 2189 tp = tcp_close(tp); 2190 break; 2191 2192 case TCPS_CLOSING: 2193 case TCPS_LAST_ACK: 2194 KASSERT(ti_locked == TI_RLOCKED, 2195 ("tcp_do_segment: TH_RST 2 ti_locked %d", 2196 ti_locked)); 2197 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2198 2199 tp = tcp_close(tp); 2200 break; 2201 } 2202 } 2203 goto drop; 2204 } 2205 2206 /* 2207 * RFC 1323 PAWS: If we have a timestamp reply on this segment 2208 * and it's less than ts_recent, drop it. 2209 */ 2210 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 2211 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 2212 2213 /* Check to see if ts_recent is over 24 days old. */ 2214 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) { 2215 /* 2216 * Invalidate ts_recent. If this segment updates 2217 * ts_recent, the age will be reset later and ts_recent 2218 * will get a valid value. If it does not, setting 2219 * ts_recent to zero will at least satisfy the 2220 * requirement that zero be placed in the timestamp 2221 * echo reply when ts_recent isn't valid. The 2222 * age isn't reset until we get a valid ts_recent 2223 * because we don't want out-of-order segments to be 2224 * dropped when ts_recent is old. 2225 */ 2226 tp->ts_recent = 0; 2227 } else { 2228 TCPSTAT_INC(tcps_rcvduppack); 2229 TCPSTAT_ADD(tcps_rcvdupbyte, tlen); 2230 TCPSTAT_INC(tcps_pawsdrop); 2231 if (tlen) 2232 goto dropafterack; 2233 goto drop; 2234 } 2235 } 2236 2237 /* 2238 * In the SYN-RECEIVED state, validate that the packet belongs to 2239 * this connection before trimming the data to fit the receive 2240 * window. Check the sequence number versus IRS since we know 2241 * the sequence numbers haven't wrapped. This is a partial fix 2242 * for the "LAND" DoS attack. 2243 */ 2244 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 2245 rstreason = BANDLIM_RST_OPENPORT; 2246 goto dropwithreset; 2247 } 2248 2249 todrop = tp->rcv_nxt - th->th_seq; 2250 if (todrop > 0) { 2251 if (thflags & TH_SYN) { 2252 thflags &= ~TH_SYN; 2253 th->th_seq++; 2254 if (th->th_urp > 1) 2255 th->th_urp--; 2256 else 2257 thflags &= ~TH_URG; 2258 todrop--; 2259 } 2260 /* 2261 * Following if statement from Stevens, vol. 2, p. 960. 2262 */ 2263 if (todrop > tlen 2264 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 2265 /* 2266 * Any valid FIN must be to the left of the window. 2267 * At this point the FIN must be a duplicate or out 2268 * of sequence; drop it. 2269 */ 2270 thflags &= ~TH_FIN; 2271 2272 /* 2273 * Send an ACK to resynchronize and drop any data. 2274 * But keep on processing for RST or ACK. 2275 */ 2276 tp->t_flags |= TF_ACKNOW; 2277 todrop = tlen; 2278 TCPSTAT_INC(tcps_rcvduppack); 2279 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 2280 } else { 2281 TCPSTAT_INC(tcps_rcvpartduppack); 2282 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 2283 } 2284 drop_hdrlen += todrop; /* drop from the top afterwards */ 2285 th->th_seq += todrop; 2286 tlen -= todrop; 2287 if (th->th_urp > todrop) 2288 th->th_urp -= todrop; 2289 else { 2290 thflags &= ~TH_URG; 2291 th->th_urp = 0; 2292 } 2293 } 2294 2295 /* 2296 * If new data are received on a connection after the 2297 * user processes are gone, then RST the other end. 2298 */ 2299 if ((so->so_state & SS_NOFDREF) && 2300 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 2301 KASSERT(ti_locked == TI_RLOCKED, ("%s: SS_NOFDEREF && " 2302 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked)); 2303 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2304 2305 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 2306 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data " 2307 "after socket was closed, " 2308 "sending RST and removing tcpcb\n", 2309 s, __func__, tcpstates[tp->t_state], tlen); 2310 free(s, M_TCPLOG); 2311 } 2312 tp = tcp_close(tp); 2313 TCPSTAT_INC(tcps_rcvafterclose); 2314 rstreason = BANDLIM_UNLIMITED; 2315 goto dropwithreset; 2316 } 2317 2318 /* 2319 * If segment ends after window, drop trailing data 2320 * (and PUSH and FIN); if nothing left, just ACK. 2321 */ 2322 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 2323 if (todrop > 0) { 2324 TCPSTAT_INC(tcps_rcvpackafterwin); 2325 if (todrop >= tlen) { 2326 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 2327 /* 2328 * If window is closed can only take segments at 2329 * window edge, and have to drop data and PUSH from 2330 * incoming segments. Continue processing, but 2331 * remember to ack. Otherwise, drop segment 2332 * and ack. 2333 */ 2334 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 2335 tp->t_flags |= TF_ACKNOW; 2336 TCPSTAT_INC(tcps_rcvwinprobe); 2337 } else 2338 goto dropafterack; 2339 } else 2340 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2341 m_adj(m, -todrop); 2342 tlen -= todrop; 2343 thflags &= ~(TH_PUSH|TH_FIN); 2344 } 2345 2346 /* 2347 * If last ACK falls within this segment's sequence numbers, 2348 * record its timestamp. 2349 * NOTE: 2350 * 1) That the test incorporates suggestions from the latest 2351 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 2352 * 2) That updating only on newer timestamps interferes with 2353 * our earlier PAWS tests, so this check should be solely 2354 * predicated on the sequence space of this segment. 2355 * 3) That we modify the segment boundary check to be 2356 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 2357 * instead of RFC1323's 2358 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 2359 * This modified check allows us to overcome RFC1323's 2360 * limitations as described in Stevens TCP/IP Illustrated 2361 * Vol. 2 p.869. In such cases, we can still calculate the 2362 * RTT correctly when RCV.NXT == Last.ACK.Sent. 2363 */ 2364 if ((to.to_flags & TOF_TS) != 0 && 2365 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 2366 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 2367 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 2368 tp->ts_recent_age = tcp_ts_getticks(); 2369 tp->ts_recent = to.to_tsval; 2370 } 2371 2372 /* 2373 * If a SYN is in the window, then this is an 2374 * error and we send an RST and drop the connection. 2375 */ 2376 if (thflags & TH_SYN) { 2377 KASSERT(ti_locked == TI_RLOCKED, 2378 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked)); 2379 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2380 2381 tp = tcp_drop(tp, ECONNRESET); 2382 rstreason = BANDLIM_UNLIMITED; 2383 goto drop; 2384 } 2385 2386 /* 2387 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 2388 * flag is on (half-synchronized state), then queue data for 2389 * later processing; else drop segment and return. 2390 */ 2391 if ((thflags & TH_ACK) == 0) { 2392 if (tp->t_state == TCPS_SYN_RECEIVED || 2393 (tp->t_flags & TF_NEEDSYN)) { 2394#ifdef TCP_RFC7413 2395 if (tp->t_state == TCPS_SYN_RECEIVED && 2396 tp->t_flags & TF_FASTOPEN) { 2397 tp->snd_wnd = tiwin; 2398 cc_conn_init(tp); 2399 } 2400#endif 2401 goto step6; 2402 } else if (tp->t_flags & TF_ACKNOW) 2403 goto dropafterack; 2404 else 2405 goto drop; 2406 } 2407 2408 /* 2409 * Ack processing. 2410 */ 2411 switch (tp->t_state) { 2412 2413 /* 2414 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 2415 * ESTABLISHED state and continue processing. 2416 * The ACK was checked above. 2417 */ 2418 case TCPS_SYN_RECEIVED: 2419 2420 TCPSTAT_INC(tcps_connects); 2421 soisconnected(so); 2422 /* Do window scaling? */ 2423 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2424 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2425 tp->rcv_scale = tp->request_r_scale; 2426 tp->snd_wnd = tiwin; 2427 } 2428 /* 2429 * Make transitions: 2430 * SYN-RECEIVED -> ESTABLISHED 2431 * SYN-RECEIVED* -> FIN-WAIT-1 2432 */ 2433 tp->t_starttime = ticks; 2434 if (tp->t_flags & TF_NEEDFIN) { 2435 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2436 tp->t_flags &= ~TF_NEEDFIN; 2437 } else { 2438 tcp_state_change(tp, TCPS_ESTABLISHED); 2439 TCP_PROBE5(accept__established, NULL, tp, 2440 mtod(m, const char *), tp, th); 2441#ifdef TCP_RFC7413 2442 if (tp->t_tfo_pending) { 2443 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 2444 tp->t_tfo_pending = NULL; 2445 2446 /* 2447 * Account for the ACK of our SYN prior to 2448 * regular ACK processing below. 2449 */ 2450 tp->snd_una++; 2451 } 2452 /* 2453 * TFO connections call cc_conn_init() during SYN 2454 * processing. Calling it again here for such 2455 * connections is not harmless as it would undo the 2456 * snd_cwnd reduction that occurs when a TFO SYN|ACK 2457 * is retransmitted. 2458 */ 2459 if (!(tp->t_flags & TF_FASTOPEN)) 2460#endif 2461 cc_conn_init(tp); 2462 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp)); 2463 } 2464 /* 2465 * If segment contains data or ACK, will call tcp_reass() 2466 * later; if not, do so now to pass queued data to user. 2467 */ 2468 if (tlen == 0 && (thflags & TH_FIN) == 0) 2469 (void) tcp_reass(tp, (struct tcphdr *)0, 0, 2470 (struct mbuf *)0); 2471 tp->snd_wl1 = th->th_seq - 1; 2472 /* FALLTHROUGH */ 2473 2474 /* 2475 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 2476 * ACKs. If the ack is in the range 2477 * tp->snd_una < th->th_ack <= tp->snd_max 2478 * then advance tp->snd_una to th->th_ack and drop 2479 * data from the retransmission queue. If this ACK reflects 2480 * more up to date window information we update our window information. 2481 */ 2482 case TCPS_ESTABLISHED: 2483 case TCPS_FIN_WAIT_1: 2484 case TCPS_FIN_WAIT_2: 2485 case TCPS_CLOSE_WAIT: 2486 case TCPS_CLOSING: 2487 case TCPS_LAST_ACK: 2488 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2489 TCPSTAT_INC(tcps_rcvacktoomuch); 2490 goto dropafterack; 2491 } 2492 if ((tp->t_flags & TF_SACK_PERMIT) && 2493 ((to.to_flags & TOF_SACK) || 2494 !TAILQ_EMPTY(&tp->snd_holes))) 2495 sack_changed = tcp_sack_doack(tp, &to, th->th_ack); 2496 else 2497 /* 2498 * Reset the value so that previous (valid) value 2499 * from the last ack with SACK doesn't get used. 2500 */ 2501 tp->sackhint.sacked_bytes = 0; 2502 2503 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 2504 hhook_run_tcp_est_in(tp, th, &to); 2505 2506 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2507 if (tlen == 0 && 2508 (tiwin == tp->snd_wnd || 2509 (tp->t_flags & TF_SACK_PERMIT))) { 2510 TCPSTAT_INC(tcps_rcvdupack); 2511 /* 2512 * If we have outstanding data (other than 2513 * a window probe), this is a completely 2514 * duplicate ack (ie, window info didn't 2515 * change), the ack is the biggest we've 2516 * seen and we've seen exactly our rexmt 2517 * threshhold of them, assume a packet 2518 * has been dropped and retransmit it. 2519 * Kludge snd_nxt & the congestion 2520 * window so we send only this one 2521 * packet. 2522 * 2523 * We know we're losing at the current 2524 * window size so do congestion avoidance 2525 * (set ssthresh to half the current window 2526 * and pull our congestion window back to 2527 * the new ssthresh). 2528 * 2529 * Dup acks mean that packets have left the 2530 * network (they're now cached at the receiver) 2531 * so bump cwnd by the amount in the receiver 2532 * to keep a constant cwnd packets in the 2533 * network. 2534 * 2535 * When using TCP ECN, notify the peer that 2536 * we reduced the cwnd. 2537 */ 2538 /* 2539 * Following 2 kinds of acks should not affect 2540 * dupack counting: 2541 * 1) Old acks 2542 * 2) Acks with SACK but without any new SACK 2543 * information in them. These could result from 2544 * any anomaly in the network like a switch 2545 * duplicating packets or a possible DoS attack. 2546 */ 2547 if (th->th_ack != tp->snd_una || 2548 ((tp->t_flags & TF_SACK_PERMIT) && 2549 !sack_changed)) 2550 break; 2551 else if (!tcp_timer_active(tp, TT_REXMT)) 2552 tp->t_dupacks = 0; 2553 else if (++tp->t_dupacks > tcprexmtthresh || 2554 IN_FASTRECOVERY(tp->t_flags)) { 2555 cc_ack_received(tp, th, CC_DUPACK); 2556 if ((tp->t_flags & TF_SACK_PERMIT) && 2557 IN_FASTRECOVERY(tp->t_flags)) { 2558 int awnd; 2559 2560 /* 2561 * Compute the amount of data in flight first. 2562 * We can inject new data into the pipe iff 2563 * we have less than 1/2 the original window's 2564 * worth of data in flight. 2565 */ 2566 if (V_tcp_do_rfc6675_pipe) 2567 awnd = tcp_compute_pipe(tp); 2568 else 2569 awnd = (tp->snd_nxt - tp->snd_fack) + 2570 tp->sackhint.sack_bytes_rexmit; 2571 2572 if (awnd < tp->snd_ssthresh) { 2573 tp->snd_cwnd += tp->t_maxseg; 2574 if (tp->snd_cwnd > tp->snd_ssthresh) 2575 tp->snd_cwnd = tp->snd_ssthresh; 2576 } 2577 } else 2578 tp->snd_cwnd += tp->t_maxseg; 2579 if ((thflags & TH_FIN) && 2580 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2581 /* 2582 * If its a fin we need to process 2583 * it to avoid a race where both 2584 * sides enter FIN-WAIT and send FIN|ACK 2585 * at the same time. 2586 */ 2587 break; 2588 } 2589 (void) tcp_output(tp); 2590 goto drop; 2591 } else if (tp->t_dupacks == tcprexmtthresh) { 2592 tcp_seq onxt = tp->snd_nxt; 2593 2594 /* 2595 * If we're doing sack, check to 2596 * see if we're already in sack 2597 * recovery. If we're not doing sack, 2598 * check to see if we're in newreno 2599 * recovery. 2600 */ 2601 if (tp->t_flags & TF_SACK_PERMIT) { 2602 if (IN_FASTRECOVERY(tp->t_flags)) { 2603 tp->t_dupacks = 0; 2604 break; 2605 } 2606 } else { 2607 if (SEQ_LEQ(th->th_ack, 2608 tp->snd_recover)) { 2609 tp->t_dupacks = 0; 2610 break; 2611 } 2612 } 2613 /* Congestion signal before ack. */ 2614 cc_cong_signal(tp, th, CC_NDUPACK); 2615 cc_ack_received(tp, th, CC_DUPACK); 2616 tcp_timer_activate(tp, TT_REXMT, 0); 2617 tp->t_rtttime = 0; 2618 if (tp->t_flags & TF_SACK_PERMIT) { 2619 TCPSTAT_INC( 2620 tcps_sack_recovery_episode); 2621 tp->sack_newdata = tp->snd_nxt; 2622 tp->snd_cwnd = tp->t_maxseg; 2623 (void) tcp_output(tp); 2624 goto drop; 2625 } 2626 tp->snd_nxt = th->th_ack; 2627 tp->snd_cwnd = tp->t_maxseg; 2628 if ((thflags & TH_FIN) && 2629 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2630 /* 2631 * If its a fin we need to process 2632 * it to avoid a race where both 2633 * sides enter FIN-WAIT and send FIN|ACK 2634 * at the same time. 2635 */ 2636 break; 2637 } 2638 (void) tcp_output(tp); 2639 KASSERT(tp->snd_limited <= 2, 2640 ("%s: tp->snd_limited too big", 2641 __func__)); 2642 tp->snd_cwnd = tp->snd_ssthresh + 2643 tp->t_maxseg * 2644 (tp->t_dupacks - tp->snd_limited); 2645 if (SEQ_GT(onxt, tp->snd_nxt)) 2646 tp->snd_nxt = onxt; 2647 goto drop; 2648 } else if (V_tcp_do_rfc3042) { 2649 /* 2650 * Process first and second duplicate 2651 * ACKs. Each indicates a segment 2652 * leaving the network, creating room 2653 * for more. Make sure we can send a 2654 * packet on reception of each duplicate 2655 * ACK by increasing snd_cwnd by one 2656 * segment. Restore the original 2657 * snd_cwnd after packet transmission. 2658 */ 2659 cc_ack_received(tp, th, CC_DUPACK); 2660 u_long oldcwnd = tp->snd_cwnd; 2661 tcp_seq oldsndmax = tp->snd_max; 2662 u_int sent; 2663 int avail; 2664 2665 KASSERT(tp->t_dupacks == 1 || 2666 tp->t_dupacks == 2, 2667 ("%s: dupacks not 1 or 2", 2668 __func__)); 2669 if (tp->t_dupacks == 1) 2670 tp->snd_limited = 0; 2671 tp->snd_cwnd = 2672 (tp->snd_nxt - tp->snd_una) + 2673 (tp->t_dupacks - tp->snd_limited) * 2674 tp->t_maxseg; 2675 if ((thflags & TH_FIN) && 2676 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2677 /* 2678 * If its a fin we need to process 2679 * it to avoid a race where both 2680 * sides enter FIN-WAIT and send FIN|ACK 2681 * at the same time. 2682 */ 2683 break; 2684 } 2685 /* 2686 * Only call tcp_output when there 2687 * is new data available to be sent. 2688 * Otherwise we would send pure ACKs. 2689 */ 2690 SOCKBUF_LOCK(&so->so_snd); 2691 avail = so->so_snd.sb_cc - 2692 (tp->snd_nxt - tp->snd_una); 2693 SOCKBUF_UNLOCK(&so->so_snd); 2694 if (avail > 0) 2695 (void) tcp_output(tp); 2696 sent = tp->snd_max - oldsndmax; 2697 if (sent > tp->t_maxseg) { 2698 KASSERT((tp->t_dupacks == 2 && 2699 tp->snd_limited == 0) || 2700 (sent == tp->t_maxseg + 1 && 2701 tp->t_flags & TF_SENTFIN), 2702 ("%s: sent too much", 2703 __func__)); 2704 tp->snd_limited = 2; 2705 } else if (sent > 0) 2706 ++tp->snd_limited; 2707 tp->snd_cwnd = oldcwnd; 2708 goto drop; 2709 } 2710 } 2711 break; 2712 } else { 2713 /* 2714 * This ack is advancing the left edge, reset the 2715 * counter. 2716 */ 2717 tp->t_dupacks = 0; 2718 /* 2719 * If this ack also has new SACK info, increment the 2720 * counter as per rfc6675. 2721 */ 2722 if ((tp->t_flags & TF_SACK_PERMIT) && sack_changed) 2723 tp->t_dupacks++; 2724 } 2725 2726 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2727 ("%s: th_ack <= snd_una", __func__)); 2728 2729 /* 2730 * If the congestion window was inflated to account 2731 * for the other side's cached packets, retract it. 2732 */ 2733 if (IN_FASTRECOVERY(tp->t_flags)) { 2734 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2735 if (tp->t_flags & TF_SACK_PERMIT) 2736 tcp_sack_partialack(tp, th); 2737 else 2738 tcp_newreno_partial_ack(tp, th); 2739 } else 2740 cc_post_recovery(tp, th); 2741 } 2742 /* 2743 * If we reach this point, ACK is not a duplicate, 2744 * i.e., it ACKs something we sent. 2745 */ 2746 if (tp->t_flags & TF_NEEDSYN) { 2747 /* 2748 * T/TCP: Connection was half-synchronized, and our 2749 * SYN has been ACK'd (so connection is now fully 2750 * synchronized). Go to non-starred state, 2751 * increment snd_una for ACK of SYN, and check if 2752 * we can do window scaling. 2753 */ 2754 tp->t_flags &= ~TF_NEEDSYN; 2755 tp->snd_una++; 2756 /* Do window scaling? */ 2757 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2758 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2759 tp->rcv_scale = tp->request_r_scale; 2760 /* Send window already scaled. */ 2761 } 2762 } 2763 2764process_ACK: 2765 INP_WLOCK_ASSERT(tp->t_inpcb); 2766 2767 acked = BYTES_THIS_ACK(tp, th); 2768 KASSERT(acked >= 0, ("%s: acked unexepectedly negative " 2769 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__, 2770 tp->snd_una, th->th_ack, tp, m)); 2771 TCPSTAT_INC(tcps_rcvackpack); 2772 TCPSTAT_ADD(tcps_rcvackbyte, acked); 2773 2774 /* 2775 * If we just performed our first retransmit, and the ACK 2776 * arrives within our recovery window, then it was a mistake 2777 * to do the retransmit in the first place. Recover our 2778 * original cwnd and ssthresh, and proceed to transmit where 2779 * we left off. 2780 */ 2781 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID && 2782 (int)(ticks - tp->t_badrxtwin) < 0) 2783 cc_cong_signal(tp, th, CC_RTO_ERR); 2784 2785 /* 2786 * If we have a timestamp reply, update smoothed 2787 * round trip time. If no timestamp is present but 2788 * transmit timer is running and timed sequence 2789 * number was acked, update smoothed round trip time. 2790 * Since we now have an rtt measurement, cancel the 2791 * timer backoff (cf., Phil Karn's retransmit alg.). 2792 * Recompute the initial retransmit timer. 2793 * 2794 * Some boxes send broken timestamp replies 2795 * during the SYN+ACK phase, ignore 2796 * timestamps of 0 or we could calculate a 2797 * huge RTT and blow up the retransmit timer. 2798 */ 2799 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) { 2800 u_int t; 2801 2802 t = tcp_ts_getticks() - to.to_tsecr; 2803 if (!tp->t_rttlow || tp->t_rttlow > t) 2804 tp->t_rttlow = t; 2805 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1); 2806 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2807 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2808 tp->t_rttlow = ticks - tp->t_rtttime; 2809 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2810 } 2811 2812 /* 2813 * If all outstanding data is acked, stop retransmit 2814 * timer and remember to restart (more output or persist). 2815 * If there is more data to be acked, restart retransmit 2816 * timer, using current (possibly backed-off) value. 2817 */ 2818 if (th->th_ack == tp->snd_max) { 2819 tcp_timer_activate(tp, TT_REXMT, 0); 2820 needoutput = 1; 2821 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2822 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2823 2824 /* 2825 * If no data (only SYN) was ACK'd, 2826 * skip rest of ACK processing. 2827 */ 2828 if (acked == 0) 2829 goto step6; 2830 2831 /* 2832 * Let the congestion control algorithm update congestion 2833 * control related information. This typically means increasing 2834 * the congestion window. 2835 */ 2836 cc_ack_received(tp, th, CC_ACK); 2837 2838 SOCKBUF_LOCK(&so->so_snd); 2839 if (acked > so->so_snd.sb_cc) { 2840 if (tp->snd_wnd >= so->so_snd.sb_cc) 2841 tp->snd_wnd -= so->so_snd.sb_cc; 2842 else 2843 tp->snd_wnd = 0; 2844 mfree = sbcut_locked(&so->so_snd, 2845 (int)so->so_snd.sb_cc); 2846 ourfinisacked = 1; 2847 } else { 2848 mfree = sbcut_locked(&so->so_snd, acked); 2849 if (tp->snd_wnd >= (u_long) acked) 2850 tp->snd_wnd -= acked; 2851 else 2852 tp->snd_wnd = 0; 2853 ourfinisacked = 0; 2854 } 2855 /* NB: sowwakeup_locked() does an implicit unlock. */ 2856 sowwakeup_locked(so); 2857 m_freem(mfree); 2858 /* Detect una wraparound. */ 2859 if (!IN_RECOVERY(tp->t_flags) && 2860 SEQ_GT(tp->snd_una, tp->snd_recover) && 2861 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2862 tp->snd_recover = th->th_ack - 1; 2863 /* XXXLAS: Can this be moved up into cc_post_recovery? */ 2864 if (IN_RECOVERY(tp->t_flags) && 2865 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2866 EXIT_RECOVERY(tp->t_flags); 2867 } 2868 tp->snd_una = th->th_ack; 2869 if (tp->t_flags & TF_SACK_PERMIT) { 2870 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2871 tp->snd_recover = tp->snd_una; 2872 } 2873 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2874 tp->snd_nxt = tp->snd_una; 2875 2876 switch (tp->t_state) { 2877 2878 /* 2879 * In FIN_WAIT_1 STATE in addition to the processing 2880 * for the ESTABLISHED state if our FIN is now acknowledged 2881 * then enter FIN_WAIT_2. 2882 */ 2883 case TCPS_FIN_WAIT_1: 2884 if (ourfinisacked) { 2885 /* 2886 * If we can't receive any more 2887 * data, then closing user can proceed. 2888 * Starting the timer is contrary to the 2889 * specification, but if we don't get a FIN 2890 * we'll hang forever. 2891 * 2892 * XXXjl: 2893 * we should release the tp also, and use a 2894 * compressed state. 2895 */ 2896 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2897 soisdisconnected(so); 2898 tcp_timer_activate(tp, TT_2MSL, 2899 (tcp_fast_finwait2_recycle ? 2900 tcp_finwait2_timeout : 2901 TP_MAXIDLE(tp))); 2902 } 2903 tcp_state_change(tp, TCPS_FIN_WAIT_2); 2904 } 2905 break; 2906 2907 /* 2908 * In CLOSING STATE in addition to the processing for 2909 * the ESTABLISHED state if the ACK acknowledges our FIN 2910 * then enter the TIME-WAIT state, otherwise ignore 2911 * the segment. 2912 */ 2913 case TCPS_CLOSING: 2914 if (ourfinisacked) { 2915 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2916 tcp_twstart(tp); 2917 INP_INFO_RUNLOCK(&V_tcbinfo); 2918 m_freem(m); 2919 return; 2920 } 2921 break; 2922 2923 /* 2924 * In LAST_ACK, we may still be waiting for data to drain 2925 * and/or to be acked, as well as for the ack of our FIN. 2926 * If our FIN is now acknowledged, delete the TCB, 2927 * enter the closed state and return. 2928 */ 2929 case TCPS_LAST_ACK: 2930 if (ourfinisacked) { 2931 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2932 tp = tcp_close(tp); 2933 goto drop; 2934 } 2935 break; 2936 } 2937 } 2938 2939step6: 2940 INP_WLOCK_ASSERT(tp->t_inpcb); 2941 2942 /* 2943 * Update window information. 2944 * Don't look at window if no ACK: TAC's send garbage on first SYN. 2945 */ 2946 if ((thflags & TH_ACK) && 2947 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2948 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2949 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2950 /* keep track of pure window updates */ 2951 if (tlen == 0 && 2952 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2953 TCPSTAT_INC(tcps_rcvwinupd); 2954 tp->snd_wnd = tiwin; 2955 tp->snd_wl1 = th->th_seq; 2956 tp->snd_wl2 = th->th_ack; 2957 if (tp->snd_wnd > tp->max_sndwnd) 2958 tp->max_sndwnd = tp->snd_wnd; 2959 needoutput = 1; 2960 } 2961 2962 /* 2963 * Process segments with URG. 2964 */ 2965 if ((thflags & TH_URG) && th->th_urp && 2966 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2967 /* 2968 * This is a kludge, but if we receive and accept 2969 * random urgent pointers, we'll crash in 2970 * soreceive. It's hard to imagine someone 2971 * actually wanting to send this much urgent data. 2972 */ 2973 SOCKBUF_LOCK(&so->so_rcv); 2974 if (th->th_urp + so->so_rcv.sb_cc > sb_max) { 2975 th->th_urp = 0; /* XXX */ 2976 thflags &= ~TH_URG; /* XXX */ 2977 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2978 goto dodata; /* XXX */ 2979 } 2980 /* 2981 * If this segment advances the known urgent pointer, 2982 * then mark the data stream. This should not happen 2983 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2984 * a FIN has been received from the remote side. 2985 * In these states we ignore the URG. 2986 * 2987 * According to RFC961 (Assigned Protocols), 2988 * the urgent pointer points to the last octet 2989 * of urgent data. We continue, however, 2990 * to consider it to indicate the first octet 2991 * of data past the urgent section as the original 2992 * spec states (in one of two places). 2993 */ 2994 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2995 tp->rcv_up = th->th_seq + th->th_urp; 2996 so->so_oobmark = so->so_rcv.sb_cc + 2997 (tp->rcv_up - tp->rcv_nxt) - 1; 2998 if (so->so_oobmark == 0) 2999 so->so_rcv.sb_state |= SBS_RCVATMARK; 3000 sohasoutofband(so); 3001 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 3002 } 3003 SOCKBUF_UNLOCK(&so->so_rcv); 3004 /* 3005 * Remove out of band data so doesn't get presented to user. 3006 * This can happen independent of advancing the URG pointer, 3007 * but if two URG's are pending at once, some out-of-band 3008 * data may creep in... ick. 3009 */ 3010 if (th->th_urp <= (u_long)tlen && 3011 !(so->so_options & SO_OOBINLINE)) { 3012 /* hdr drop is delayed */ 3013 tcp_pulloutofband(so, th, m, drop_hdrlen); 3014 } 3015 } else { 3016 /* 3017 * If no out of band data is expected, 3018 * pull receive urgent pointer along 3019 * with the receive window. 3020 */ 3021 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 3022 tp->rcv_up = tp->rcv_nxt; 3023 } 3024dodata: /* XXX */ 3025 INP_WLOCK_ASSERT(tp->t_inpcb); 3026 3027 /* 3028 * Process the segment text, merging it into the TCP sequencing queue, 3029 * and arranging for acknowledgment of receipt if necessary. 3030 * This process logically involves adjusting tp->rcv_wnd as data 3031 * is presented to the user (this happens in tcp_usrreq.c, 3032 * case PRU_RCVD). If a FIN has already been received on this 3033 * connection then we just ignore the text. 3034 */ 3035 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 3036 (tp->t_flags & TF_FASTOPEN)); 3037 if ((tlen || (thflags & TH_FIN) || tfo_syn) && 3038 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3039 tcp_seq save_start = th->th_seq; 3040 3041 m_adj(m, drop_hdrlen); /* delayed header drop */ 3042 /* 3043 * Insert segment which includes th into TCP reassembly queue 3044 * with control block tp. Set thflags to whether reassembly now 3045 * includes a segment with FIN. This handles the common case 3046 * inline (segment is the next to be received on an established 3047 * connection, and the queue is empty), avoiding linkage into 3048 * and removal from the queue and repetition of various 3049 * conversions. 3050 * Set DELACK for segments received in order, but ack 3051 * immediately when segments are out of order (so 3052 * fast retransmit can work). 3053 */ 3054 if (th->th_seq == tp->rcv_nxt && 3055 LIST_EMPTY(&tp->t_segq) && 3056 (TCPS_HAVEESTABLISHED(tp->t_state) || 3057 tfo_syn)) { 3058 if (DELAY_ACK(tp, tlen) || tfo_syn) 3059 tp->t_flags |= TF_DELACK; 3060 else 3061 tp->t_flags |= TF_ACKNOW; 3062 tp->rcv_nxt += tlen; 3063 thflags = th->th_flags & TH_FIN; 3064 TCPSTAT_INC(tcps_rcvpack); 3065 TCPSTAT_ADD(tcps_rcvbyte, tlen); 3066 ND6_HINT(tp); 3067 SOCKBUF_LOCK(&so->so_rcv); 3068 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 3069 m_freem(m); 3070 else 3071 sbappendstream_locked(&so->so_rcv, m); 3072 /* NB: sorwakeup_locked() does an implicit unlock. */ 3073 sorwakeup_locked(so); 3074 } else { 3075 /* 3076 * XXX: Due to the header drop above "th" is 3077 * theoretically invalid by now. Fortunately 3078 * m_adj() doesn't actually frees any mbufs 3079 * when trimming from the head. 3080 */ 3081 thflags = tcp_reass(tp, th, &tlen, m); 3082 tp->t_flags |= TF_ACKNOW; 3083 } 3084 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT)) 3085 tcp_update_sack_list(tp, save_start, save_start + tlen); 3086#if 0 3087 /* 3088 * Note the amount of data that peer has sent into 3089 * our window, in order to estimate the sender's 3090 * buffer size. 3091 * XXX: Unused. 3092 */ 3093 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) 3094 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 3095 else 3096 len = so->so_rcv.sb_hiwat; 3097#endif 3098 } else { 3099 m_freem(m); 3100 thflags &= ~TH_FIN; 3101 } 3102 3103 /* 3104 * If FIN is received ACK the FIN and let the user know 3105 * that the connection is closing. 3106 */ 3107 if (thflags & TH_FIN) { 3108 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3109 socantrcvmore(so); 3110 /* 3111 * If connection is half-synchronized 3112 * (ie NEEDSYN flag on) then delay ACK, 3113 * so it may be piggybacked when SYN is sent. 3114 * Otherwise, since we received a FIN then no 3115 * more input can be expected, send ACK now. 3116 */ 3117 if (tp->t_flags & TF_NEEDSYN) 3118 tp->t_flags |= TF_DELACK; 3119 else 3120 tp->t_flags |= TF_ACKNOW; 3121 tp->rcv_nxt++; 3122 } 3123 switch (tp->t_state) { 3124 3125 /* 3126 * In SYN_RECEIVED and ESTABLISHED STATES 3127 * enter the CLOSE_WAIT state. 3128 */ 3129 case TCPS_SYN_RECEIVED: 3130 tp->t_starttime = ticks; 3131 /* FALLTHROUGH */ 3132 case TCPS_ESTABLISHED: 3133 tcp_state_change(tp, TCPS_CLOSE_WAIT); 3134 break; 3135 3136 /* 3137 * If still in FIN_WAIT_1 STATE FIN has not been acked so 3138 * enter the CLOSING state. 3139 */ 3140 case TCPS_FIN_WAIT_1: 3141 tcp_state_change(tp, TCPS_CLOSING); 3142 break; 3143 3144 /* 3145 * In FIN_WAIT_2 state enter the TIME_WAIT state, 3146 * starting the time-wait timer, turning off the other 3147 * standard timers. 3148 */ 3149 case TCPS_FIN_WAIT_2: 3150 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 3151 KASSERT(ti_locked == TI_RLOCKED, ("%s: dodata " 3152 "TCP_FIN_WAIT_2 ti_locked: %d", __func__, 3153 ti_locked)); 3154 3155 tcp_twstart(tp); 3156 INP_INFO_RUNLOCK(&V_tcbinfo); 3157 return; 3158 } 3159 } 3160 if (ti_locked == TI_RLOCKED) 3161 INP_INFO_RUNLOCK(&V_tcbinfo); 3162 ti_locked = TI_UNLOCKED; 3163 3164#ifdef TCPDEBUG 3165 if (so->so_options & SO_DEBUG) 3166 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 3167 &tcp_savetcp, 0); 3168#endif 3169 3170 /* 3171 * Return any desired output. 3172 */ 3173 if (needoutput || (tp->t_flags & TF_ACKNOW)) 3174 (void) tcp_output(tp); 3175 3176check_delack: 3177 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d", 3178 __func__, ti_locked)); 3179 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 3180 INP_WLOCK_ASSERT(tp->t_inpcb); 3181 3182 if (tp->t_flags & TF_DELACK) { 3183 tp->t_flags &= ~TF_DELACK; 3184 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 3185 } 3186 INP_WUNLOCK(tp->t_inpcb); 3187 return; 3188 3189dropafterack: 3190 /* 3191 * Generate an ACK dropping incoming segment if it occupies 3192 * sequence space, where the ACK reflects our state. 3193 * 3194 * We can now skip the test for the RST flag since all 3195 * paths to this code happen after packets containing 3196 * RST have been dropped. 3197 * 3198 * In the SYN-RECEIVED state, don't send an ACK unless the 3199 * segment we received passes the SYN-RECEIVED ACK test. 3200 * If it fails send a RST. This breaks the loop in the 3201 * "LAND" DoS attack, and also prevents an ACK storm 3202 * between two listening ports that have been sent forged 3203 * SYN segments, each with the source address of the other. 3204 */ 3205 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 3206 (SEQ_GT(tp->snd_una, th->th_ack) || 3207 SEQ_GT(th->th_ack, tp->snd_max)) ) { 3208 rstreason = BANDLIM_RST_OPENPORT; 3209 goto dropwithreset; 3210 } 3211#ifdef TCPDEBUG 3212 if (so->so_options & SO_DEBUG) 3213 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3214 &tcp_savetcp, 0); 3215#endif 3216 if (ti_locked == TI_RLOCKED) 3217 INP_INFO_RUNLOCK(&V_tcbinfo); 3218 ti_locked = TI_UNLOCKED; 3219 3220 tp->t_flags |= TF_ACKNOW; 3221 (void) tcp_output(tp); 3222 INP_WUNLOCK(tp->t_inpcb); 3223 m_freem(m); 3224 return; 3225 3226dropwithreset: 3227 if (ti_locked == TI_RLOCKED) 3228 INP_INFO_RUNLOCK(&V_tcbinfo); 3229 ti_locked = TI_UNLOCKED; 3230 3231 if (tp != NULL) { 3232 tcp_dropwithreset(m, th, tp, tlen, rstreason); 3233 INP_WUNLOCK(tp->t_inpcb); 3234 } else 3235 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 3236 return; 3237 3238drop: 3239 if (ti_locked == TI_RLOCKED) { 3240 INP_INFO_RUNLOCK(&V_tcbinfo); 3241 ti_locked = TI_UNLOCKED; 3242 } 3243#ifdef INVARIANTS 3244 else 3245 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 3246#endif 3247 3248 /* 3249 * Drop space held by incoming segment and return. 3250 */ 3251#ifdef TCPDEBUG 3252 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 3253 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3254 &tcp_savetcp, 0); 3255#endif 3256 if (tp != NULL) 3257 INP_WUNLOCK(tp->t_inpcb); 3258 m_freem(m); 3259} 3260 3261/* 3262 * Issue RST and make ACK acceptable to originator of segment. 3263 * The mbuf must still include the original packet header. 3264 * tp may be NULL. 3265 */ 3266static void 3267tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 3268 int tlen, int rstreason) 3269{ 3270#ifdef INET 3271 struct ip *ip; 3272#endif 3273#ifdef INET6 3274 struct ip6_hdr *ip6; 3275#endif 3276 3277 if (tp != NULL) { 3278 INP_WLOCK_ASSERT(tp->t_inpcb); 3279 } 3280 3281 /* Don't bother if destination was broadcast/multicast. */ 3282 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 3283 goto drop; 3284#ifdef INET6 3285 if (mtod(m, struct ip *)->ip_v == 6) { 3286 ip6 = mtod(m, struct ip6_hdr *); 3287 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 3288 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 3289 goto drop; 3290 /* IPv6 anycast check is done at tcp6_input() */ 3291 } 3292#endif 3293#if defined(INET) && defined(INET6) 3294 else 3295#endif 3296#ifdef INET 3297 { 3298 ip = mtod(m, struct ip *); 3299 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 3300 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 3301 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 3302 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 3303 goto drop; 3304 } 3305#endif 3306 3307 /* Perform bandwidth limiting. */ 3308 if (badport_bandlim(rstreason) < 0) 3309 goto drop; 3310 3311 /* tcp_respond consumes the mbuf chain. */ 3312 if (th->th_flags & TH_ACK) { 3313 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 3314 th->th_ack, TH_RST); 3315 } else { 3316 if (th->th_flags & TH_SYN) 3317 tlen++; 3318 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 3319 (tcp_seq)0, TH_RST|TH_ACK); 3320 } 3321 return; 3322drop: 3323 m_freem(m); 3324} 3325 3326/* 3327 * Parse TCP options and place in tcpopt. 3328 */ 3329static void 3330tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 3331{ 3332 int opt, optlen; 3333 3334 to->to_flags = 0; 3335 for (; cnt > 0; cnt -= optlen, cp += optlen) { 3336 opt = cp[0]; 3337 if (opt == TCPOPT_EOL) 3338 break; 3339 if (opt == TCPOPT_NOP) 3340 optlen = 1; 3341 else { 3342 if (cnt < 2) 3343 break; 3344 optlen = cp[1]; 3345 if (optlen < 2 || optlen > cnt) 3346 break; 3347 } 3348 switch (opt) { 3349 case TCPOPT_MAXSEG: 3350 if (optlen != TCPOLEN_MAXSEG) 3351 continue; 3352 if (!(flags & TO_SYN)) 3353 continue; 3354 to->to_flags |= TOF_MSS; 3355 bcopy((char *)cp + 2, 3356 (char *)&to->to_mss, sizeof(to->to_mss)); 3357 to->to_mss = ntohs(to->to_mss); 3358 break; 3359 case TCPOPT_WINDOW: 3360 if (optlen != TCPOLEN_WINDOW) 3361 continue; 3362 if (!(flags & TO_SYN)) 3363 continue; 3364 to->to_flags |= TOF_SCALE; 3365 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 3366 break; 3367 case TCPOPT_TIMESTAMP: 3368 if (optlen != TCPOLEN_TIMESTAMP) 3369 continue; 3370 to->to_flags |= TOF_TS; 3371 bcopy((char *)cp + 2, 3372 (char *)&to->to_tsval, sizeof(to->to_tsval)); 3373 to->to_tsval = ntohl(to->to_tsval); 3374 bcopy((char *)cp + 6, 3375 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 3376 to->to_tsecr = ntohl(to->to_tsecr); 3377 break; 3378#ifdef TCP_SIGNATURE 3379 /* 3380 * XXX In order to reply to a host which has set the 3381 * TCP_SIGNATURE option in its initial SYN, we have to 3382 * record the fact that the option was observed here 3383 * for the syncache code to perform the correct response. 3384 */ 3385 case TCPOPT_SIGNATURE: 3386 if (optlen != TCPOLEN_SIGNATURE) 3387 continue; 3388 to->to_flags |= TOF_SIGNATURE; 3389 to->to_signature = cp + 2; 3390 break; 3391#endif 3392 case TCPOPT_SACK_PERMITTED: 3393 if (optlen != TCPOLEN_SACK_PERMITTED) 3394 continue; 3395 if (!(flags & TO_SYN)) 3396 continue; 3397 if (!V_tcp_do_sack) 3398 continue; 3399 to->to_flags |= TOF_SACKPERM; 3400 break; 3401 case TCPOPT_SACK: 3402 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 3403 continue; 3404 if (flags & TO_SYN) 3405 continue; 3406 to->to_flags |= TOF_SACK; 3407 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 3408 to->to_sacks = cp + 2; 3409 TCPSTAT_INC(tcps_sack_rcv_blocks); 3410 break; 3411#ifdef TCP_RFC7413 3412 case TCPOPT_FAST_OPEN: 3413 if ((optlen != TCPOLEN_FAST_OPEN_EMPTY) && 3414 (optlen < TCPOLEN_FAST_OPEN_MIN) && 3415 (optlen > TCPOLEN_FAST_OPEN_MAX)) 3416 continue; 3417 if (!(flags & TO_SYN)) 3418 continue; 3419 if (!V_tcp_fastopen_enabled) 3420 continue; 3421 to->to_flags |= TOF_FASTOPEN; 3422 to->to_tfo_len = optlen - 2; 3423 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL; 3424 break; 3425#endif 3426 default: 3427 continue; 3428 } 3429 } 3430} 3431 3432/* 3433 * Pull out of band byte out of a segment so 3434 * it doesn't appear in the user's data queue. 3435 * It is still reflected in the segment length for 3436 * sequencing purposes. 3437 */ 3438static void 3439tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 3440 int off) 3441{ 3442 int cnt = off + th->th_urp - 1; 3443 3444 while (cnt >= 0) { 3445 if (m->m_len > cnt) { 3446 char *cp = mtod(m, caddr_t) + cnt; 3447 struct tcpcb *tp = sototcpcb(so); 3448 3449 INP_WLOCK_ASSERT(tp->t_inpcb); 3450 3451 tp->t_iobc = *cp; 3452 tp->t_oobflags |= TCPOOB_HAVEDATA; 3453 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 3454 m->m_len--; 3455 if (m->m_flags & M_PKTHDR) 3456 m->m_pkthdr.len--; 3457 return; 3458 } 3459 cnt -= m->m_len; 3460 m = m->m_next; 3461 if (m == NULL) 3462 break; 3463 } 3464 panic("tcp_pulloutofband"); 3465} 3466 3467/* 3468 * Collect new round-trip time estimate 3469 * and update averages and current timeout. 3470 */ 3471static void 3472tcp_xmit_timer(struct tcpcb *tp, int rtt) 3473{ 3474 int delta; 3475 3476 INP_WLOCK_ASSERT(tp->t_inpcb); 3477 3478 TCPSTAT_INC(tcps_rttupdated); 3479 tp->t_rttupdated++; 3480 if (tp->t_srtt != 0) { 3481 /* 3482 * srtt is stored as fixed point with 5 bits after the 3483 * binary point (i.e., scaled by 8). The following magic 3484 * is equivalent to the smoothing algorithm in rfc793 with 3485 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 3486 * point). Adjust rtt to origin 0. 3487 */ 3488 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3489 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3490 3491 if ((tp->t_srtt += delta) <= 0) 3492 tp->t_srtt = 1; 3493 3494 /* 3495 * We accumulate a smoothed rtt variance (actually, a 3496 * smoothed mean difference), then set the retransmit 3497 * timer to smoothed rtt + 4 times the smoothed variance. 3498 * rttvar is stored as fixed point with 4 bits after the 3499 * binary point (scaled by 16). The following is 3500 * equivalent to rfc793 smoothing with an alpha of .75 3501 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 3502 * rfc793's wired-in beta. 3503 */ 3504 if (delta < 0) 3505 delta = -delta; 3506 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3507 if ((tp->t_rttvar += delta) <= 0) 3508 tp->t_rttvar = 1; 3509 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 3510 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3511 } else { 3512 /* 3513 * No rtt measurement yet - use the unsmoothed rtt. 3514 * Set the variance to half the rtt (so our first 3515 * retransmit happens at 3*rtt). 3516 */ 3517 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3518 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3519 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3520 } 3521 tp->t_rtttime = 0; 3522 tp->t_rxtshift = 0; 3523 3524 /* 3525 * the retransmit should happen at rtt + 4 * rttvar. 3526 * Because of the way we do the smoothing, srtt and rttvar 3527 * will each average +1/2 tick of bias. When we compute 3528 * the retransmit timer, we want 1/2 tick of rounding and 3529 * 1 extra tick because of +-1/2 tick uncertainty in the 3530 * firing of the timer. The bias will give us exactly the 3531 * 1.5 tick we need. But, because the bias is 3532 * statistical, we have to test that we don't drop below 3533 * the minimum feasible timer (which is 2 ticks). 3534 */ 3535 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3536 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 3537 3538 /* 3539 * We received an ack for a packet that wasn't retransmitted; 3540 * it is probably safe to discard any error indications we've 3541 * received recently. This isn't quite right, but close enough 3542 * for now (a route might have failed after we sent a segment, 3543 * and the return path might not be symmetrical). 3544 */ 3545 tp->t_softerror = 0; 3546} 3547 3548/* 3549 * Determine a reasonable value for maxseg size. 3550 * If the route is known, check route for mtu. 3551 * If none, use an mss that can be handled on the outgoing interface 3552 * without forcing IP to fragment. If no route is found, route has no mtu, 3553 * or the destination isn't local, use a default, hopefully conservative 3554 * size (usually 512 or the default IP max size, but no more than the mtu 3555 * of the interface), as we can't discover anything about intervening 3556 * gateways or networks. We also initialize the congestion/slow start 3557 * window to be a single segment if the destination isn't local. 3558 * While looking at the routing entry, we also initialize other path-dependent 3559 * parameters from pre-set or cached values in the routing entry. 3560 * 3561 * Also take into account the space needed for options that we 3562 * send regularly. Make maxseg shorter by that amount to assure 3563 * that we can send maxseg amount of data even when the options 3564 * are present. Store the upper limit of the length of options plus 3565 * data in maxopd. 3566 * 3567 * NOTE that this routine is only called when we process an incoming 3568 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS 3569 * settings are handled in tcp_mssopt(). 3570 */ 3571void 3572tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer, 3573 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap) 3574{ 3575 int mss = 0; 3576 u_long maxmtu = 0; 3577 struct inpcb *inp = tp->t_inpcb; 3578 struct hc_metrics_lite metrics; 3579 int origoffer; 3580#ifdef INET6 3581 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3582 size_t min_protoh = isipv6 ? 3583 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 3584 sizeof (struct tcpiphdr); 3585#else 3586 const size_t min_protoh = sizeof(struct tcpiphdr); 3587#endif 3588 3589 INP_WLOCK_ASSERT(tp->t_inpcb); 3590 3591 if (mtuoffer != -1) { 3592 KASSERT(offer == -1, ("%s: conflict", __func__)); 3593 offer = mtuoffer - min_protoh; 3594 } 3595 origoffer = offer; 3596 3597 /* Initialize. */ 3598#ifdef INET6 3599 if (isipv6) { 3600 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap); 3601 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt; 3602 } 3603#endif 3604#if defined(INET) && defined(INET6) 3605 else 3606#endif 3607#ifdef INET 3608 { 3609 maxmtu = tcp_maxmtu(&inp->inp_inc, cap); 3610 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt; 3611 } 3612#endif 3613 3614 /* 3615 * No route to sender, stay with default mss and return. 3616 */ 3617 if (maxmtu == 0) { 3618 /* 3619 * In case we return early we need to initialize metrics 3620 * to a defined state as tcp_hc_get() would do for us 3621 * if there was no cache hit. 3622 */ 3623 if (metricptr != NULL) 3624 bzero(metricptr, sizeof(struct hc_metrics_lite)); 3625 return; 3626 } 3627 3628 /* What have we got? */ 3629 switch (offer) { 3630 case 0: 3631 /* 3632 * Offer == 0 means that there was no MSS on the SYN 3633 * segment, in this case we use tcp_mssdflt as 3634 * already assigned to t_maxopd above. 3635 */ 3636 offer = tp->t_maxopd; 3637 break; 3638 3639 case -1: 3640 /* 3641 * Offer == -1 means that we didn't receive SYN yet. 3642 */ 3643 /* FALLTHROUGH */ 3644 3645 default: 3646 /* 3647 * Prevent DoS attack with too small MSS. Round up 3648 * to at least minmss. 3649 */ 3650 offer = max(offer, V_tcp_minmss); 3651 } 3652 3653 /* 3654 * rmx information is now retrieved from tcp_hostcache. 3655 */ 3656 tcp_hc_get(&inp->inp_inc, &metrics); 3657 if (metricptr != NULL) 3658 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite)); 3659 3660 /* 3661 * If there's a discovered mtu int tcp hostcache, use it 3662 * else, use the link mtu. 3663 */ 3664 if (metrics.rmx_mtu) 3665 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 3666 else { 3667#ifdef INET6 3668 if (isipv6) { 3669 mss = maxmtu - min_protoh; 3670 if (!V_path_mtu_discovery && 3671 !in6_localaddr(&inp->in6p_faddr)) 3672 mss = min(mss, V_tcp_v6mssdflt); 3673 } 3674#endif 3675#if defined(INET) && defined(INET6) 3676 else 3677#endif 3678#ifdef INET 3679 { 3680 mss = maxmtu - min_protoh; 3681 if (!V_path_mtu_discovery && 3682 !in_localaddr(inp->inp_faddr)) 3683 mss = min(mss, V_tcp_mssdflt); 3684 } 3685#endif 3686 /* 3687 * XXX - The above conditional (mss = maxmtu - min_protoh) 3688 * probably violates the TCP spec. 3689 * The problem is that, since we don't know the 3690 * other end's MSS, we are supposed to use a conservative 3691 * default. But, if we do that, then MTU discovery will 3692 * never actually take place, because the conservative 3693 * default is much less than the MTUs typically seen 3694 * on the Internet today. For the moment, we'll sweep 3695 * this under the carpet. 3696 * 3697 * The conservative default might not actually be a problem 3698 * if the only case this occurs is when sending an initial 3699 * SYN with options and data to a host we've never talked 3700 * to before. Then, they will reply with an MSS value which 3701 * will get recorded and the new parameters should get 3702 * recomputed. For Further Study. 3703 */ 3704 } 3705 mss = min(mss, offer); 3706 3707 /* 3708 * Sanity check: make sure that maxopd will be large 3709 * enough to allow some data on segments even if the 3710 * all the option space is used (40bytes). Otherwise 3711 * funny things may happen in tcp_output. 3712 */ 3713 mss = max(mss, 64); 3714 3715 /* 3716 * maxopd stores the maximum length of data AND options 3717 * in a segment; maxseg is the amount of data in a normal 3718 * segment. We need to store this value (maxopd) apart 3719 * from maxseg, because now every segment carries options 3720 * and thus we normally have somewhat less data in segments. 3721 */ 3722 tp->t_maxopd = mss; 3723 3724 /* 3725 * origoffer==-1 indicates that no segments were received yet. 3726 * In this case we just guess. 3727 */ 3728 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && 3729 (origoffer == -1 || 3730 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) 3731 mss -= TCPOLEN_TSTAMP_APPA; 3732 3733 tp->t_maxseg = mss; 3734} 3735 3736void 3737tcp_mss(struct tcpcb *tp, int offer) 3738{ 3739 int mss; 3740 u_long bufsize; 3741 struct inpcb *inp; 3742 struct socket *so; 3743 struct hc_metrics_lite metrics; 3744 struct tcp_ifcap cap; 3745 3746 KASSERT(tp != NULL, ("%s: tp == NULL", __func__)); 3747 3748 bzero(&cap, sizeof(cap)); 3749 tcp_mss_update(tp, offer, -1, &metrics, &cap); 3750 3751 mss = tp->t_maxseg; 3752 inp = tp->t_inpcb; 3753 3754 /* 3755 * If there's a pipesize, change the socket buffer to that size, 3756 * don't change if sb_hiwat is different than default (then it 3757 * has been changed on purpose with setsockopt). 3758 * Make the socket buffers an integral number of mss units; 3759 * if the mss is larger than the socket buffer, decrease the mss. 3760 */ 3761 so = inp->inp_socket; 3762 SOCKBUF_LOCK(&so->so_snd); 3763 if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe) 3764 bufsize = metrics.rmx_sendpipe; 3765 else 3766 bufsize = so->so_snd.sb_hiwat; 3767 if (bufsize < mss) 3768 mss = bufsize; 3769 else { 3770 bufsize = roundup(bufsize, mss); 3771 if (bufsize > sb_max) 3772 bufsize = sb_max; 3773 if (bufsize > so->so_snd.sb_hiwat) 3774 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL); 3775 } 3776 SOCKBUF_UNLOCK(&so->so_snd); 3777 tp->t_maxseg = mss; 3778 3779 SOCKBUF_LOCK(&so->so_rcv); 3780 if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe) 3781 bufsize = metrics.rmx_recvpipe; 3782 else 3783 bufsize = so->so_rcv.sb_hiwat; 3784 if (bufsize > mss) { 3785 bufsize = roundup(bufsize, mss); 3786 if (bufsize > sb_max) 3787 bufsize = sb_max; 3788 if (bufsize > so->so_rcv.sb_hiwat) 3789 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL); 3790 } 3791 SOCKBUF_UNLOCK(&so->so_rcv); 3792 3793 /* Check the interface for TSO capabilities. */ 3794 if (cap.ifcap & CSUM_TSO) { 3795 tp->t_flags |= TF_TSO; 3796 tp->t_tsomax = cap.tsomax; 3797 tp->t_tsomaxsegcount = cap.tsomaxsegcount; 3798 tp->t_tsomaxsegsize = cap.tsomaxsegsize; 3799 } 3800} 3801 3802/* 3803 * Determine the MSS option to send on an outgoing SYN. 3804 */ 3805int 3806tcp_mssopt(struct in_conninfo *inc) 3807{ 3808 int mss = 0; 3809 u_long maxmtu = 0; 3810 u_long thcmtu = 0; 3811 size_t min_protoh; 3812 3813 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer")); 3814 3815#ifdef INET6 3816 if (inc->inc_flags & INC_ISIPV6) { 3817 mss = V_tcp_v6mssdflt; 3818 maxmtu = tcp_maxmtu6(inc, NULL); 3819 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 3820 } 3821#endif 3822#if defined(INET) && defined(INET6) 3823 else 3824#endif 3825#ifdef INET 3826 { 3827 mss = V_tcp_mssdflt; 3828 maxmtu = tcp_maxmtu(inc, NULL); 3829 min_protoh = sizeof(struct tcpiphdr); 3830 } 3831#endif 3832#if defined(INET6) || defined(INET) 3833 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3834#endif 3835 3836 if (maxmtu && thcmtu) 3837 mss = min(maxmtu, thcmtu) - min_protoh; 3838 else if (maxmtu || thcmtu) 3839 mss = max(maxmtu, thcmtu) - min_protoh; 3840 3841 return (mss); 3842} 3843 3844 3845/* 3846 * On a partial ack arrives, force the retransmission of the 3847 * next unacknowledged segment. Do not clear tp->t_dupacks. 3848 * By setting snd_nxt to ti_ack, this forces retransmission timer to 3849 * be started again. 3850 */ 3851static void 3852tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) 3853{ 3854 tcp_seq onxt = tp->snd_nxt; 3855 u_long ocwnd = tp->snd_cwnd; 3856 3857 INP_WLOCK_ASSERT(tp->t_inpcb); 3858 3859 tcp_timer_activate(tp, TT_REXMT, 0); 3860 tp->t_rtttime = 0; 3861 tp->snd_nxt = th->th_ack; 3862 /* 3863 * Set snd_cwnd to one segment beyond acknowledged offset. 3864 * (tp->snd_una has not yet been updated when this function is called.) 3865 */ 3866 tp->snd_cwnd = tp->t_maxseg + BYTES_THIS_ACK(tp, th); 3867 tp->t_flags |= TF_ACKNOW; 3868 (void) tcp_output(tp); 3869 tp->snd_cwnd = ocwnd; 3870 if (SEQ_GT(onxt, tp->snd_nxt)) 3871 tp->snd_nxt = onxt; 3872 /* 3873 * Partial window deflation. Relies on fact that tp->snd_una 3874 * not updated yet. 3875 */ 3876 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th)) 3877 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th); 3878 else 3879 tp->snd_cwnd = 0; 3880 tp->snd_cwnd += tp->t_maxseg; 3881} 3882 3883int 3884tcp_compute_pipe(struct tcpcb *tp) 3885{ 3886 return (tp->snd_max - tp->snd_una + 3887 tp->sackhint.sack_bytes_rexmit - 3888 tp->sackhint.sacked_bytes); 3889} 3890