/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/netinet/tcp_input.c 315514 2017-03-18 22:04:20Z ae $");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#ifdef TCP_RFC7413
#include <netinet/tcp_fastopen.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/cc/cc.h>
#ifdef TCPPCAP
#include <netinet/tcp_pcap.h>
#endif
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#include <netipsec/ipsec_support.h>

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

const int tcprexmtthresh = 3;

int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0,
    "Log all incoming TCP segments to closed ports");

VNET_DEFINE(int, blackhole) = 0;
#define	V_blackhole		VNET(blackhole)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(blackhole), 0,
    "Do not send RST on segments to closed ports");

VNET_DEFINE(int, tcp_delack_enabled) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_delack_enabled), 0,
    "Delay ACK to try and piggyback it onto a data packet");

VNET_DEFINE(int, drop_synfin) = 0;
#define	V_drop_synfin		VNET(drop_synfin)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(drop_synfin), 0,
    "Drop TCP packets with SYN+FIN set");

VNET_DEFINE(int, tcp_do_rfc6675_pipe) = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc6675_pipe, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc6675_pipe), 0,
    "Use calculated pipe/in-flight bytes per RFC 6675");

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
#define	V_tcp_do_rfc3042	VNET(tcp_do_rfc3042)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");
VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

VNET_DEFINE(int, tcp_initcwnd_segments) = 10;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, initcwnd_segments,
    CTLFLAG_VNET | CTLFLAG_RW, &VNET_NAME(tcp_initcwnd_segments), 0,
    "Slow-start flight size (initial congestion window) in number of segments");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");

VNET_DEFINE(int, tcp_do_ecn) = 2;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_ecn), 0,
    "TCP ECN support");

VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_ecn_maxretries), 0,
    "Max retries before giving up on ECN");

VNET_DEFINE(int, tcp_insecure_syn) = 0;
#define	V_tcp_insecure_syn	VNET(tcp_insecure_syn)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_syn, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_syn), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting SYN packets");

VNET_DEFINE(int, tcp_insecure_rst) = 0;
#define	V_tcp_insecure_rst	VNET(tcp_insecure_rst)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow RFC793 instead of RFC5961 criteria for accepting RST packets");

VNET_DEFINE(int, tcp_recvspace) = 1024*64;
#define	V_tcp_recvspace		VNET(tcp_recvspace)
SYSCTL_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
#define	V_tcp_do_autorcvbuf	VNET(tcp_do_autorcvbuf)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
#define	V_tcp_autorcvbuf_inc	VNET(tcp_autorcvbuf_inc)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_inc), 0,
    "Incrementor step size of automatic receive buffer");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
#define	V_tcp_autorcvbuf_max	VNET(tcp_autorcvbuf_max)
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_VNET | CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");

VNET_DEFINE(struct inpcbhead, tcb);
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
VNET_DEFINE(struct inpcbinfo, tcbinfo);
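
/*
 * Illustrative sketch of the receive-side autosizing knobs above, assuming
 * their defaults: a connection starts with a net.inet.tcp.recvspace buffer
 * (64K); each RTT in which at least 7/8 of the current buffer arrived (see
 * the header-prediction fast path in tcp_do_segment() below) grows it by
 * recvbuf_inc (16K), capped at recvbuf_max (2M).  From a cold start that is
 * at most (2M - 64K) / 16K = 124 steps to reach the cap.
 */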

/*
 * TCP statistics are stored in an array of counter(9)s, whose size matches
 * the size of struct tcpstat.  The TCP running connection count is a
 * regular array.
 */
VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
    tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");
VNET_DEFINE(counter_u64_t, tcps_states[TCP_NSTATES]);
SYSCTL_COUNTER_U64_ARRAY(_net_inet_tcp, TCPCTL_STATES, states, CTLFLAG_RD |
    CTLFLAG_VNET, &VNET_NAME(tcps_states)[0], TCP_NSTATES,
    "TCP connection counts by TCP state");

static void
tcp_vnet_init(const void *unused)
{

	COUNTER_ARRAY_ALLOC(V_tcps_states, TCP_NSTATES, M_WAITOK);
	VNET_PCPUSTAT_ALLOC(tcpstat, M_WAITOK);
}
VNET_SYSINIT(tcp_vnet_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    tcp_vnet_init, NULL);

#ifdef VIMAGE
static void
tcp_vnet_uninit(const void *unused)
{

	COUNTER_ARRAY_FREE(V_tcps_states, TCP_NSTATES);
	VNET_PCPUSTAT_FREE(tcpstat);
}
VNET_SYSUNINIT(tcp_vnet_uninit, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    tcp_vnet_uninit, NULL);
#endif /* VIMAGE */

/*
 * Kernel module interface for updating tcpstat.  The argument is an index
 * into tcpstat treated as an array.
 */
void
kmod_tcpstat_inc(int statnum)
{

	counter_u64_add(VNET(tcpstat)[statnum], 1);
}

/*
 * Wrapper for the TCP established input helper hook.
 */
void
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
		    tp->osd);
	}
}

/*
 * CC wrapper hook functions
 */
void
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
	if (tp->snd_cwnd <= tp->snd_wnd)
		tp->ccv->flags |= CCF_CWND_LIMITED;
	else
		tp->ccv->flags &= ~CCF_CWND_LIMITED;

	if (type == CC_ACK) {
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
			    V_tcp_abc_l_var * tcp_maxseg(tp));
			if (tp->t_bytes_acked >= tp->snd_cwnd) {
				tp->t_bytes_acked -= tp->snd_cwnd;
				tp->ccv->flags |= CCF_ABC_SENTAWND;
			}
		} else {
			tp->ccv->flags &= ~CCF_ABC_SENTAWND;
			tp->t_bytes_acked = 0;
		}
	}

	if (CC_ALGO(tp)->ack_received != NULL) {
		/* XXXLAS: Find a way to live without this */
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->ack_received(tp->ccv, type);
	}
}
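
/*
 * Worked example for the ABC accounting in cc_ack_received() above,
 * assuming the default V_tcp_abc_l_var = 2 and maxseg = 1460: an ACK
 * covering 5840 new bytes in congestion avoidance credits only
 * min(5840, 2 * 1460) = 2920 bytes to t_bytes_acked; once t_bytes_acked
 * reaches snd_cwnd, CCF_ABC_SENTAWND is raised for the CC module and the
 * accumulator keeps only the remainder.
 */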

void
cc_conn_init(struct tcpcb *tp)
{
	struct hc_metrics_lite metrics;
	struct inpcb *inp = tp->t_inpcb;
	u_int maxseg;
	int rtt;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_hc_get(&inp->inp_inc, &metrics);
	maxseg = tcp_maxseg(tp);

	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
		tp->t_srtt = rtt;
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		TCPSTAT_INC(tcps_usedrtt);
		if (metrics.rmx_rttvar) {
			tp->t_rttvar = metrics.rmx_rttvar;
			TCPSTAT_INC(tcps_usedrttvar);
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
	if (metrics.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * maxseg, metrics.rmx_ssthresh);
		TCPSTAT_INC(tcps_usedssthresh);
	}

	/*
	 * Set the initial slow-start flight size.
	 *
	 * RFC5681 Section 3.1 specifies the default conservative values.
	 * RFC3390 specifies slightly more aggressive values.
	 * RFC6928 increases it to ten segments.
	 * A user-specified value for the initial flight size is also
	 * supported.
	 *
	 * If a SYN or SYN/ACK was lost and retransmitted, we have to
	 * reduce the initial CWND to one segment as congestion is likely
	 * requiring us to be cautious.
	 */
	if (tp->snd_cwnd == 1)
		tp->snd_cwnd = maxseg;		/* SYN(-ACK) lost */
	else if (V_tcp_initcwnd_segments)
		tp->snd_cwnd = min(V_tcp_initcwnd_segments * maxseg,
		    max(2 * maxseg, V_tcp_initcwnd_segments * 1460));
	else if (V_tcp_do_rfc3390)
		tp->snd_cwnd = min(4 * maxseg, max(2 * maxseg, 4380));
	else {
		/* Per RFC5681 Section 3.1 */
		if (maxseg > 2190)
			tp->snd_cwnd = 2 * maxseg;
		else if (maxseg > 1095)
			tp->snd_cwnd = 3 * maxseg;
		else
			tp->snd_cwnd = 4 * maxseg;
	}

	if (CC_ALGO(tp)->conn_init != NULL)
		CC_ALGO(tp)->conn_init(tp->ccv);
}

void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
	u_int maxseg;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	switch(type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(tp->t_flags)) {
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(tp->t_flags)) {
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		maxseg = tcp_maxseg(tp);
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
		tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
		    maxseg) * maxseg;
		tp->snd_cwnd = maxseg;
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_flags &= ~TF_PREVVALID;
		tp->t_badrxtwin = 0;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		if (th != NULL)
			tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(tp->ccv, type);
	}
}

void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* XXXLAS: KASSERT that we're in recovery? */

	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(tp->ccv);
	}
	/* XXXLAS: EXIT_RECOVERY ? */
	tp->t_bytes_acked = 0;
}
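
/*
 * Worked examples for the window arithmetic above, assuming maxseg = 1460:
 *
 * cc_conn_init(), initial window with the default ten-segment setting:
 *	snd_cwnd = min(10 * 1460, max(2 * 1460, 10 * 1460)) = 14600 bytes.
 *
 * cc_cong_signal(), CC_RTO case with snd_wnd = 65535, snd_cwnd = 20440:
 *	snd_ssthresh = max(2, min(65535, 20440) / 2 / 1460) * 1460
 *		     = 7 * 1460 = 10220 bytes,
 * and snd_cwnd restarts at a single segment (1460 bytes).
 */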

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * the following conditions are met:
 *	- There is no delayed ack timer in progress.
 *	- Our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window.
 *	- LRO wasn't used for this segment.  We make sure by checking that
 *	  the segment size is not larger than the MSS.
 */
#define DELAY_ACK(tp, tlen)						\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (tlen <= tp->t_maxseg) &&					\
	    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))

static void inline
cc_ecnpkt_handler(struct tcpcb *tp, struct tcphdr *th, uint8_t iptos)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	if (CC_ALGO(tp)->ecnpkt_handler != NULL) {
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->ccv->flags |= CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT0:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		case IPTOS_ECN_ECT1:
			tp->ccv->flags &= ~CCF_IPHDR_CE;
			break;
		}

		if (th->th_flags & TH_CWR)
			tp->ccv->flags |= CCF_TCPHDR_CWR;
		else
			tp->ccv->flags &= ~CCF_TCPHDR_CWR;

		if (tp->t_flags & TF_DELACK)
			tp->ccv->flags |= CCF_DELACK;
		else
			tp->ccv->flags &= ~CCF_DELACK;

		CC_ALGO(tp)->ecnpkt_handler(tp->ccv);

		if (tp->ccv->flags & CCF_ACKNOW)
			tcp_timer_activate(tp, TT_DELACK, tcp_delacktime);
	}
}

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;
	struct ip6_hdr *ip6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ip6 = mtod(m, struct ip6_hdr *);
	ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ifa_free(&ia6->ia_ifa);
		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return (IPPROTO_DONE);
	}
	if (ia6)
		ifa_free(&ia6->ia_ifa);

	return (tcp_input(mp, offp, proto));
}
#endif /* INET6 */

int
tcp_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct tcphdr *th = NULL;
	struct ip *ip = NULL;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int off0;
	int optlen = 0;
#ifdef INET
	int len;
#endif
	int tlen = 0, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
	uint8_t iptos;
	struct m_tag *fwd_tag = NULL;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
#endif /* INET6 */
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */
	int ti_locked;
#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	off0 = *offp;
	m = *mp;
	*mp = NULL;
	to.to_flags = 0;
	TCPSTAT_INC(tcps_rcvtotal);

#ifdef INET6
	if (isipv6) {
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */

		if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
			m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
			if (m == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return (IPPROTO_DONE);
			}
		}

		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)((caddr_t)ip6 + off0);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in6_cksum_pseudo(ip6, tlen,
				    IPPROTO_TCP, m->m_pkthdr.csum_data);
			th->th_sum ^= 0xffff;
		} else
			th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
		iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return (IPPROTO_DONE);
			}
		}
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ntohs(ip->ip_len) - off0;

		iptos = ip->ip_tos;
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data + tlen +
				    IPPROTO_TCP));
			th->th_sum ^= 0xffff;
		} else {
			struct ipovly *ipov = (struct ipovly *)ip;

			/*
			 * Checksum extended TCP header and data.
			 */
			len = off0 + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = htons(tlen);
			th->th_sum = in_cksum(m, len);
			/* Reset length for SDT probes. */
			ip->ip_len = htons(len);
			/* Reset TOS bits */
			ip->ip_tos = iptos;
			/* Re-initialization for later version check */
			ip->ip_v = IPVERSION;
		}

		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
	}
#endif /* INET */
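
	/*
	 * A note on the checksum paths above: for offloaded segments the
	 * NIC leaves a partial sum in m->m_pkthdr.csum_data; folding in the
	 * pseudo header (in_pseudo()/in6_cksum_pseudo()) and inverting via
	 * th_sum ^= 0xffff makes an intact segment sum to zero, which is
	 * why the bare "if (th->th_sum)" tests above suffice to detect
	 * corruption and bump tcps_rcvbadsum.
	 */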

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		TCPSTAT_INC(tcps_rcvbadoff);
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
#ifdef INET6
		if (isipv6) {
			IP6_EXTHDR_CHECK(m, off0, off, IPPROTO_DONE);
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					return (IPPROTO_DONE);
				}
				ip = mtod(m, struct ip *);
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
#endif
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	tcp_fields_to_host(th);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Locate pcb for segment; if we're likely to add or remove a
	 * connection then first acquire pcbinfo lock.  There are three cases
	 * where we might discover later we need a write lock despite the
	 * flags: ACKs moving a connection out of the syncache, ACKs for a
	 * connection in TIMEWAIT and SYNs not targeting a listening socket.
	 */
	if ((thflags & (TH_FIN | TH_RST)) != 0) {
		INP_INFO_RLOCK(&V_tcbinfo);
		ti_locked = TI_RLOCKED;
	} else
		ti_locked = TI_UNLOCKED;

	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	if (
#ifdef INET6
	    (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
#ifdef INET
	    || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
#endif
#endif
#if defined(INET) && !defined(INET6)
	    (m->m_flags & M_IP_NEXTHOP)
#endif
	    )
		fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);
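
	/*
	 * Context sketch: PACKET_TAG_IPFORWARD is attached by a firewall
	 * "fwd" action, e.g. an ipfw rule along the lines of
	 *
	 *	ipfw add fwd 127.0.0.1,3129 tcp from any to any 80 in
	 *
	 * The lookups below first check for an existing connection to the
	 * original destination and, failing that, hunt for the "ambushing"
	 * transparent-proxy socket bound at the rewritten next-hop address.
	 */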

findpcb:
#ifdef INVARIANTS
	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	} else {
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif
#ifdef INET6
	if (isipv6 && fwd_tag != NULL) {
		struct sockaddr_in6 *next_hop6;

		next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
		/*
		 * Transparently forwarded.  Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in6_pcblookup_mbuf(&V_tcbinfo,
		    &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
			    th->th_sport, &next_hop6->sin6_addr,
			    next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else if (isipv6) {
		inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
		    th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	if (fwd_tag != NULL) {
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded.  Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
			    th->th_sport, next_hop->sin_addr,
			    next_hop->sin_port ? ntohs(next_hop->sin_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
		    th->th_sport, ip->ip_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
#endif /* INET */

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
			if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
		    V_blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_WLOCK_ASSERT(inp);
	/*
	 * While waiting for inp lock during the lookup, another thread
	 * can have dropped the inpcb, in which case we need to loop back
	 * and try to find a new inpcb to deliver to.
	 */
	if (inp->inp_flags & INP_DROPPED) {
		INP_WUNLOCK(inp);
		inp = NULL;
		goto findpcb;
	}
	if ((inp->inp_flowtype == M_HASHTYPE_NONE) &&
	    (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) &&
	    ((inp->inp_socket == NULL) ||
	    (inp->inp_socket->so_options & SO_ACCEPTCONN) == 0)) {
		inp->inp_flowid = m->m_pkthdr.flowid;
		inp->inp_flowtype = M_HASHTYPE_GET(m);
	}
#if defined(IPSEC) || defined(IPSEC_SUPPORT)
#ifdef INET6
	if (isipv6 && IPSEC_ENABLED(ipv6) &&
	    IPSEC_CHECK_POLICY(ipv6, m, inp) != 0) {
		goto dropunlock;
	}
#ifdef INET
	else
#endif
#endif /* INET6 */
#ifdef INET
	if (IPSEC_ENABLED(ipv4) &&
	    IPSEC_CHECK_POLICY(ipv4, m, inp) != 0) {
		goto dropunlock;
	}
#endif /* INET */
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6) {
			if (inp->inp_ip_minttl > ip6->ip6_hlim)
				goto dropunlock;
		} else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch stray
	 * or duplicate segments arriving late.  If this segment was a
	 * legitimate new connection attempt, the old INPCB gets removed and
	 * we can try again to find a listening socket.
	 *
	 * At this point, due to earlier optimism, we may hold only an inpcb
	 * lock, and not the inpcbinfo write lock.  If so, we need to try to
	 * acquire it, or if that fails, acquire a reference on the inpcb,
	 * drop all locks, acquire a global write lock, and then re-acquire
	 * the inpcb lock.  We may at that point discover that another thread
	 * has tried to free the inpcb, in which case we need to loop back
	 * and try to find a new inpcb to deliver to.
	 *
	 * XXXRW: It may be time to rethink timewait locking.
	 */
relocked:
	if (inp->inp_flags & INP_TIMEWAIT) {
		if (ti_locked == TI_UNLOCKED) {
			if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_RLOCK(&V_tcbinfo);
				ti_locked = TI_RLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele_wlocked(inp)) {
					inp = NULL;
					goto findpcb;
				} else if (inp->inp_flags & INP_DROPPED) {
					INP_WUNLOCK(inp);
					inp = NULL;
					goto findpcb;
				}
			} else
				ti_locked = TI_RLOCKED;
		}
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		INP_INFO_RUNLOCK(&V_tcbinfo);
		return (IPPROTO_DONE);
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		tcp_offload_input(tp, m);
		m = NULL;	/* consumed by the TOE driver */
		goto dropunlock;
	}
#endif

	/*
	 * We've identified a valid inpcb, but it could be that we need an
	 * inpcbinfo write lock but don't hold it.  In this case, attempt to
	 * acquire using the same strategy as the TIMEWAIT case above.  If we
	 * relock, we have to jump back to 'relocked' as the connection might
	 * now be in TIMEWAIT.
	 */
#ifdef INVARIANTS
	if ((thflags & (TH_FIN | TH_RST)) != 0)
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
#endif
	if (!((tp->t_state == TCPS_ESTABLISHED && (thflags & TH_SYN) == 0) ||
	    (tp->t_state == TCPS_LISTEN && (thflags & TH_SYN) &&
	    !(tp->t_flags & TF_FASTOPEN)))) {
		if (ti_locked == TI_UNLOCKED) {
			if (INP_INFO_TRY_RLOCK(&V_tcbinfo) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_RLOCK(&V_tcbinfo);
				ti_locked = TI_RLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele_wlocked(inp)) {
					inp = NULL;
					goto findpcb;
				} else if (inp->inp_flags & INP_DROPPED) {
					INP_WUNLOCK(inp);
					inp = NULL;
					goto findpcb;
				}
				goto relocked;
			} else
				ti_locked = TI_RLOCKED;
		}
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	}

#ifdef MAC
	INP_WLOCK_ASSERT(inp);
	if (mac_inpcb_check_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
#ifdef INET6
		if (isipv6) {
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
		} else
#endif
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif /* TCPDEBUG */
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

		KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
		    "tp not listening", __func__));
		bzero(&inc, sizeof(inc));
#ifdef INET6
		if (isipv6) {
			inc.inc_flags |= INC_ISIPV6;
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;
		inc.inc_fibnum = so->so_fibnum;

		/*
		 * Check for an existing connection attempt in syncache if
		 * the only flag set is ACK.  A successful lookup creates a
		 * new socket appended to the listen queue in SYN_RECEIVED
		 * state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {

			INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * inp and tcpinfo locks.
			 */
			rstreason = syncache_expand(&inc, &to, th, &so, m);
			if (rstreason < 0) {
				/*
				 * A failing TCP MD5 signature comparison
				 * must result in the segment being dropped
				 * and must not produce any response back
				 * to the sender.
				 */
				goto dropunlock;
			} else if (rstreason == 0) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
#ifdef TCP_RFC7413
new_tfo_socket:
#endif
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__,
					    V_tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again");
				if (V_tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_WUNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			/*
			 * New connection inpcb is already locked by
			 * syncache_expand().
			 */
			INP_WLOCK_ASSERT(inp);
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: ", __func__));
			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
			    iptos, ti_locked);
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
			return (IPPROTO_DONE);
		}
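
		/*
		 * Handshake flow sketch around the ACK block above:
		 *
		 *	SYN       -> syncache_add(), no socket created yet
		 *	final ACK -> syncache_expand(), socket created in
		 *	             SYN_RECEIVED and fed to tcp_do_segment()
		 *	stray ACK -> no syncache entry, answered with a RST
		 */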
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			TCPSTAT_INC(tcps_badsyn);
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && V_drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated address is forbidden,
		 * we do not accept SYN to deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  We compromise as it is much better for the peer
		 * to send a RST, and the RST will be the final packet
		 * of the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it reads like this:
		 * 1. use of deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple descriptions of deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !V_ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			ia6 = in6ifa_ifwithaddr(&ip6->ip6_dst, 0 /* XXX */);
			if (ia6 != NULL &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				ifa_free(&ia6->ia_ifa);
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt to deprecated "
					    "IPv6 address rejected\n",
					    s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (ia6)
				ifa_free(&ia6->ia_ifa);
		}
#endif /* INET6 */
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address.  Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "Connection attempt from broad- or multicast "
				    "link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
#ifdef INET6
		if (isipv6) {
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt to/from self "
					    "ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt from/to multicast "
					    "address ignored\n", s, __func__);
				goto dropunlock;
			}
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt from/to self "
					    "ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Connection attempt from/to broad- "
					    "or multicast address ignored\n",
					    s, __func__);
				goto dropunlock;
			}
		}
#endif
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		TCP_PROBE3(debug__input, tp, th, mtod(m, const char *));
		tcp_dooptions(&to, optp, optlen, TO_SYN);
#ifdef TCP_RFC7413
		if (syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL))
			goto new_tfo_socket;
#else
		syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL);
#endif
		/*
		 * Entry added to syncache and mbuf consumed.
		 * Only the listen socket is unlocked by syncache_add().
		 */
		if (ti_locked == TI_RLOCKED) {
			INP_INFO_RUNLOCK(&V_tcbinfo);
			ti_locked = TI_UNLOCKED;
		}
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		return (IPPROTO_DONE);
	} else if (tp->t_state == TCPS_LISTEN) {
		/*
		 * When a listen socket is torn down the SO_ACCEPTCONN
		 * flag is removed first while connections are drained
		 * from the accept queue in an unlock/lock cycle of the
		 * ACCEPT_LOCK, opening a race condition allowing a SYN
		 * attempt to go through unhandled.
		 */
		goto dropunlock;
	}
#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if (tp->t_flags & TF_SIGNATURE) {
		tcp_dooptions(&to, optp, optlen, thflags);
		if ((to.to_flags & TOF_SIGNATURE) == 0) {
			TCPSTAT_INC(tcps_sig_err_nosigopt);
			goto dropunlock;
		}
		if (!TCPMD5_ENABLED() ||
		    TCPMD5_INPUT(m, th, to.to_signature) != 0)
			goto dropunlock;
	}
#endif
	TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tp->t_fb->tfb_tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	return (IPPROTO_DONE);

dropwithreset:
	TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK(&V_tcbinfo);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	if (m != NULL)
		TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

	if (ti_locked == TI_RLOCKED) {
		INP_INFO_RUNLOCK(&V_tcbinfo);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL)
		INP_WUNLOCK(inp);

drop:
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
	return (IPPROTO_DONE);
}

void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
    int ti_locked)
{
	int thflags, acked, ourfinisacked, needoutput = 0, sack_changed;
	int rstreason, todrop, win;
	u_long tiwin;
	char *s;
	struct in_conninfo *inc;
	struct mbuf *mfree;
	struct tcpopt to;
	int tfo_syn;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;
	inc = &tp->t_inpcb->inp_inc;
	tp->sackhint.last_sack_ack = 0;
	sack_changed = 0;

	/*
	 * If this is either a state-changing packet or the current state
	 * isn't established, we require a write lock on tcbinfo.  Otherwise,
	 * we allow the tcbinfo to be in either a locked or unlocked state,
	 * as the caller may have unnecessarily acquired a write lock due to
	 * a race.
	 */
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
	    tp->t_state != TCPS_ESTABLISHED) {
		KASSERT(ti_locked == TI_RLOCKED, ("%s ti_locked %d for "
		    "SYN/FIN/RST/!EST", __func__, ti_locked));
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	} else {
#ifdef INVARIANTS
		if (ti_locked == TI_RLOCKED)
			INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
		else {
			KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
			    "ti_locked: %d", __func__, ti_locked));
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		}
#endif
	}
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

#ifdef TCPPCAP
	/* Save segment, if requested. */
	tcp_pcap_add(th, m, &(tp->t_inpkts));
#endif

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));

	/*
	 * Scale up the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;

	/*
	 * TCP ECN processing.
	 */
	if (tp->t_flags & TF_ECN_PERMIT) {
		if (thflags & TH_CWR)
			tp->t_flags &= ~TF_ECN_SND_ECE;
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_flags |= TF_ECN_SND_ECE;
			TCPSTAT_INC(tcps_ecn_ce);
			break;
		case IPTOS_ECN_ECT0:
			TCPSTAT_INC(tcps_ecn_ect0);
			break;
		case IPTOS_ECN_ECT1:
			TCPSTAT_INC(tcps_ecn_ect1);
			break;
		}

		/* Process a packet differently from RFC3168. */
		cc_ecnpkt_handler(tp, th, iptos);

		/* Congestion experienced. */
		if (thflags & TH_ECE) {
			cc_cong_signal(tp, th, CC_ECN);
		}
	}

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

#if defined(IPSEC_SUPPORT) || defined(TCP_SIGNATURE)
	if ((tp->t_flags & TF_SIGNATURE) != 0 &&
	    (to.to_flags & TOF_SIGNATURE) == 0) {
		TCPSTAT_INC(tcps_sig_err_sigopt);
		/* XXX: should drop? */
	}
#endif
	/*
	 * If the echoed timestamp is later than the current time, fall back
	 * to the non-RFC1323 RTT calculation.  Normalize the timestamp if
	 * syncookies were used when this connection was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
			to.to_tsecr = 0;
	}
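
	/*
	 * Worked example of the normalization above: with ts_offset = 100,
	 * an echoed tsecr of 1100 rebases to 1000 and is kept when
	 * tcp_ts_getticks() reads, say, 1005; a rebased value still "in the
	 * future" would be zeroed, forcing the RTT sample to come from
	 * t_rtttime instead.
	 */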

	/*
	 * If timestamps were negotiated during SYN/ACK they should
	 * appear on every segment during this session and vice versa.
	 */
	if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp missing, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}
	if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}

	/*
	 * Process options only when we get SYN/ACK back.  The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		}
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = tcp_ts_getticks();
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    LIST_EMPTY(&tp->t_segq) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = tcp_ts_getticks();
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    !IN_RECOVERY(tp->t_flags) &&
			    (to.to_flags & TOF_SACK) == 0 &&
			    TAILQ_EMPTY(&tp->snd_holes)) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				if (ti_locked == TI_RLOCKED)
					INP_INFO_RUNLOCK(&V_tcbinfo);
				ti_locked = TI_UNLOCKED;

				TCPSTAT_INC(tcps_predack);

				/*
				 * "bad retransmit" recovery.
				 */
				if (tp->t_rxtshift == 1 &&
				    tp->t_flags & TF_PREVVALID &&
				    (int)(ticks - tp->t_badrxtwin) < 0) {
					cc_cong_signal(tp, th, CC_RTO_ERR);
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					u_int t;

					t = tcp_ts_getticks() - to.to_tsecr;
					if (!tp->t_rttlow || tp->t_rttlow > t)
						tp->t_rttlow = t;
					tcp_xmit_timer(tp,
					    TCP_TS_TO_TICKS(t) + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
				acked = BYTES_THIS_ACK(tp, th);

				/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
				hhook_run_tcp_est_in(tp, th, &to);

				TCPSTAT_INC(tcps_rcvackpack);
				TCPSTAT_ADD(tcps_rcvackbyte, acked);
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;

				/*
				 * Let the congestion control algorithm update
				 * congestion control related information. This
				 * typically means increasing the congestion
				 * window.
				 */
				cc_ack_received(tp, th, CC_ACK);

				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				TCP_PROBE3(debug__input, tp, th,
				    mtod(m, const char *));
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);
				sowwakeup(so);
				if (sbavail(&so->so_snd))
					(void) tp->t_fb->tfb_tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			/*
			 * This is a pure, in-sequence data packet with
			 * nothing on the reassembly queue and we have enough
			 * buffer space to take it.
			 */
			if (ti_locked == TI_RLOCKED)
				INP_INFO_RUNLOCK(&V_tcbinfo);
			ti_locked = TI_UNLOCKED;

			/* Clean receiver SACK report if present */
			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			TCPSTAT_INC(tcps_preddat);
			tp->rcv_nxt += tlen;
1820 */ 1821 tp->snd_wl1 = th->th_seq; 1822 /* 1823 * Pull rcv_up up to prevent seq wrap relative to 1824 * rcv_nxt. 1825 */ 1826 tp->rcv_up = tp->rcv_nxt; 1827 TCPSTAT_INC(tcps_rcvpack); 1828 TCPSTAT_ADD(tcps_rcvbyte, tlen); 1829#ifdef TCPDEBUG 1830 if (so->so_options & SO_DEBUG) 1831 tcp_trace(TA_INPUT, ostate, tp, 1832 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1833#endif 1834 TCP_PROBE3(debug__input, tp, th, mtod(m, const char *)); 1835 1836 /* 1837 * Automatic sizing of receive socket buffer. Often the send 1838 * buffer size is not optimally adjusted to the actual network 1839 * conditions at hand (delay bandwidth product). Setting the 1840 * buffer size too small limits throughput on links with high 1841 * bandwidth and high delay (e.g. trans-continental/oceanic links). 1842 * 1843 * On the receive side the socket buffer memory is only rarely 1844 * used to any significant extent. This allows us to be much 1845 * more aggressive in scaling the receive socket buffer. For 1846 * the case that the buffer space is actually used to a large 1847 * extent and we run out of kernel memory we can simply drop 1848 * the new segments; TCP on the sender will just retransmit them 1849 * later. Setting the buffer size too big may only consume too 1850 * much kernel memory if the application doesn't read() from 1851 * the socket, or if packet loss or reordering makes use of the 1852 * reassembly queue. 1853 * 1854 * The criteria to step up the receive buffer one notch are: 1855 * 1. Application has not set receive buffer size with 1856 * SO_RCVBUF. Setting SO_RCVBUF clears SB_AUTOSIZE. 1857 * 2. the number of bytes received is measured over the time it takes 1858 * one timestamp to be reflected back to us (the RTT); 1859 * 3. received bytes per RTT is within seven eighths of the 1860 * current socket buffer size; 1861 * 4. receive buffer size has not hit maximal automatic size; 1862 * 1863 * This algorithm does one step per RTT at most and only if 1864 * we receive a bulk stream w/o packet losses or reorderings. 1865 * Shrinking the buffer during idle times is not necessary as 1866 * it doesn't consume any memory when idle. 1867 * 1868 * TODO: Only step up if the application is actually serving 1869 * the buffer to better manage the socket buffer resources. 1870 */ 1871 if (V_tcp_do_autorcvbuf && 1872 (to.to_flags & TOF_TS) && 1873 to.to_tsecr && 1874 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 1875 if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) && 1876 to.to_tsecr - tp->rfbuf_ts < hz) { 1877 if (tp->rfbuf_cnt > 1878 (so->so_rcv.sb_hiwat / 8 * 7) && 1879 so->so_rcv.sb_hiwat < 1880 V_tcp_autorcvbuf_max) { 1881 newsize = 1882 min(so->so_rcv.sb_hiwat + 1883 V_tcp_autorcvbuf_inc, 1884 V_tcp_autorcvbuf_max); 1885 } 1886 /* Start over with next RTT. */ 1887 tp->rfbuf_ts = 0; 1888 tp->rfbuf_cnt = 0; 1889 } else 1890 tp->rfbuf_cnt += tlen; /* add up */ 1891 } 1892 1893 /* Add data to socket buffer. */ 1894 SOCKBUF_LOCK(&so->so_rcv); 1895 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1896 m_freem(m); 1897 } else { 1898 /* 1899 * Set new socket buffer size. 1900 * Give up when limit is reached. 1901 */ 1902 if (newsize) 1903 if (!sbreserve_locked(&so->so_rcv, 1904 newsize, so, NULL)) 1905 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1906 m_adj(m, drop_hdrlen); /* delayed header drop */ 1907 sbappendstream_locked(&so->so_rcv, m, 0); 1908 } 1909 /* NB: sorwakeup_locked() does an implicit unlock.
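 *
 * Hedged numeric sketch of the autoscaling block above (values
 * assumed, not from the source): with sb_hiwat = 65536 and
 * SB_AUTOSIZE set, receiving more than 7/8 * 65536 = 57344 bytes
 * within one reflected timestamp (one RTT) steps the limit to
 * min(65536 + V_tcp_autorcvbuf_inc, V_tcp_autorcvbuf_max).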
*/ 1910 sorwakeup_locked(so); 1911 if (DELAY_ACK(tp, tlen)) { 1912 tp->t_flags |= TF_DELACK; 1913 } else { 1914 tp->t_flags |= TF_ACKNOW; 1915 tp->t_fb->tfb_tcp_output(tp); 1916 } 1917 goto check_delack; 1918 } 1919 } 1920 1921 /* 1922 * Calculate amount of space in receive window, 1923 * and then do TCP input processing. 1924 * Receive window is amount of space in rcv queue, 1925 * but not less than advertised window. 1926 */ 1927 win = sbspace(&so->so_rcv); 1928 if (win < 0) 1929 win = 0; 1930 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1931 1932 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 1933 tp->rfbuf_ts = 0; 1934 tp->rfbuf_cnt = 0; 1935 1936 switch (tp->t_state) { 1937 1938 /* 1939 * If the state is SYN_RECEIVED: 1940 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1941 */ 1942 case TCPS_SYN_RECEIVED: 1943 if ((thflags & TH_ACK) && 1944 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1945 SEQ_GT(th->th_ack, tp->snd_max))) { 1946 rstreason = BANDLIM_RST_OPENPORT; 1947 goto dropwithreset; 1948 } 1949#ifdef TCP_RFC7413 1950 if (tp->t_flags & TF_FASTOPEN) { 1951 /* 1952 * When a TFO connection is in SYN_RECEIVED, the 1953 * only valid packets are the initial SYN, a 1954 * retransmit/copy of the initial SYN (possibly with 1955 * a subset of the original data), a valid ACK, a 1956 * FIN, or a RST. 1957 */ 1958 if ((thflags & (TH_SYN|TH_ACK)) == (TH_SYN|TH_ACK)) { 1959 rstreason = BANDLIM_RST_OPENPORT; 1960 goto dropwithreset; 1961 } else if (thflags & TH_SYN) { 1962 /* non-initial SYN is ignored */ 1963 if ((tcp_timer_active(tp, TT_DELACK) || 1964 tcp_timer_active(tp, TT_REXMT))) 1965 goto drop; 1966 } else if (!(thflags & (TH_ACK|TH_FIN|TH_RST))) { 1967 goto drop; 1968 } 1969 } 1970#endif 1971 break; 1972 1973 /* 1974 * If the state is SYN_SENT: 1975 * if seg contains an ACK, but not for our SYN, drop the input. 1976 * if seg contains a RST, then drop the connection. 1977 * if seg does not contain SYN, then drop it. 1978 * Otherwise this is an acceptable SYN segment 1979 * initialize tp->rcv_nxt and tp->irs 1980 * if seg contains ack then advance tp->snd_una 1981 * if seg contains an ECE and ECN support is enabled, the stream 1982 * is ECN capable. 1983 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1984 * arrange for segment to be acked (eventually) 1985 * continue processing rest of data/controls, beginning with URG 1986 */ 1987 case TCPS_SYN_SENT: 1988 if ((thflags & TH_ACK) && 1989 (SEQ_LEQ(th->th_ack, tp->iss) || 1990 SEQ_GT(th->th_ack, tp->snd_max))) { 1991 rstreason = BANDLIM_UNLIMITED; 1992 goto dropwithreset; 1993 } 1994 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) { 1995 TCP_PROBE5(connect__refused, NULL, tp, 1996 mtod(m, const char *), tp, th); 1997 tp = tcp_drop(tp, ECONNREFUSED); 1998 } 1999 if (thflags & TH_RST) 2000 goto drop; 2001 if (!(thflags & TH_SYN)) 2002 goto drop; 2003 2004 tp->irs = th->th_seq; 2005 tcp_rcvseqinit(tp); 2006 if (thflags & TH_ACK) { 2007 TCPSTAT_INC(tcps_connects); 2008 soisconnected(so); 2009#ifdef MAC 2010 mac_socketpeer_set_from_mbuf(m, so); 2011#endif 2012 /* Do window scaling on this connection? */ 2013 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2014 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2015 tp->rcv_scale = tp->request_r_scale; 2016 } 2017 tp->rcv_adv += imin(tp->rcv_wnd, 2018 TCP_MAXWIN << tp->rcv_scale); 2019 tp->snd_una++; /* SYN is acked */ 2020 /* 2021 * If there's data, delay ACK; if there's also a FIN 2022 * ACKNOW will be turned on later. 
2023 */ 2024 if (DELAY_ACK(tp, tlen) && tlen != 0) 2025 tcp_timer_activate(tp, TT_DELACK, 2026 tcp_delacktime); 2027 else 2028 tp->t_flags |= TF_ACKNOW; 2029 2030 if ((thflags & TH_ECE) && V_tcp_do_ecn) { 2031 tp->t_flags |= TF_ECN_PERMIT; 2032 TCPSTAT_INC(tcps_ecn_shs); 2033 } 2034 2035 /* 2036 * Received <SYN,ACK> in SYN_SENT[*] state. 2037 * Transitions: 2038 * SYN_SENT --> ESTABLISHED 2039 * SYN_SENT* --> FIN_WAIT_1 2040 */ 2041 tp->t_starttime = ticks; 2042 if (tp->t_flags & TF_NEEDFIN) { 2043 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2044 tp->t_flags &= ~TF_NEEDFIN; 2045 thflags &= ~TH_SYN; 2046 } else { 2047 tcp_state_change(tp, TCPS_ESTABLISHED); 2048 TCP_PROBE5(connect__established, NULL, tp, 2049 mtod(m, const char *), tp, th); 2050 cc_conn_init(tp); 2051 tcp_timer_activate(tp, TT_KEEP, 2052 TP_KEEPIDLE(tp)); 2053 } 2054 } else { 2055 /* 2056 * Received initial SYN in SYN-SENT[*] state => 2057 * simultaneous open. 2058 * If it succeeds, connection is half-synchronized. 2059 * Otherwise, do 3-way handshake: 2060 * SYN-SENT -> SYN-RECEIVED 2061 * SYN-SENT* -> SYN-RECEIVED* 2062 */ 2063 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 2064 tcp_timer_activate(tp, TT_REXMT, 0); 2065 tcp_state_change(tp, TCPS_SYN_RECEIVED); 2066 } 2067 2068 KASSERT(ti_locked == TI_RLOCKED, ("%s: trimthenstep6: " 2069 "ti_locked %d", __func__, ti_locked)); 2070 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2071 INP_WLOCK_ASSERT(tp->t_inpcb); 2072 2073 /* 2074 * Advance th->th_seq to correspond to first data byte. 2075 * If data, trim to stay within window, 2076 * dropping FIN if necessary. 2077 */ 2078 th->th_seq++; 2079 if (tlen > tp->rcv_wnd) { 2080 todrop = tlen - tp->rcv_wnd; 2081 m_adj(m, -todrop); 2082 tlen = tp->rcv_wnd; 2083 thflags &= ~TH_FIN; 2084 TCPSTAT_INC(tcps_rcvpackafterwin); 2085 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2086 } 2087 tp->snd_wl1 = th->th_seq - 1; 2088 tp->rcv_up = th->th_seq; 2089 /* 2090 * Client side of transaction: already sent SYN and data. 2091 * If the remote host used T/TCP to validate the SYN, 2092 * our data will be ACK'd; if so, enter normal data segment 2093 * processing in the middle of step 5, ack processing. 2094 * Otherwise, goto step 6. 2095 */ 2096 if (thflags & TH_ACK) 2097 goto process_ACK; 2098 2099 goto step6; 2100 2101 /* 2102 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 2103 * do normal processing. 2104 * 2105 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later. 2106 */ 2107 case TCPS_LAST_ACK: 2108 case TCPS_CLOSING: 2109 break; /* continue normal processing */ 2110 } 2111 2112 /* 2113 * States other than LISTEN or SYN_SENT. 2114 * First check the RST flag and sequence number since reset segments 2115 * are exempt from the timestamp and connection count tests. This 2116 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 2117 * below which allowed reset segments in half the sequence space 2118 * to fall through and be processed (which gives forged reset 2119 * segments with a random sequence number a 50 percent chance of 2120 * killing a connection). 2121 * Then check timestamp, if present. 2122 * Then check the connection count, if present. 2123 * Then check that at least some bytes of segment are within 2124 * receive window. If segment begins before rcv_nxt, 2125 * drop leading data (and SYN); if nothing left, just ack. 2126 */ 2127 if (thflags & TH_RST) { 2128 /* 2129 * RFC5961 Section 3.2 2130 * 2131 * - RST drops connection only if SEG.SEQ == RCV.NXT. 2132 * - If RST is in window, we send challenge ACK.
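 *
 * Illustration with assumed numbers: with last_ack_sent = 5000
 * and rcv_wnd = 1000, an RST at SEG.SEQ 5000 tears the
 * connection down, while one at 5400 only draws the challenge
 * ACK below, so a blind attacker must hit one exact sequence
 * number rather than any of the 1000 in-window values.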
2133 * 2134 * Note: to take into account delayed ACKs, we should 2135 * test against last_ack_sent instead of rcv_nxt. 2136 * Note 2: we handle special case of closed window, not 2137 * covered by the RFC. 2138 */ 2139 if ((SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2140 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) || 2141 (tp->rcv_wnd == 0 && tp->last_ack_sent == th->th_seq)) { 2142 2143 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2144 KASSERT(ti_locked == TI_RLOCKED, 2145 ("%s: TH_RST ti_locked %d, th %p tp %p", 2146 __func__, ti_locked, th, tp)); 2147 KASSERT(tp->t_state != TCPS_SYN_SENT, 2148 ("%s: TH_RST for TCPS_SYN_SENT th %p tp %p", 2149 __func__, th, tp)); 2150 2151 if (V_tcp_insecure_rst || 2152 tp->last_ack_sent == th->th_seq) { 2153 TCPSTAT_INC(tcps_drops); 2154 /* Drop the connection. */ 2155 switch (tp->t_state) { 2156 case TCPS_SYN_RECEIVED: 2157 so->so_error = ECONNREFUSED; 2158 goto close; 2159 case TCPS_ESTABLISHED: 2160 case TCPS_FIN_WAIT_1: 2161 case TCPS_FIN_WAIT_2: 2162 case TCPS_CLOSE_WAIT: 2163 case TCPS_CLOSING: 2164 case TCPS_LAST_ACK: 2165 so->so_error = ECONNRESET; 2166 close: 2167 /* FALLTHROUGH */ 2168 default: 2169 tp = tcp_close(tp); 2170 } 2171 } else { 2172 TCPSTAT_INC(tcps_badrst); 2173 /* Send challenge ACK. */ 2174 tcp_respond(tp, mtod(m, void *), th, m, 2175 tp->rcv_nxt, tp->snd_nxt, TH_ACK); 2176 tp->last_ack_sent = tp->rcv_nxt; 2177 m = NULL; 2178 } 2179 } 2180 goto drop; 2181 } 2182 2183 /* 2184 * RFC5961 Section 4.2 2185 * Send challenge ACK for any SYN in synchronized state. 2186 */ 2187 if ((thflags & TH_SYN) && tp->t_state != TCPS_SYN_SENT && 2188 tp->t_state != TCPS_SYN_RECEIVED) { 2189 KASSERT(ti_locked == TI_RLOCKED, 2190 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked)); 2191 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2192 2193 TCPSTAT_INC(tcps_badsyn); 2194 if (V_tcp_insecure_syn && 2195 SEQ_GEQ(th->th_seq, tp->last_ack_sent) && 2196 SEQ_LT(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 2197 tp = tcp_drop(tp, ECONNRESET); 2198 rstreason = BANDLIM_UNLIMITED; 2199 } else { 2200 /* Send challenge ACK. */ 2201 tcp_respond(tp, mtod(m, void *), th, m, tp->rcv_nxt, 2202 tp->snd_nxt, TH_ACK); 2203 tp->last_ack_sent = tp->rcv_nxt; 2204 m = NULL; 2205 } 2206 goto drop; 2207 } 2208 2209 /* 2210 * RFC 1323 PAWS: If we have a timestamp reply on this segment 2211 * and it's less than ts_recent, drop it. 2212 */ 2213 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 2214 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 2215 2216 /* Check to see if ts_recent is over 24 days old. */ 2217 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) { 2218 /* 2219 * Invalidate ts_recent. If this segment updates 2220 * ts_recent, the age will be reset later and ts_recent 2221 * will get a valid value. If it does not, setting 2222 * ts_recent to zero will at least satisfy the 2223 * requirement that zero be placed in the timestamp 2224 * echo reply when ts_recent isn't valid. The 2225 * age isn't reset until we get a valid ts_recent 2226 * because we don't want out-of-order segments to be 2227 * dropped when ts_recent is old. 2228 */ 2229 tp->ts_recent = 0; 2230 } else { 2231 TCPSTAT_INC(tcps_rcvduppack); 2232 TCPSTAT_ADD(tcps_rcvdupbyte, tlen); 2233 TCPSTAT_INC(tcps_pawsdrop); 2234 if (tlen) 2235 goto dropafterack; 2236 goto drop; 2237 } 2238 } 2239 2240 /* 2241 * In the SYN-RECEIVED state, validate that the packet belongs to 2242 * this connection before trimming the data to fit the receive 2243 * window. 
Check the sequence number versus IRS since we know 2244 * the sequence numbers haven't wrapped. This is a partial fix 2245 * for the "LAND" DoS attack. 2246 */ 2247 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 2248 rstreason = BANDLIM_RST_OPENPORT; 2249 goto dropwithreset; 2250 } 2251 2252 todrop = tp->rcv_nxt - th->th_seq; 2253 if (todrop > 0) { 2254 if (thflags & TH_SYN) { 2255 thflags &= ~TH_SYN; 2256 th->th_seq++; 2257 if (th->th_urp > 1) 2258 th->th_urp--; 2259 else 2260 thflags &= ~TH_URG; 2261 todrop--; 2262 } 2263 /* 2264 * Following if statement from Stevens, vol. 2, p. 960. 2265 */ 2266 if (todrop > tlen 2267 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 2268 /* 2269 * Any valid FIN must be to the left of the window. 2270 * At this point the FIN must be a duplicate or out 2271 * of sequence; drop it. 2272 */ 2273 thflags &= ~TH_FIN; 2274 2275 /* 2276 * Send an ACK to resynchronize and drop any data. 2277 * But keep on processing for RST or ACK. 2278 */ 2279 tp->t_flags |= TF_ACKNOW; 2280 todrop = tlen; 2281 TCPSTAT_INC(tcps_rcvduppack); 2282 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 2283 } else { 2284 TCPSTAT_INC(tcps_rcvpartduppack); 2285 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 2286 } 2287 drop_hdrlen += todrop; /* drop from the top afterwards */ 2288 th->th_seq += todrop; 2289 tlen -= todrop; 2290 if (th->th_urp > todrop) 2291 th->th_urp -= todrop; 2292 else { 2293 thflags &= ~TH_URG; 2294 th->th_urp = 0; 2295 } 2296 } 2297 2298 /* 2299 * If new data are received on a connection after the 2300 * user processes are gone, then RST the other end. 2301 */ 2302 if ((so->so_state & SS_NOFDREF) && 2303 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 2304 KASSERT(ti_locked == TI_RLOCKED, ("%s: SS_NOFDEREF && " 2305 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked)); 2306 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2307 2308 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 2309 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data " 2310 "after socket was closed, " 2311 "sending RST and removing tcpcb\n", 2312 s, __func__, tcpstates[tp->t_state], tlen); 2313 free(s, M_TCPLOG); 2314 } 2315 tp = tcp_close(tp); 2316 TCPSTAT_INC(tcps_rcvafterclose); 2317 rstreason = BANDLIM_UNLIMITED; 2318 goto dropwithreset; 2319 } 2320 2321 /* 2322 * If segment ends after window, drop trailing data 2323 * (and PUSH and FIN); if nothing left, just ACK. 2324 */ 2325 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 2326 if (todrop > 0) { 2327 TCPSTAT_INC(tcps_rcvpackafterwin); 2328 if (todrop >= tlen) { 2329 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 2330 /* 2331 * If window is closed can only take segments at 2332 * window edge, and have to drop data and PUSH from 2333 * incoming segments. Continue processing, but 2334 * remember to ack. Otherwise, drop segment 2335 * and ack. 2336 */ 2337 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 2338 tp->t_flags |= TF_ACKNOW; 2339 TCPSTAT_INC(tcps_rcvwinprobe); 2340 } else 2341 goto dropafterack; 2342 } else 2343 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2344 m_adj(m, -todrop); 2345 tlen -= todrop; 2346 thflags &= ~(TH_PUSH|TH_FIN); 2347 } 2348 2349 /* 2350 * If last ACK falls within this segment's sequence numbers, 2351 * record its timestamp. 2352 * NOTE: 2353 * 1) That the test incorporates suggestions from the latest 2354 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 
2355 * 2) That updating only on newer timestamps interferes with 2356 * our earlier PAWS tests, so this check should be solely 2357 * predicated on the sequence space of this segment. 2358 * 3) That we modify the segment boundary check to be 2359 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 2360 * instead of RFC1323's 2361 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 2362 * This modified check allows us to overcome RFC1323's 2363 * limitations as described in Stevens TCP/IP Illustrated 2364 * Vol. 2 p.869. In such cases, we can still calculate the 2365 * RTT correctly when RCV.NXT == Last.ACK.Sent. 2366 */ 2367 if ((to.to_flags & TOF_TS) != 0 && 2368 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 2369 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 2370 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 2371 tp->ts_recent_age = tcp_ts_getticks(); 2372 tp->ts_recent = to.to_tsval; 2373 } 2374 2375 /* 2376 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 2377 * flag is on (half-synchronized state), then queue data for 2378 * later processing; else drop segment and return. 2379 */ 2380 if ((thflags & TH_ACK) == 0) { 2381 if (tp->t_state == TCPS_SYN_RECEIVED || 2382 (tp->t_flags & TF_NEEDSYN)) { 2383#ifdef TCP_RFC7413 2384 if (tp->t_state == TCPS_SYN_RECEIVED && 2385 tp->t_flags & TF_FASTOPEN) { 2386 tp->snd_wnd = tiwin; 2387 cc_conn_init(tp); 2388 } 2389#endif 2390 goto step6; 2391 } else if (tp->t_flags & TF_ACKNOW) 2392 goto dropafterack; 2393 else 2394 goto drop; 2395 } 2396 2397 /* 2398 * Ack processing. 2399 */ 2400 switch (tp->t_state) { 2401 2402 /* 2403 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 2404 * ESTABLISHED state and continue processing. 2405 * The ACK was checked above. 2406 */ 2407 case TCPS_SYN_RECEIVED: 2408 2409 TCPSTAT_INC(tcps_connects); 2410 soisconnected(so); 2411 /* Do window scaling? */ 2412 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2413 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2414 tp->rcv_scale = tp->request_r_scale; 2415 tp->snd_wnd = tiwin; 2416 } 2417 /* 2418 * Make transitions: 2419 * SYN-RECEIVED -> ESTABLISHED 2420 * SYN-RECEIVED* -> FIN-WAIT-1 2421 */ 2422 tp->t_starttime = ticks; 2423 if (tp->t_flags & TF_NEEDFIN) { 2424 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2425 tp->t_flags &= ~TF_NEEDFIN; 2426 } else { 2427 tcp_state_change(tp, TCPS_ESTABLISHED); 2428 TCP_PROBE5(accept__established, NULL, tp, 2429 mtod(m, const char *), tp, th); 2430#ifdef TCP_RFC7413 2431 if (tp->t_tfo_pending) { 2432 tcp_fastopen_decrement_counter(tp->t_tfo_pending); 2433 tp->t_tfo_pending = NULL; 2434 2435 /* 2436 * Account for the ACK of our SYN prior to 2437 * regular ACK processing below. 2438 */ 2439 tp->snd_una++; 2440 } 2441 /* 2442 * TFO connections call cc_conn_init() during SYN 2443 * processing. Calling it again here for such 2444 * connections is not harmless as it would undo the 2445 * snd_cwnd reduction that occurs when a TFO SYN|ACK 2446 * is retransmitted. 2447 */ 2448 if (!(tp->t_flags & TF_FASTOPEN)) 2449#endif 2450 cc_conn_init(tp); 2451 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp)); 2452 } 2453 /* 2454 * If segment contains data or ACK, will call tcp_reass() 2455 * later; if not, do so now to pass queued data to user. 2456 */ 2457 if (tlen == 0 && (thflags & TH_FIN) == 0) 2458 (void) tcp_reass(tp, (struct tcphdr *)0, 0, 2459 (struct mbuf *)0); 2460 tp->snd_wl1 = th->th_seq - 1; 2461 /* FALLTHROUGH */ 2462 2463 /* 2464 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 2465 * ACKs. 
If the ack is in the range 2466 * tp->snd_una < th->th_ack <= tp->snd_max 2467 * then advance tp->snd_una to th->th_ack and drop 2468 * data from the retransmission queue. If this ACK reflects 2469 * more up to date window information we update our window information. 2470 */ 2471 case TCPS_ESTABLISHED: 2472 case TCPS_FIN_WAIT_1: 2473 case TCPS_FIN_WAIT_2: 2474 case TCPS_CLOSE_WAIT: 2475 case TCPS_CLOSING: 2476 case TCPS_LAST_ACK: 2477 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2478 TCPSTAT_INC(tcps_rcvacktoomuch); 2479 goto dropafterack; 2480 } 2481 if ((tp->t_flags & TF_SACK_PERMIT) && 2482 ((to.to_flags & TOF_SACK) || 2483 !TAILQ_EMPTY(&tp->snd_holes))) 2484 sack_changed = tcp_sack_doack(tp, &to, th->th_ack); 2485 else 2486 /* 2487 * Reset the value so that previous (valid) value 2488 * from the last ack with SACK doesn't get used. 2489 */ 2490 tp->sackhint.sacked_bytes = 0; 2491 2492 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 2493 hhook_run_tcp_est_in(tp, th, &to); 2494 2495 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2496 u_int maxseg; 2497 2498 maxseg = tcp_maxseg(tp); 2499 if (tlen == 0 && 2500 (tiwin == tp->snd_wnd || 2501 (tp->t_flags & TF_SACK_PERMIT))) { 2502 /* 2503 * If this is the first time we've seen a 2504 * FIN from the remote, this is not a 2505 * duplicate and it needs to be processed 2506 * normally. This happens during a 2507 * simultaneous close. 2508 */ 2509 if ((thflags & TH_FIN) && 2510 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2511 tp->t_dupacks = 0; 2512 break; 2513 } 2514 TCPSTAT_INC(tcps_rcvdupack); 2515 /* 2516 * If we have outstanding data (other than 2517 * a window probe), this is a completely 2518 * duplicate ack (ie, window info didn't 2519 * change and FIN isn't set), 2520 * the ack is the biggest we've 2521 * seen and we've seen exactly our rexmt 2522 * threshold of them, assume a packet 2523 * has been dropped and retransmit it. 2524 * Kludge snd_nxt & the congestion 2525 * window so we send only this one 2526 * packet. 2527 * 2528 * We know we're losing at the current 2529 * window size so do congestion avoidance 2530 * (set ssthresh to half the current window 2531 * and pull our congestion window back to 2532 * the new ssthresh). 2533 * 2534 * Dup acks mean that packets have left the 2535 * network (they're now cached at the receiver) 2536 * so bump cwnd by the amount in the receiver 2537 * to keep a constant cwnd packets in the 2538 * network. 2539 * 2540 * When using TCP ECN, notify the peer that 2541 * we reduced the cwnd. 2542 */ 2543 /* 2544 * Following 2 kinds of acks should not affect 2545 * dupack counting: 2546 * 1) Old acks 2547 * 2) Acks with SACK but without any new SACK 2548 * information in them. These could result from 2549 * any anomaly in the network like a switch 2550 * duplicating packets or a possible DoS attack. 2551 */ 2552 if (th->th_ack != tp->snd_una || 2553 ((tp->t_flags & TF_SACK_PERMIT) && 2554 !sack_changed)) 2555 break; 2556 else if (!tcp_timer_active(tp, TT_REXMT)) 2557 tp->t_dupacks = 0; 2558 else if (++tp->t_dupacks > tcprexmtthresh || 2559 IN_FASTRECOVERY(tp->t_flags)) { 2560 cc_ack_received(tp, th, CC_DUPACK); 2561 if ((tp->t_flags & TF_SACK_PERMIT) && 2562 IN_FASTRECOVERY(tp->t_flags)) { 2563 int awnd; 2564 2565 /* 2566 * Compute the amount of data in flight first. 2567 * We can inject new data into the pipe iff 2568 * we have less than 1/2 the original window's 2569 * worth of data in flight. 
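 *
 * Sketch with assumed numbers: 20 maxseg-sized segments between
 * snd_fack and snd_nxt plus 4 segments of rexmitted SACK data
 * give awnd = 24 segments; with snd_ssthresh at 30 segments the
 * code below grows snd_cwnd by one maxseg, clamped to ssthresh.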
2570 */ 2571 if (V_tcp_do_rfc6675_pipe) 2572 awnd = tcp_compute_pipe(tp); 2573 else 2574 awnd = (tp->snd_nxt - tp->snd_fack) + 2575 tp->sackhint.sack_bytes_rexmit; 2576 2577 if (awnd < tp->snd_ssthresh) { 2578 tp->snd_cwnd += maxseg; 2579 if (tp->snd_cwnd > tp->snd_ssthresh) 2580 tp->snd_cwnd = tp->snd_ssthresh; 2581 } 2582 } else 2583 tp->snd_cwnd += maxseg; 2584 (void) tp->t_fb->tfb_tcp_output(tp); 2585 goto drop; 2586 } else if (tp->t_dupacks == tcprexmtthresh) { 2587 tcp_seq onxt = tp->snd_nxt; 2588 2589 /* 2590 * If we're doing sack, check to 2591 * see if we're already in sack 2592 * recovery. If we're not doing sack, 2593 * check to see if we're in newreno 2594 * recovery. 2595 */ 2596 if (tp->t_flags & TF_SACK_PERMIT) { 2597 if (IN_FASTRECOVERY(tp->t_flags)) { 2598 tp->t_dupacks = 0; 2599 break; 2600 } 2601 } else { 2602 if (SEQ_LEQ(th->th_ack, 2603 tp->snd_recover)) { 2604 tp->t_dupacks = 0; 2605 break; 2606 } 2607 } 2608 /* Congestion signal before ack. */ 2609 cc_cong_signal(tp, th, CC_NDUPACK); 2610 cc_ack_received(tp, th, CC_DUPACK); 2611 tcp_timer_activate(tp, TT_REXMT, 0); 2612 tp->t_rtttime = 0; 2613 if (tp->t_flags & TF_SACK_PERMIT) { 2614 TCPSTAT_INC( 2615 tcps_sack_recovery_episode); 2616 tp->sack_newdata = tp->snd_nxt; 2617 tp->snd_cwnd = maxseg; 2618 (void) tp->t_fb->tfb_tcp_output(tp); 2619 goto drop; 2620 } 2621 tp->snd_nxt = th->th_ack; 2622 tp->snd_cwnd = maxseg; 2623 (void) tp->t_fb->tfb_tcp_output(tp); 2624 KASSERT(tp->snd_limited <= 2, 2625 ("%s: tp->snd_limited too big", 2626 __func__)); 2627 tp->snd_cwnd = tp->snd_ssthresh + 2628 maxseg * 2629 (tp->t_dupacks - tp->snd_limited); 2630 if (SEQ_GT(onxt, tp->snd_nxt)) 2631 tp->snd_nxt = onxt; 2632 goto drop; 2633 } else if (V_tcp_do_rfc3042) { 2634 /* 2635 * Process first and second duplicate 2636 * ACKs. Each indicates a segment 2637 * leaving the network, creating room 2638 * for more. Make sure we can send a 2639 * packet on reception of each duplicate 2640 * ACK by increasing snd_cwnd by one 2641 * segment. Restore the original 2642 * snd_cwnd after packet transmission. 2643 */ 2644 cc_ack_received(tp, th, CC_DUPACK); 2645 u_long oldcwnd = tp->snd_cwnd; 2646 tcp_seq oldsndmax = tp->snd_max; 2647 u_int sent; 2648 int avail; 2649 2650 KASSERT(tp->t_dupacks == 1 || 2651 tp->t_dupacks == 2, 2652 ("%s: dupacks not 1 or 2", 2653 __func__)); 2654 if (tp->t_dupacks == 1) 2655 tp->snd_limited = 0; 2656 tp->snd_cwnd = 2657 (tp->snd_nxt - tp->snd_una) + 2658 (tp->t_dupacks - tp->snd_limited) * 2659 maxseg; 2660 /* 2661 * Only call tcp_output when there 2662 * is new data available to be sent. 2663 * Otherwise we would send pure ACKs. 2664 */ 2665 SOCKBUF_LOCK(&so->so_snd); 2666 avail = sbavail(&so->so_snd) - 2667 (tp->snd_nxt - tp->snd_una); 2668 SOCKBUF_UNLOCK(&so->so_snd); 2669 if (avail > 0) 2670 (void) tp->t_fb->tfb_tcp_output(tp); 2671 sent = tp->snd_max - oldsndmax; 2672 if (sent > maxseg) { 2673 KASSERT((tp->t_dupacks == 2 && 2674 tp->snd_limited == 0) || 2675 (sent == maxseg + 1 && 2676 tp->t_flags & TF_SENTFIN), 2677 ("%s: sent too much", 2678 __func__)); 2679 tp->snd_limited = 2; 2680 } else if (sent > 0) 2681 ++tp->snd_limited; 2682 tp->snd_cwnd = oldcwnd; 2683 goto drop; 2684 } 2685 } 2686 break; 2687 } else { 2688 /* 2689 * This ack is advancing the left edge, reset the 2690 * counter. 2691 */ 2692 tp->t_dupacks = 0; 2693 /* 2694 * If this ack also has new SACK info, increment the 2695 * counter as per rfc6675. 
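 * (Example: an ACK that advances snd_una but also reports a new
 * SACK block above it still counts toward the dupack threshold,
 * since the new SACK implies at least one hole ahead of it.)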
2696 */ 2697 if ((tp->t_flags & TF_SACK_PERMIT) && sack_changed) 2698 tp->t_dupacks++; 2699 } 2700 2701 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2702 ("%s: th_ack <= snd_una", __func__)); 2703 2704 /* 2705 * If the congestion window was inflated to account 2706 * for the other side's cached packets, retract it. 2707 */ 2708 if (IN_FASTRECOVERY(tp->t_flags)) { 2709 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2710 if (tp->t_flags & TF_SACK_PERMIT) 2711 tcp_sack_partialack(tp, th); 2712 else 2713 tcp_newreno_partial_ack(tp, th); 2714 } else 2715 cc_post_recovery(tp, th); 2716 } 2717 /* 2718 * If we reach this point, ACK is not a duplicate, 2719 * i.e., it ACKs something we sent. 2720 */ 2721 if (tp->t_flags & TF_NEEDSYN) { 2722 /* 2723 * T/TCP: Connection was half-synchronized, and our 2724 * SYN has been ACK'd (so connection is now fully 2725 * synchronized). Go to non-starred state, 2726 * increment snd_una for ACK of SYN, and check if 2727 * we can do window scaling. 2728 */ 2729 tp->t_flags &= ~TF_NEEDSYN; 2730 tp->snd_una++; 2731 /* Do window scaling? */ 2732 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2733 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2734 tp->rcv_scale = tp->request_r_scale; 2735 /* Send window already scaled. */ 2736 } 2737 } 2738 2739process_ACK: 2740 INP_WLOCK_ASSERT(tp->t_inpcb); 2741 2742 acked = BYTES_THIS_ACK(tp, th); 2743 KASSERT(acked >= 0, ("%s: acked unexpectedly negative " 2744 "(tp->snd_una=%u, th->th_ack=%u, tp=%p, m=%p)", __func__, 2745 tp->snd_una, th->th_ack, tp, m)); 2746 TCPSTAT_INC(tcps_rcvackpack); 2747 TCPSTAT_ADD(tcps_rcvackbyte, acked); 2748 2749 /* 2750 * If we just performed our first retransmit, and the ACK 2751 * arrives within our recovery window, then it was a mistake 2752 * to do the retransmit in the first place. Recover our 2753 * original cwnd and ssthresh, and proceed to transmit where 2754 * we left off. 2755 */ 2756 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID && 2757 (int)(ticks - tp->t_badrxtwin) < 0) 2758 cc_cong_signal(tp, th, CC_RTO_ERR); 2759 2760 /* 2761 * If we have a timestamp reply, update smoothed 2762 * round trip time. If no timestamp is present but 2763 * transmit timer is running and timed sequence 2764 * number was acked, update smoothed round trip time. 2765 * Since we now have an rtt measurement, cancel the 2766 * timer backoff (cf., Phil Karn's retransmit alg.). 2767 * Recompute the initial retransmit timer. 2768 * 2769 * Some boxes send broken timestamp replies 2770 * during the SYN+ACK phase; ignore 2771 * timestamps of 0, or we could calculate a 2772 * huge RTT and blow up the retransmit timer. 2773 */ 2774 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) { 2775 u_int t; 2776 2777 t = tcp_ts_getticks() - to.to_tsecr; 2778 if (!tp->t_rttlow || tp->t_rttlow > t) 2779 tp->t_rttlow = t; 2780 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1); 2781 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2782 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2783 tp->t_rttlow = ticks - tp->t_rtttime; 2784 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2785 } 2786 2787 /* 2788 * If all outstanding data is acked, stop retransmit 2789 * timer and remember to restart (more output or persist). 2790 * If there is more data to be acked, restart retransmit 2791 * timer, using current (possibly backed-off) value.
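 *
 * Worked RTT sample for the timestamp path above (tick values
 * assumed): tcp_ts_getticks() = 10050 and to.to_tsecr = 10000
 * give t = 50, and tcp_xmit_timer() is fed TCP_TS_TO_TICKS(50)
 * + 1; a bogus tsecr of 0 is skipped entirely.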
2792 */ 2793 if (th->th_ack == tp->snd_max) { 2794 tcp_timer_activate(tp, TT_REXMT, 0); 2795 needoutput = 1; 2796 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2797 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2798 2799 /* 2800 * If no data (only SYN) was ACK'd, 2801 * skip rest of ACK processing. 2802 */ 2803 if (acked == 0) 2804 goto step6; 2805 2806 /* 2807 * Let the congestion control algorithm update congestion 2808 * control related information. This typically means increasing 2809 * the congestion window. 2810 */ 2811 cc_ack_received(tp, th, CC_ACK); 2812 2813 SOCKBUF_LOCK(&so->so_snd); 2814 if (acked > sbavail(&so->so_snd)) { 2815 if (tp->snd_wnd >= sbavail(&so->so_snd)) 2816 tp->snd_wnd -= sbavail(&so->so_snd); 2817 else 2818 tp->snd_wnd = 0; 2819 mfree = sbcut_locked(&so->so_snd, 2820 (int)sbavail(&so->so_snd)); 2821 ourfinisacked = 1; 2822 } else { 2823 mfree = sbcut_locked(&so->so_snd, acked); 2824 if (tp->snd_wnd >= (u_long) acked) 2825 tp->snd_wnd -= acked; 2826 else 2827 tp->snd_wnd = 0; 2828 ourfinisacked = 0; 2829 } 2830 /* NB: sowwakeup_locked() does an implicit unlock. */ 2831 sowwakeup_locked(so); 2832 m_freem(mfree); 2833 /* Detect una wraparound. */ 2834 if (!IN_RECOVERY(tp->t_flags) && 2835 SEQ_GT(tp->snd_una, tp->snd_recover) && 2836 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2837 tp->snd_recover = th->th_ack - 1; 2838 /* XXXLAS: Can this be moved up into cc_post_recovery? */ 2839 if (IN_RECOVERY(tp->t_flags) && 2840 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2841 EXIT_RECOVERY(tp->t_flags); 2842 } 2843 tp->snd_una = th->th_ack; 2844 if (tp->t_flags & TF_SACK_PERMIT) { 2845 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2846 tp->snd_recover = tp->snd_una; 2847 } 2848 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2849 tp->snd_nxt = tp->snd_una; 2850 2851 switch (tp->t_state) { 2852 2853 /* 2854 * In FIN_WAIT_1 STATE in addition to the processing 2855 * for the ESTABLISHED state if our FIN is now acknowledged 2856 * then enter FIN_WAIT_2. 2857 */ 2858 case TCPS_FIN_WAIT_1: 2859 if (ourfinisacked) { 2860 /* 2861 * If we can't receive any more 2862 * data, then closing user can proceed. 2863 * Starting the timer is contrary to the 2864 * specification, but if we don't get a FIN 2865 * we'll hang forever. 2866 * 2867 * XXXjl: 2868 * we should release the tp also, and use a 2869 * compressed state. 2870 */ 2871 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2872 soisdisconnected(so); 2873 tcp_timer_activate(tp, TT_2MSL, 2874 (tcp_fast_finwait2_recycle ? 2875 tcp_finwait2_timeout : 2876 TP_MAXIDLE(tp))); 2877 } 2878 tcp_state_change(tp, TCPS_FIN_WAIT_2); 2879 } 2880 break; 2881 2882 /* 2883 * In CLOSING STATE in addition to the processing for 2884 * the ESTABLISHED state if the ACK acknowledges our FIN 2885 * then enter the TIME-WAIT state, otherwise ignore 2886 * the segment. 2887 */ 2888 case TCPS_CLOSING: 2889 if (ourfinisacked) { 2890 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2891 tcp_twstart(tp); 2892 INP_INFO_RUNLOCK(&V_tcbinfo); 2893 m_freem(m); 2894 return; 2895 } 2896 break; 2897 2898 /* 2899 * In LAST_ACK, we may still be waiting for data to drain 2900 * and/or to be acked, as well as for the ack of our FIN. 2901 * If our FIN is now acknowledged, delete the TCB, 2902 * enter the closed state and return. 2903 */ 2904 case TCPS_LAST_ACK: 2905 if (ourfinisacked) { 2906 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 2907 tp = tcp_close(tp); 2908 goto drop; 2909 } 2910 break; 2911 } 2912 } 2913 2914step6: 2915 INP_WLOCK_ASSERT(tp->t_inpcb); 2916 2917 /* 2918 * Update window information. 
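 * snd_wl1/snd_wl2 record the seq/ack of the segment that last
 * updated the window, so the SEQ_LT() tests below accept only
 * strictly newer segments and stale ones cannot move snd_wnd.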
2919 * Don't look at window if no ACK: TAC's send garbage on first SYN. 2920 */ 2921 if ((thflags & TH_ACK) && 2922 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2923 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2924 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2925 /* keep track of pure window updates */ 2926 if (tlen == 0 && 2927 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2928 TCPSTAT_INC(tcps_rcvwinupd); 2929 tp->snd_wnd = tiwin; 2930 tp->snd_wl1 = th->th_seq; 2931 tp->snd_wl2 = th->th_ack; 2932 if (tp->snd_wnd > tp->max_sndwnd) 2933 tp->max_sndwnd = tp->snd_wnd; 2934 needoutput = 1; 2935 } 2936 2937 /* 2938 * Process segments with URG. 2939 */ 2940 if ((thflags & TH_URG) && th->th_urp && 2941 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2942 /* 2943 * This is a kludge, but if we receive and accept 2944 * random urgent pointers, we'll crash in 2945 * soreceive. It's hard to imagine someone 2946 * actually wanting to send this much urgent data. 2947 */ 2948 SOCKBUF_LOCK(&so->so_rcv); 2949 if (th->th_urp + sbavail(&so->so_rcv) > sb_max) { 2950 th->th_urp = 0; /* XXX */ 2951 thflags &= ~TH_URG; /* XXX */ 2952 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2953 goto dodata; /* XXX */ 2954 } 2955 /* 2956 * If this segment advances the known urgent pointer, 2957 * then mark the data stream. This should not happen 2958 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2959 * a FIN has been received from the remote side. 2960 * In these states we ignore the URG. 2961 * 2962 * According to RFC961 (Assigned Protocols), 2963 * the urgent pointer points to the last octet 2964 * of urgent data. We continue, however, 2965 * to consider it to indicate the first octet 2966 * of data past the urgent section as the original 2967 * spec states (in one of two places). 2968 */ 2969 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2970 tp->rcv_up = th->th_seq + th->th_urp; 2971 so->so_oobmark = sbavail(&so->so_rcv) + 2972 (tp->rcv_up - tp->rcv_nxt) - 1; 2973 if (so->so_oobmark == 0) 2974 so->so_rcv.sb_state |= SBS_RCVATMARK; 2975 sohasoutofband(so); 2976 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2977 } 2978 SOCKBUF_UNLOCK(&so->so_rcv); 2979 /* 2980 * Remove out of band data so doesn't get presented to user. 2981 * This can happen independent of advancing the URG pointer, 2982 * but if two URG's are pending at once, some out-of-band 2983 * data may creep in... ick. 2984 */ 2985 if (th->th_urp <= (u_long)tlen && 2986 !(so->so_options & SO_OOBINLINE)) { 2987 /* hdr drop is delayed */ 2988 tcp_pulloutofband(so, th, m, drop_hdrlen); 2989 } 2990 } else { 2991 /* 2992 * If no out of band data is expected, 2993 * pull receive urgent pointer along 2994 * with the receive window. 2995 */ 2996 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2997 tp->rcv_up = tp->rcv_nxt; 2998 } 2999dodata: /* XXX */ 3000 INP_WLOCK_ASSERT(tp->t_inpcb); 3001 3002 /* 3003 * Process the segment text, merging it into the TCP sequencing queue, 3004 * and arranging for acknowledgment of receipt if necessary. 3005 * This process logically involves adjusting tp->rcv_wnd as data 3006 * is presented to the user (this happens in tcp_usrreq.c, 3007 * case PRU_RCVD). If a FIN has already been received on this 3008 * connection then we just ignore the text. 
3009 */ 3010 tfo_syn = ((tp->t_state == TCPS_SYN_RECEIVED) && 3011 (tp->t_flags & TF_FASTOPEN)); 3012 if ((tlen || (thflags & TH_FIN) || tfo_syn) && 3013 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3014 tcp_seq save_start = th->th_seq; 3015 m_adj(m, drop_hdrlen); /* delayed header drop */ 3016 /* 3017 * Insert segment which includes th into TCP reassembly queue 3018 * with control block tp. Set thflags to whether reassembly now 3019 * includes a segment with FIN. This handles the common case 3020 * inline (segment is the next to be received on an established 3021 * connection, and the queue is empty), avoiding linkage into 3022 * and removal from the queue and repetition of various 3023 * conversions. 3024 * Set DELACK for segments received in order, but ack 3025 * immediately when segments are out of order (so 3026 * fast retransmit can work). 3027 */ 3028 if (th->th_seq == tp->rcv_nxt && 3029 LIST_EMPTY(&tp->t_segq) && 3030 (TCPS_HAVEESTABLISHED(tp->t_state) || 3031 tfo_syn)) { 3032 if (DELAY_ACK(tp, tlen) || tfo_syn) 3033 tp->t_flags |= TF_DELACK; 3034 else 3035 tp->t_flags |= TF_ACKNOW; 3036 tp->rcv_nxt += tlen; 3037 thflags = th->th_flags & TH_FIN; 3038 TCPSTAT_INC(tcps_rcvpack); 3039 TCPSTAT_ADD(tcps_rcvbyte, tlen); 3040 SOCKBUF_LOCK(&so->so_rcv); 3041 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 3042 m_freem(m); 3043 else 3044 sbappendstream_locked(&so->so_rcv, m, 0); 3045 /* NB: sorwakeup_locked() does an implicit unlock. */ 3046 sorwakeup_locked(so); 3047 } else { 3048 /* 3049 * XXX: Due to the header drop above "th" is 3050 * theoretically invalid by now. Fortunately 3051 * m_adj() doesn't actually free any mbufs 3052 * when trimming from the head. 3053 */ 3054 thflags = tcp_reass(tp, th, &tlen, m); 3055 tp->t_flags |= TF_ACKNOW; 3056 } 3057 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT)) 3058 tcp_update_sack_list(tp, save_start, save_start + tlen); 3059#if 0 3060 /* 3061 * Note the amount of data that peer has sent into 3062 * our window, in order to estimate the sender's 3063 * buffer size. 3064 * XXX: Unused. 3065 */ 3066 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) 3067 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 3068 else 3069 len = so->so_rcv.sb_hiwat; 3070#endif 3071 } else { 3072 m_freem(m); 3073 thflags &= ~TH_FIN; 3074 } 3075 3076 /* 3077 * If FIN is received, ACK the FIN and let the user know 3078 * that the connection is closing. 3079 */ 3080 if (thflags & TH_FIN) { 3081 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 3082 socantrcvmore(so); 3083 /* 3084 * If connection is half-synchronized 3085 * (i.e., NEEDSYN flag on) then delay ACK, 3086 * so it may be piggybacked when SYN is sent. 3087 * Otherwise, since we received a FIN then no 3088 * more input can be expected, send ACK now. 3089 */ 3090 if (tp->t_flags & TF_NEEDSYN) 3091 tp->t_flags |= TF_DELACK; 3092 else 3093 tp->t_flags |= TF_ACKNOW; 3094 tp->rcv_nxt++; 3095 } 3096 switch (tp->t_state) { 3097 3098 /* 3099 * In SYN_RECEIVED and ESTABLISHED STATES 3100 * enter the CLOSE_WAIT state. 3101 */ 3102 case TCPS_SYN_RECEIVED: 3103 tp->t_starttime = ticks; 3104 /* FALLTHROUGH */ 3105 case TCPS_ESTABLISHED: 3106 tcp_state_change(tp, TCPS_CLOSE_WAIT); 3107 break; 3108 3109 /* 3110 * If still in FIN_WAIT_1 STATE, our FIN has not been acked, so 3111 * enter the CLOSING state. 3112 */ 3113 case TCPS_FIN_WAIT_1: 3114 tcp_state_change(tp, TCPS_CLOSING); 3115 break; 3116 3117 /* 3118 * In FIN_WAIT_2 state enter the TIME_WAIT state, 3119 * starting the time-wait timer, turning off the other 3120 * standard timers.
3121 */ 3122 case TCPS_FIN_WAIT_2: 3123 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 3124 KASSERT(ti_locked == TI_RLOCKED, ("%s: dodata " 3125 "TCP_FIN_WAIT_2 ti_locked: %d", __func__, 3126 ti_locked)); 3127 3128 tcp_twstart(tp); 3129 INP_INFO_RUNLOCK(&V_tcbinfo); 3130 return; 3131 } 3132 } 3133 if (ti_locked == TI_RLOCKED) 3134 INP_INFO_RUNLOCK(&V_tcbinfo); 3135 ti_locked = TI_UNLOCKED; 3136 3137#ifdef TCPDEBUG 3138 if (so->so_options & SO_DEBUG) 3139 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 3140 &tcp_savetcp, 0); 3141#endif 3142 TCP_PROBE3(debug__input, tp, th, mtod(m, const char *)); 3143 3144 /* 3145 * Return any desired output. 3146 */ 3147 if (needoutput || (tp->t_flags & TF_ACKNOW)) 3148 (void) tp->t_fb->tfb_tcp_output(tp); 3149 3150check_delack: 3151 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d", 3152 __func__, ti_locked)); 3153 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 3154 INP_WLOCK_ASSERT(tp->t_inpcb); 3155 3156 if (tp->t_flags & TF_DELACK) { 3157 tp->t_flags &= ~TF_DELACK; 3158 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 3159 } 3160 INP_WUNLOCK(tp->t_inpcb); 3161 return; 3162 3163dropafterack: 3164 /* 3165 * Generate an ACK dropping incoming segment if it occupies 3166 * sequence space, where the ACK reflects our state. 3167 * 3168 * We can now skip the test for the RST flag since all 3169 * paths to this code happen after packets containing 3170 * RST have been dropped. 3171 * 3172 * In the SYN-RECEIVED state, don't send an ACK unless the 3173 * segment we received passes the SYN-RECEIVED ACK test. 3174 * If it fails send a RST. This breaks the loop in the 3175 * "LAND" DoS attack, and also prevents an ACK storm 3176 * between two listening ports that have been sent forged 3177 * SYN segments, each with the source address of the other. 3178 */ 3179 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 3180 (SEQ_GT(tp->snd_una, th->th_ack) || 3181 SEQ_GT(th->th_ack, tp->snd_max)) ) { 3182 rstreason = BANDLIM_RST_OPENPORT; 3183 goto dropwithreset; 3184 } 3185#ifdef TCPDEBUG 3186 if (so->so_options & SO_DEBUG) 3187 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3188 &tcp_savetcp, 0); 3189#endif 3190 TCP_PROBE3(debug__input, tp, th, mtod(m, const char *)); 3191 if (ti_locked == TI_RLOCKED) 3192 INP_INFO_RUNLOCK(&V_tcbinfo); 3193 ti_locked = TI_UNLOCKED; 3194 3195 tp->t_flags |= TF_ACKNOW; 3196 (void) tp->t_fb->tfb_tcp_output(tp); 3197 INP_WUNLOCK(tp->t_inpcb); 3198 m_freem(m); 3199 return; 3200 3201dropwithreset: 3202 if (ti_locked == TI_RLOCKED) 3203 INP_INFO_RUNLOCK(&V_tcbinfo); 3204 ti_locked = TI_UNLOCKED; 3205 3206 if (tp != NULL) { 3207 tcp_dropwithreset(m, th, tp, tlen, rstreason); 3208 INP_WUNLOCK(tp->t_inpcb); 3209 } else 3210 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 3211 return; 3212 3213drop: 3214 if (ti_locked == TI_RLOCKED) { 3215 INP_INFO_RUNLOCK(&V_tcbinfo); 3216 ti_locked = TI_UNLOCKED; 3217 } 3218#ifdef INVARIANTS 3219 else 3220 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 3221#endif 3222 3223 /* 3224 * Drop space held by incoming segment and return. 3225 */ 3226#ifdef TCPDEBUG 3227 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 3228 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3229 &tcp_savetcp, 0); 3230#endif 3231 TCP_PROBE3(debug__input, tp, th, mtod(m, const char *)); 3232 if (tp != NULL) 3233 INP_WUNLOCK(tp->t_inpcb); 3234 m_freem(m); 3235} 3236 3237/* 3238 * Issue RST and make ACK acceptable to originator of segment. 3239 * The mbuf must still include the original packet header. 
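 * If the offending segment had ACK set, the reply is a bare RST
 * using the peer's ack value as its sequence number; otherwise
 * it is RST|ACK acknowledging th_seq + tlen (counting SYN and
 * FIN as one octet each) from sequence zero.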
3240 * tp may be NULL. 3241 */ 3242void 3243tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 3244 int tlen, int rstreason) 3245{ 3246#ifdef INET 3247 struct ip *ip; 3248#endif 3249#ifdef INET6 3250 struct ip6_hdr *ip6; 3251#endif 3252 3253 if (tp != NULL) { 3254 INP_WLOCK_ASSERT(tp->t_inpcb); 3255 } 3256 3257 /* Don't bother if destination was broadcast/multicast. */ 3258 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 3259 goto drop; 3260#ifdef INET6 3261 if (mtod(m, struct ip *)->ip_v == 6) { 3262 ip6 = mtod(m, struct ip6_hdr *); 3263 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 3264 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 3265 goto drop; 3266 /* IPv6 anycast check is done at tcp6_input() */ 3267 } 3268#endif 3269#if defined(INET) && defined(INET6) 3270 else 3271#endif 3272#ifdef INET 3273 { 3274 ip = mtod(m, struct ip *); 3275 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 3276 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 3277 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 3278 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 3279 goto drop; 3280 } 3281#endif 3282 3283 /* Perform bandwidth limiting. */ 3284 if (badport_bandlim(rstreason) < 0) 3285 goto drop; 3286 3287 /* tcp_respond consumes the mbuf chain. */ 3288 if (th->th_flags & TH_ACK) { 3289 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 3290 th->th_ack, TH_RST); 3291 } else { 3292 if (th->th_flags & TH_SYN) 3293 tlen++; 3294 if (th->th_flags & TH_FIN) 3295 tlen++; 3296 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 3297 (tcp_seq)0, TH_RST|TH_ACK); 3298 } 3299 return; 3300drop: 3301 m_freem(m); 3302} 3303 3304/* 3305 * Parse TCP options and place in tcpopt. 3306 */ 3307void 3308tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 3309{ 3310 int opt, optlen; 3311 3312 to->to_flags = 0; 3313 for (; cnt > 0; cnt -= optlen, cp += optlen) { 3314 opt = cp[0]; 3315 if (opt == TCPOPT_EOL) 3316 break; 3317 if (opt == TCPOPT_NOP) 3318 optlen = 1; 3319 else { 3320 if (cnt < 2) 3321 break; 3322 optlen = cp[1]; 3323 if (optlen < 2 || optlen > cnt) 3324 break; 3325 } 3326 switch (opt) { 3327 case TCPOPT_MAXSEG: 3328 if (optlen != TCPOLEN_MAXSEG) 3329 continue; 3330 if (!(flags & TO_SYN)) 3331 continue; 3332 to->to_flags |= TOF_MSS; 3333 bcopy((char *)cp + 2, 3334 (char *)&to->to_mss, sizeof(to->to_mss)); 3335 to->to_mss = ntohs(to->to_mss); 3336 break; 3337 case TCPOPT_WINDOW: 3338 if (optlen != TCPOLEN_WINDOW) 3339 continue; 3340 if (!(flags & TO_SYN)) 3341 continue; 3342 to->to_flags |= TOF_SCALE; 3343 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 3344 break; 3345 case TCPOPT_TIMESTAMP: 3346 if (optlen != TCPOLEN_TIMESTAMP) 3347 continue; 3348 to->to_flags |= TOF_TS; 3349 bcopy((char *)cp + 2, 3350 (char *)&to->to_tsval, sizeof(to->to_tsval)); 3351 to->to_tsval = ntohl(to->to_tsval); 3352 bcopy((char *)cp + 6, 3353 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 3354 to->to_tsecr = ntohl(to->to_tsecr); 3355 break; 3356 case TCPOPT_SIGNATURE: 3357 /* 3358 * In order to reply to a host which has set the 3359 * TCP_SIGNATURE option in its initial SYN, we have 3360 * to record the fact that the option was observed 3361 * here for the syncache code to perform the correct 3362 * response. 
3363 */ 3364 if (optlen != TCPOLEN_SIGNATURE) 3365 continue; 3366 to->to_flags |= TOF_SIGNATURE; 3367 to->to_signature = cp + 2; 3368 break; 3369 case TCPOPT_SACK_PERMITTED: 3370 if (optlen != TCPOLEN_SACK_PERMITTED) 3371 continue; 3372 if (!(flags & TO_SYN)) 3373 continue; 3374 if (!V_tcp_do_sack) 3375 continue; 3376 to->to_flags |= TOF_SACKPERM; 3377 break; 3378 case TCPOPT_SACK: 3379 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 3380 continue; 3381 if (flags & TO_SYN) 3382 continue; 3383 to->to_flags |= TOF_SACK; 3384 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 3385 to->to_sacks = cp + 2; 3386 TCPSTAT_INC(tcps_sack_rcv_blocks); 3387 break; 3388#ifdef TCP_RFC7413 3389 case TCPOPT_FAST_OPEN: 3390 if ((optlen != TCPOLEN_FAST_OPEN_EMPTY) && 3391 ((optlen < TCPOLEN_FAST_OPEN_MIN) || 3392 (optlen > TCPOLEN_FAST_OPEN_MAX))) 3393 continue; 3394 if (!(flags & TO_SYN)) 3395 continue; 3396 if (!V_tcp_fastopen_enabled) 3397 continue; 3398 to->to_flags |= TOF_FASTOPEN; 3399 to->to_tfo_len = optlen - 2; 3400 to->to_tfo_cookie = to->to_tfo_len ? cp + 2 : NULL; 3401 break; 3402#endif 3403 default: 3404 continue; 3405 } 3406 } 3407} 3408 3409/* 3410 * Pull out of band byte out of a segment so 3411 * it doesn't appear in the user's data queue. 3412 * It is still reflected in the segment length for 3413 * sequencing purposes. 3414 */ 3415void 3416tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 3417 int off) 3418{ 3419 int cnt = off + th->th_urp - 1; 3420 3421 while (cnt >= 0) { 3422 if (m->m_len > cnt) { 3423 char *cp = mtod(m, caddr_t) + cnt; 3424 struct tcpcb *tp = sototcpcb(so); 3425 3426 INP_WLOCK_ASSERT(tp->t_inpcb); 3427 3428 tp->t_iobc = *cp; 3429 tp->t_oobflags |= TCPOOB_HAVEDATA; 3430 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 3431 m->m_len--; 3432 if (m->m_flags & M_PKTHDR) 3433 m->m_pkthdr.len--; 3434 return; 3435 } 3436 cnt -= m->m_len; 3437 m = m->m_next; 3438 if (m == NULL) 3439 break; 3440 } 3441 panic("tcp_pulloutofband"); 3442} 3443 3444/* 3445 * Collect new round-trip time estimate 3446 * and update averages and current timeout. 3447 */ 3448void 3449tcp_xmit_timer(struct tcpcb *tp, int rtt) 3450{ 3451 int delta; 3452 3453 INP_WLOCK_ASSERT(tp->t_inpcb); 3454 3455 TCPSTAT_INC(tcps_rttupdated); 3456 tp->t_rttupdated++; 3457 if (tp->t_srtt != 0) { 3458 /* 3459 * srtt is stored as fixed point with 5 bits after the 3460 * binary point (i.e., scaled by 32). The following magic 3461 * is equivalent to the smoothing algorithm in rfc793 with 3462 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 3463 * point). Adjust rtt to origin 0. 3464 */ 3465 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3466 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3467 3468 if ((tp->t_srtt += delta) <= 0) 3469 tp->t_srtt = 1; 3470 3471 /* 3472 * We accumulate a smoothed rtt variance (actually, a 3473 * smoothed mean difference), then set the retransmit 3474 * timer to smoothed rtt + 4 times the smoothed variance. 3475 * rttvar is stored as fixed point with 4 bits after the 3476 * binary point (scaled by 16). The following is 3477 * equivalent to rfc793 smoothing with an alpha of .75 3478 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 3479 * rfc793's wired-in beta.
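 *
 * Worked example with assumed values: srtt = 8 ticks and a new
 * sample rtt = 12 move srtt one eighth of the way toward the
 * sample, to 8.5 ticks in fixed point; rttvar likewise absorbs
 * |delta| / 4.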
3480 */ 3481 if (delta < 0) 3482 delta = -delta; 3483 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3484 if ((tp->t_rttvar += delta) <= 0) 3485 tp->t_rttvar = 1; 3486 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 3487 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3488 } else { 3489 /* 3490 * No rtt measurement yet - use the unsmoothed rtt. 3491 * Set the variance to half the rtt (so our first 3492 * retransmit happens at 3*rtt). 3493 */ 3494 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3495 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3496 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3497 } 3498 tp->t_rtttime = 0; 3499 tp->t_rxtshift = 0; 3500 3501 /* 3502 * the retransmit should happen at rtt + 4 * rttvar. 3503 * Because of the way we do the smoothing, srtt and rttvar 3504 * will each average +1/2 tick of bias. When we compute 3505 * the retransmit timer, we want 1/2 tick of rounding and 3506 * 1 extra tick because of +-1/2 tick uncertainty in the 3507 * firing of the timer. The bias will give us exactly the 3508 * 1.5 tick we need. But, because the bias is 3509 * statistical, we have to test that we don't drop below 3510 * the minimum feasible timer (which is 2 ticks). 3511 */ 3512 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3513 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 3514 3515 /* 3516 * We received an ack for a packet that wasn't retransmitted; 3517 * it is probably safe to discard any error indications we've 3518 * received recently. This isn't quite right, but close enough 3519 * for now (a route might have failed after we sent a segment, 3520 * and the return path might not be symmetrical). 3521 */ 3522 tp->t_softerror = 0; 3523} 3524 3525/* 3526 * Determine a reasonable value for maxseg size. 3527 * If the route is known, check route for mtu. 3528 * If none, use an mss that can be handled on the outgoing interface 3529 * without forcing IP to fragment. If no route is found, route has no mtu, 3530 * or the destination isn't local, use a default, hopefully conservative 3531 * size (usually 512 or the default IP max size, but no more than the mtu 3532 * of the interface), as we can't discover anything about intervening 3533 * gateways or networks. We also initialize the congestion/slow start 3534 * window to be a single segment if the destination isn't local. 3535 * While looking at the routing entry, we also initialize other path-dependent 3536 * parameters from pre-set or cached values in the routing entry. 3537 * 3538 * NOTE that resulting t_maxseg doesn't include space for TCP options or 3539 * IP options, e.g. IPSEC data, since length of this data may vary, and 3540 * thus it is calculated for every segment separately in tcp_output(). 3541 * 3542 * NOTE that this routine is only called when we process an incoming 3543 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS 3544 * settings are handled in tcp_mssopt(). 3545 */ 3546void 3547tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer, 3548 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap) 3549{ 3550 int mss = 0; 3551 u_long maxmtu = 0; 3552 struct inpcb *inp = tp->t_inpcb; 3553 struct hc_metrics_lite metrics; 3554#ifdef INET6 3555 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3556 size_t min_protoh = isipv6 ? 
3557 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 3558 sizeof (struct tcpiphdr); 3559#else 3560 const size_t min_protoh = sizeof(struct tcpiphdr); 3561#endif 3562 3563 INP_WLOCK_ASSERT(tp->t_inpcb); 3564 3565 if (mtuoffer != -1) { 3566 KASSERT(offer == -1, ("%s: conflict", __func__)); 3567 offer = mtuoffer - min_protoh; 3568 } 3569 3570 /* Initialize. */ 3571#ifdef INET6 3572 if (isipv6) { 3573 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap); 3574 tp->t_maxseg = V_tcp_v6mssdflt; 3575 } 3576#endif 3577#if defined(INET) && defined(INET6) 3578 else 3579#endif 3580#ifdef INET 3581 { 3582 maxmtu = tcp_maxmtu(&inp->inp_inc, cap); 3583 tp->t_maxseg = V_tcp_mssdflt; 3584 } 3585#endif 3586 3587 /* 3588 * No route to sender, stay with default mss and return. 3589 */ 3590 if (maxmtu == 0) { 3591 /* 3592 * In case we return early we need to initialize metrics 3593 * to a defined state as tcp_hc_get() would do for us 3594 * if there was no cache hit. 3595 */ 3596 if (metricptr != NULL) 3597 bzero(metricptr, sizeof(struct hc_metrics_lite)); 3598 return; 3599 } 3600 3601 /* What have we got? */ 3602 switch (offer) { 3603 case 0: 3604 /* 3605 * Offer == 0 means that there was no MSS on the SYN 3606 * segment; in this case we use tcp_mssdflt as 3607 * already assigned to t_maxseg above. 3608 */ 3609 offer = tp->t_maxseg; 3610 break; 3611 3612 case -1: 3613 /* 3614 * Offer == -1 means that we didn't receive SYN yet. 3615 */ 3616 /* FALLTHROUGH */ 3617 3618 default: 3619 /* 3620 * Prevent DoS attack with too small MSS. Round up 3621 * to at least minmss. 3622 */ 3623 offer = max(offer, V_tcp_minmss); 3624 } 3625 3626 /* 3627 * rmx information is now retrieved from tcp_hostcache. 3628 */ 3629 tcp_hc_get(&inp->inp_inc, &metrics); 3630 if (metricptr != NULL) 3631 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite)); 3632 3633 /* 3634 * If there's a discovered mtu in tcp hostcache, use it. 3635 * Else, use the link mtu. 3636 */ 3637 if (metrics.rmx_mtu) 3638 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 3639 else { 3640#ifdef INET6 3641 if (isipv6) { 3642 mss = maxmtu - min_protoh; 3643 if (!V_path_mtu_discovery && 3644 !in6_localaddr(&inp->in6p_faddr)) 3645 mss = min(mss, V_tcp_v6mssdflt); 3646 } 3647#endif 3648#if defined(INET) && defined(INET6) 3649 else 3650#endif 3651#ifdef INET 3652 { 3653 mss = maxmtu - min_protoh; 3654 if (!V_path_mtu_discovery && 3655 !in_localaddr(inp->inp_faddr)) 3656 mss = min(mss, V_tcp_mssdflt); 3657 } 3658#endif 3659 /* 3660 * XXX - The above conditional (mss = maxmtu - min_protoh) 3661 * probably violates the TCP spec. 3662 * The problem is that, since we don't know the 3663 * other end's MSS, we are supposed to use a conservative 3664 * default. But, if we do that, then MTU discovery will 3665 * never actually take place, because the conservative 3666 * default is much less than the MTUs typically seen 3667 * on the Internet today. For the moment, we'll sweep 3668 * this under the carpet. 3669 * 3670 * The conservative default might not actually be a problem 3671 * if the only case this occurs is when sending an initial 3672 * SYN with options and data to a host we've never talked 3673 * to before. Then, they will reply with an MSS value which 3674 * will get recorded and the new parameters should get 3675 * recomputed. For Further Study. 3676 */ 3677 } 3678 mss = min(mss, offer); 3679 3680 /* 3681 * Sanity check: make sure that maxseg will be large 3682 * enough to allow some data on segments even if 3683 * all the option space is used (40 bytes).

void
tcp_mss(struct tcpcb *tp, int offer)
{
	int mss;
	u_long bufsize;
	struct inpcb *inp;
	struct socket *so;
	struct hc_metrics_lite metrics;
	struct tcp_ifcap cap;

	KASSERT(tp != NULL, ("%s: tp == NULL", __func__));

	bzero(&cap, sizeof(cap));
	tcp_mss_update(tp, offer, -1, &metrics, &cap);

	mss = tp->t_maxseg;
	inp = tp->t_inpcb;

	/*
	 * If there's a pipesize, change the socket buffer to that size;
	 * don't change it if sb_hiwat differs from the default (then it
	 * has been changed on purpose with setsockopt).
	 * Make the socket buffers an integral number of mss units;
	 * if the mss is larger than the socket buffer, decrease the mss.
	 */
	so = inp->inp_socket;
	SOCKBUF_LOCK(&so->so_snd);
	if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe)
		bufsize = metrics.rmx_sendpipe;
	else
		bufsize = so->so_snd.sb_hiwat;
	if (bufsize < mss)
		mss = bufsize;
	else {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_snd.sb_hiwat)
			(void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_snd);
	/*
	 * Sanity check: make sure that maxseg will be large
	 * enough to allow some data on segments even if all
	 * the option space is used (40 bytes).  Otherwise
	 * funny things may happen in tcp_output.
	 *
	 * XXXGL: shouldn't we reserve space for IP/IPv6 options?
	 */
	tp->t_maxseg = max(mss, 64);

	SOCKBUF_LOCK(&so->so_rcv);
	if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe)
		bufsize = metrics.rmx_recvpipe;
	else
		bufsize = so->so_rcv.sb_hiwat;
	if (bufsize > mss) {
		bufsize = roundup(bufsize, mss);
		if (bufsize > sb_max)
			bufsize = sb_max;
		if (bufsize > so->so_rcv.sb_hiwat)
			(void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
	}
	SOCKBUF_UNLOCK(&so->so_rcv);

	/* Check the interface for TSO capabilities. */
	if (cap.ifcap & CSUM_TSO) {
		tp->t_flags |= TF_TSO;
		tp->t_tsomax = cap.tsomax;
		tp->t_tsomaxsegcount = cap.tsomaxsegcount;
		tp->t_tsomaxsegsize = cap.tsomaxsegsize;
	}
}

/*
 * Determine the MSS option to send on an outgoing SYN.
 */
int
tcp_mssopt(struct in_conninfo *inc)
{
	int mss = 0;
	u_long maxmtu = 0;
	u_long thcmtu = 0;
	size_t min_protoh;

	KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));

#ifdef INET6
	if (inc->inc_flags & INC_ISIPV6) {
		mss = V_tcp_v6mssdflt;
		maxmtu = tcp_maxmtu6(inc, NULL);
		min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		mss = V_tcp_mssdflt;
		maxmtu = tcp_maxmtu(inc, NULL);
		min_protoh = sizeof(struct tcpiphdr);
	}
#endif
#if defined(INET6) || defined(INET)
	thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
#endif

	if (maxmtu && thcmtu)
		mss = min(maxmtu, thcmtu) - min_protoh;
	else if (maxmtu || thcmtu)
		mss = max(maxmtu, thcmtu) - min_protoh;

	return (mss);
}
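
/*
 * The min()/max() pair above reads oddly but is deliberate: when both
 * the interface mtu and a hostcache mtu are known, the smaller (i.e.
 * safer) one wins; when only one of the two is known, max() simply
 * selects the nonzero value, since the other operand is 0.  For
 * example (hypothetical values), maxmtu = 1500 with thcmtu = 1400
 * yields an MSS option of 1400 - min_protoh, while maxmtu = 1500 with
 * no hostcache entry (thcmtu = 0) yields 1500 - min_protoh.
 */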

/*
 * When a partial ack arrives, force the retransmission of the
 * next unacknowledged segment.  Do not clear tp->t_dupacks.
 * By setting snd_nxt to th_ack, this forces the retransmission timer
 * to be started again.
 */
void
tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
{
	tcp_seq onxt = tp->snd_nxt;
	u_long ocwnd = tp->snd_cwnd;
	u_int maxseg = tcp_maxseg(tp);

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_timer_activate(tp, TT_REXMT, 0);
	tp->t_rtttime = 0;
	tp->snd_nxt = th->th_ack;
	/*
	 * Set snd_cwnd to one segment beyond acknowledged offset.
	 * (tp->snd_una has not yet been updated when this function is called.)
	 */
	tp->snd_cwnd = maxseg + BYTES_THIS_ACK(tp, th);
	tp->t_flags |= TF_ACKNOW;
	(void) tp->t_fb->tfb_tcp_output(tp);
	tp->snd_cwnd = ocwnd;
	if (SEQ_GT(onxt, tp->snd_nxt))
		tp->snd_nxt = onxt;
	/*
	 * Partial window deflation.  Relies on fact that tp->snd_una
	 * not updated yet.
	 */
	if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th))
		tp->snd_cwnd -= BYTES_THIS_ACK(tp, th);
	else
		tp->snd_cwnd = 0;
	tp->snd_cwnd += maxseg;
}

int
tcp_compute_pipe(struct tcpcb *tp)
{
	return (tp->snd_max - tp->snd_una +
		tp->sackhint.sack_bytes_rexmit -
		tp->sackhint.sacked_bytes);
}
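
/*
 * Note: tcp_compute_pipe() corresponds to the "pipe" estimate of
 * RFC 6675: bytes sent but not yet cumulatively acknowledged
 * (snd_max - snd_una), plus bytes retransmitted into SACK holes,
 * minus bytes the receiver has reported as SACKed; the result
 * approximates the amount of data still in flight.  (This reading is
 * an interpretation of the formula above; the authoritative
 * definition is RFC 6675, Section 4.)
 */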