tcp_input.c revision 270052
/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2007-2008,2010
 *	Swinburne University of Technology, Melbourne, Australia.
 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * Copyright (c) 2010-2011 Juniper Networks, Inc.
 * All rights reserved.
 *
 * Portions of this software were developed at the Centre for Advanced Internet
 * Architectures, Swinburne University of Technology, by Lawrence Stewart,
 * James Healy and David Hayes, made possible in part by a grant from the Cisco
 * University Research Program Fund at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Portions of this software were developed by Robert N. M. Watson under
 * contract to Juniper Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/netinet/tcp_input.c 270052 2014-08-16 13:53:05Z bz $");

#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_kdtrace.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/hhook.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/sdt.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#define TCPSTATES		/* for logging */

#include <netinet/cc.h>
#include <netinet/in.h>
#include <netinet/in_kdtrace.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#ifdef TCP_OFFLOAD
#include <netinet/tcp_offload.h>
#endif

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /*IPSEC*/

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

const int tcprexmtthresh = 3;

int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0,
    "Log all incoming TCP segments to closed ports");

VNET_DEFINE(int, blackhole) = 0;
#define	V_blackhole		VNET(blackhole)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &VNET_NAME(blackhole), 0,
    "Do not send RST on segments to closed ports");

VNET_DEFINE(int, tcp_delack_enabled) = 1;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &VNET_NAME(tcp_delack_enabled), 0,
    "Delay ACK to try and piggyback it onto a data packet");

VNET_DEFINE(int, drop_synfin) = 0;
#define	V_drop_synfin		VNET(drop_synfin)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &VNET_NAME(drop_synfin), 0,
    "Drop TCP packets with SYN+FIN set");

VNET_DEFINE(int, tcp_do_rfc3042) = 1;
#define	V_tcp_do_rfc3042	VNET(tcp_do_rfc3042)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3042), 0,
    "Enable RFC 3042 (Limited Transmit)");

VNET_DEFINE(int, tcp_do_rfc3390) = 1;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3390), 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, experimental, CTLFLAG_RW, 0,
    "Experimental TCP extensions");

VNET_DEFINE(int, tcp_do_initcwnd10) = 1;
SYSCTL_VNET_INT(_net_inet_tcp_experimental, OID_AUTO, initcwnd10, CTLFLAG_RW,
    &VNET_NAME(tcp_do_initcwnd10), 0,
    "Enable RFC 6928 (Increasing initial CWND to 10)");

VNET_DEFINE(int, tcp_do_rfc3465) = 1;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_RW,
    &VNET_NAME(tcp_do_rfc3465), 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");

VNET_DEFINE(int, tcp_abc_l_var) = 2;
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_RW,
    &VNET_NAME(tcp_abc_l_var), 2,
    "Cap the max cwnd increment during slow-start to this number of segments");

static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");

VNET_DEFINE(int, tcp_do_ecn) = 0;
SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_RW,
    &VNET_NAME(tcp_do_ecn), 0,
    "TCP ECN support");

VNET_DEFINE(int, tcp_ecn_maxretries) = 1;
SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_RW,
    &VNET_NAME(tcp_ecn_maxretries), 0,
    "Max retries before giving up on ECN");

VNET_DEFINE(int, tcp_insecure_rst) = 0;
#define	V_tcp_insecure_rst	VNET(tcp_insecure_rst)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
    &VNET_NAME(tcp_insecure_rst), 0,
    "Follow the old (insecure) criteria for accepting RST packets");

VNET_DEFINE(int, tcp_recvspace) = 1024*64;
#define	V_tcp_recvspace	VNET(tcp_recvspace)
SYSCTL_VNET_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW,
    &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size");

VNET_DEFINE(int, tcp_do_autorcvbuf) = 1;
#define	V_tcp_do_autorcvbuf	VNET(tcp_do_autorcvbuf)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &VNET_NAME(tcp_do_autorcvbuf), 0,
    "Enable automatic receive buffer sizing");

VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024;
#define	V_tcp_autorcvbuf_inc	VNET(tcp_autorcvbuf_inc)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_inc), 0,
    "Incrementor step size of automatic receive buffer");

VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024;
#define	V_tcp_autorcvbuf_max	VNET(tcp_autorcvbuf_max)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &VNET_NAME(tcp_autorcvbuf_max), 0,
    "Max size of automatic receive buffer");

VNET_DEFINE(struct inpcbhead, tcb);
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
VNET_DEFINE(struct inpcbinfo, tcbinfo);

static void	 tcp_dooptions(struct tcpopt *, u_char *, int, int);
static void	 tcp_do_segment(struct mbuf *, struct tcphdr *,
		     struct socket *, struct tcpcb *, int, int, uint8_t,
		     int);
static void	 tcp_dropwithreset(struct mbuf *, struct tcphdr *,
		     struct tcpcb *, int, int);
static void	 tcp_pulloutofband(struct socket *,
		     struct tcphdr *, struct mbuf *, int);
static void	 tcp_xmit_timer(struct tcpcb *, int);
static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
#ifdef TCP_SIGNATURE
static int inline	 tcp_signature_verify_input(struct mbuf *, int, int,
			    int, struct tcpopt *, struct tcphdr *, u_int);
#endif
static void inline	 cc_ack_received(struct tcpcb *tp, struct tcphdr *th,
			    uint16_t type);
static void inline	 cc_conn_init(struct tcpcb *tp);
static void inline	 cc_post_recovery(struct tcpcb *tp, struct tcphdr *th);
static void inline	 hhook_run_tcp_est_in(struct tcpcb *tp,
			    struct tcphdr *th, struct tcpopt *to);

/*
 * TCP statistics are
 * stored in an "array" of counter(9)s.
 */
VNET_PCPUSTAT_DEFINE(struct tcpstat, tcpstat);
VNET_PCPUSTAT_SYSINIT(tcpstat);
SYSCTL_VNET_PCPUSTAT(_net_inet_tcp, TCPCTL_STATS, stats, struct tcpstat,
    tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");

#ifdef VIMAGE
VNET_PCPUSTAT_SYSUNINIT(tcpstat);
#endif /* VIMAGE */
/*
 * Kernel module interface for updating tcpstat.  The argument is an index
 * into tcpstat treated as an array.
 */
void
kmod_tcpstat_inc(int statnum)
{

	counter_u64_add(VNET(tcpstat)[statnum], 1);
}

/*
 * Wrapper for the TCP established input helper hook.
 */
static void inline
hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to)
{
	struct tcp_hhook_data hhook_data;

	if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) {
		hhook_data.tp = tp;
		hhook_data.th = th;
		hhook_data.to = to;

		hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data,
		    tp->osd);
	}
}

/*
 * CC wrapper hook functions
 */
static void inline
cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th);
	if (tp->snd_cwnd <= tp->snd_wnd)
		tp->ccv->flags |= CCF_CWND_LIMITED;
	else
		tp->ccv->flags &= ~CCF_CWND_LIMITED;

	if (type == CC_ACK) {
		if (tp->snd_cwnd > tp->snd_ssthresh) {
			tp->t_bytes_acked += min(tp->ccv->bytes_this_ack,
			    V_tcp_abc_l_var * tp->t_maxseg);
			if (tp->t_bytes_acked >= tp->snd_cwnd) {
				tp->t_bytes_acked -= tp->snd_cwnd;
				tp->ccv->flags |= CCF_ABC_SENTAWND;
			}
		} else {
			tp->ccv->flags &= ~CCF_ABC_SENTAWND;
			tp->t_bytes_acked = 0;
		}
	}

	if (CC_ALGO(tp)->ack_received != NULL) {
		/* XXXLAS: Find a way to live without this */
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->ack_received(tp->ccv, type);
	}
}

static void inline
cc_conn_init(struct tcpcb *tp)
{
	struct hc_metrics_lite metrics;
	struct inpcb *inp = tp->t_inpcb;
	int rtt;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	tcp_hc_get(&inp->inp_inc, &metrics);

	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
		tp->t_srtt = rtt;
		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
		TCPSTAT_INC(tcps_usedrtt);
		if (metrics.rmx_rttvar) {
			tp->t_rttvar = metrics.rmx_rttvar;
			TCPSTAT_INC(tcps_usedrttvar);
		} else {
			/* default variation is +- 1 rtt */
			tp->t_rttvar =
			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
		}
		TCPT_RANGESET(tp->t_rxtcur,
		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
		    tp->t_rttmin, TCPTV_REXMTMAX);
	}
	if (metrics.rmx_ssthresh) {
		/*
		 * There's some sort of gateway or interface
		 * buffer limit on the path.  Use this to set
		 * the slow start threshold, but set the
		 * threshold to no less than 2*mss.
		 */
		tp->snd_ssthresh = max(2 * tp->t_maxseg, metrics.rmx_ssthresh);
		TCPSTAT_INC(tcps_usedssthresh);
	}

	/*
	 * Set the initial slow-start flight size.
	 *
	 * RFC5681 Section 3.1 specifies the default conservative values.
	 * RFC3390 specifies slightly more aggressive values.
	 * RFC6928 increases it to ten segments.
	 *
	 * If a SYN or SYN/ACK was lost and retransmitted, we have to
	 * reduce the initial CWND to one segment as congestion is likely
	 * requiring us to be cautious.
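	 *
	 * For example, with a 1460-byte MSS the RFC6928 branch below
	 * computes min(10 * 1460, max(2 * 1460, 14600)) = 14600 bytes,
	 * i.e. ten full-sized segments.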
	 */
	if (tp->snd_cwnd == 1)
		tp->snd_cwnd = tp->t_maxseg;		/* SYN(-ACK) lost */
	else if (V_tcp_do_initcwnd10)
		tp->snd_cwnd = min(10 * tp->t_maxseg,
		    max(2 * tp->t_maxseg, 14600));
	else if (V_tcp_do_rfc3390)
		tp->snd_cwnd = min(4 * tp->t_maxseg,
		    max(2 * tp->t_maxseg, 4380));
	else {
		/* Per RFC5681 Section 3.1 */
		if (tp->t_maxseg > 2190)
			tp->snd_cwnd = 2 * tp->t_maxseg;
		else if (tp->t_maxseg > 1095)
			tp->snd_cwnd = 3 * tp->t_maxseg;
		else
			tp->snd_cwnd = 4 * tp->t_maxseg;
	}

	if (CC_ALGO(tp)->conn_init != NULL)
		CC_ALGO(tp)->conn_init(tp->ccv);
}

void inline
cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	switch(type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(tp->t_flags)) {
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_ECN:
		if (!IN_CONGRECOVERY(tp->t_flags)) {
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tp->snd_recover = tp->snd_max;
			if (tp->t_flags & TF_ECN_PERMIT)
				tp->t_flags |= TF_ECN_SND_CWR;
		}
		break;
	case CC_RTO:
		tp->t_dupacks = 0;
		tp->t_bytes_acked = 0;
		EXIT_RECOVERY(tp->t_flags);
		tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 /
		    tp->t_maxseg) * tp->t_maxseg;
		tp->snd_cwnd = tp->t_maxseg;
		break;
	case CC_RTO_ERR:
		TCPSTAT_INC(tcps_sndrexmitbad);
		/* RTO was unnecessary, so reset everything. */
		tp->snd_cwnd = tp->snd_cwnd_prev;
		tp->snd_ssthresh = tp->snd_ssthresh_prev;
		tp->snd_recover = tp->snd_recover_prev;
		if (tp->t_flags & TF_WASFRECOVERY)
			ENTER_FASTRECOVERY(tp->t_flags);
		if (tp->t_flags & TF_WASCRECOVERY)
			ENTER_CONGRECOVERY(tp->t_flags);
		tp->snd_nxt = tp->snd_max;
		tp->t_flags &= ~TF_PREVVALID;
		tp->t_badrxtwin = 0;
		break;
	}

	if (CC_ALGO(tp)->cong_signal != NULL) {
		if (th != NULL)
			tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->cong_signal(tp->ccv, type);
	}
}

static void inline
cc_post_recovery(struct tcpcb *tp, struct tcphdr *th)
{
	INP_WLOCK_ASSERT(tp->t_inpcb);

	/* XXXLAS: KASSERT that we're in recovery? */

	if (CC_ALGO(tp)->post_recovery != NULL) {
		tp->ccv->curack = th->th_ack;
		CC_ALGO(tp)->post_recovery(tp->ccv);
	}
	/* XXXLAS: EXIT_RECOVERY ? */
	tp->t_bytes_acked = 0;
}

#ifdef TCP_SIGNATURE
static inline int
tcp_signature_verify_input(struct mbuf *m, int off0, int tlen, int optlen,
    struct tcpopt *to, struct tcphdr *th, u_int tcpbflag)
{
	int ret;

	tcp_fields_to_net(th);
	ret = tcp_signature_verify(m, off0, tlen, optlen, to, th, tcpbflag);
	tcp_fields_to_host(th);
	return (ret);
}
#endif

/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
		nd6_nud_hint(NULL, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * - there is no delayed ack timer in progress and
 * - our last ack wasn't a 0-sized window.  We never want to delay
 *   the ack that opens up a 0-sized window and
 *     - delayed acks are enabled or
 *     - this is a half-synchronized T/TCP connection.
 * - the segment size is not larger than the MSS and LRO wasn't used
 *   for this segment.
 */
#define DELAY_ACK(tp, tlen)						\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (tlen <= tp->t_maxopd) &&					\
	    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * is there a better place to put this in?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ifa_free(&ia6->ia_ifa);
		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return IPPROTO_DONE;
	}
	if (ia6)
		ifa_free(&ia6->ia_ifa);

	tcp_input(m, *offp);
	return IPPROTO_DONE;
}
#endif /* INET6 */

void
tcp_input(struct mbuf *m, int off0)
{
	struct tcphdr *th = NULL;
	struct ip *ip = NULL;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int optlen = 0;
#ifdef INET
	int len;
#endif
	int tlen = 0, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
#ifdef TCP_SIGNATURE
	uint8_t sig_checked = 0;
#endif
	uint8_t iptos = 0;
	struct m_tag *fwd_tag = NULL;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
#endif /* INET6 */
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */
	int ti_locked;
#define	TI_UNLOCKED	1
#define	TI_WLOCKED	2

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	to.to_flags = 0;
	TCPSTAT_INC(tcps_rcvtotal);

#ifdef INET6
	if (isipv6) {
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input().
		 */

		if (m->m_len < (sizeof(*ip6) + sizeof(*th))) {
			m = m_pullup(m, sizeof(*ip6) + sizeof(*th));
			if (m == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return;
			}
		}

		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)((caddr_t)ip6 + off0);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in6_cksum_pseudo(ip6, tlen,
				    IPPROTO_TCP, m->m_pkthdr.csum_data);
			th->th_sum ^= 0xffff;
		} else
			th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen);
		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
	}
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	{
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return;
			}
		}
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ntohs(ip->ip_len) - off0;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data + tlen +
				    IPPROTO_TCP));
			th->th_sum ^= 0xffff;
		} else {
			struct ipovly *ipov = (struct ipovly *)ip;

			/*
			 * Checksum extended TCP header and data.
			 */
			len = off0 + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = htons(tlen);
			th->th_sum = in_cksum(m, len);
			/* Reset length for SDT probes. */
			ip->ip_len = htons(tlen + off0);
		}

		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
	}
#endif /* INET */

#ifdef INET6
	if (isipv6)
		iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
#endif
#if defined(INET) && defined(INET6)
	else
#endif
#ifdef INET
	iptos = ip->ip_tos;
#endif

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.
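	 * The data offset field counts 32-bit words, so the TCP header
	 * length in bytes is th_off << 2; anything shorter than the base
	 * header (20 bytes) or extending past the segment is dropped below.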
	 *							XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		TCPSTAT_INC(tcps_rcvbadoff);
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
#ifdef INET6
		if (isipv6) {
			IP6_EXTHDR_CHECK(m, off0, off, );
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					return;
				}
				ip = mtod(m, struct ip *);
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
#endif
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	tcp_fields_to_host(th);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Locate pcb for segment; if we're likely to add or remove a
	 * connection then first acquire pcbinfo lock.  There are two cases
	 * where we might discover later we need a write lock despite the
	 * flags: ACKs moving a connection out of the syncache, and ACKs for
	 * a connection in TIMEWAIT.
	 */
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0) {
		INP_INFO_WLOCK(&V_tcbinfo);
		ti_locked = TI_WLOCKED;
	} else
		ti_locked = TI_UNLOCKED;

	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	if (
#ifdef INET6
	    (isipv6 && (m->m_flags & M_IP6_NEXTHOP))
#ifdef INET
	    || (!isipv6 && (m->m_flags & M_IP_NEXTHOP))
#endif
#endif
#if defined(INET) && !defined(INET6)
	    (m->m_flags & M_IP_NEXTHOP)
#endif
	    )
		fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

findpcb:
#ifdef INVARIANTS
	if (ti_locked == TI_WLOCKED) {
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	} else {
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif
#ifdef INET6
	if (isipv6 && fwd_tag != NULL) {
		struct sockaddr_in6 *next_hop6;

		next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in6_pcblookup_mbuf(&V_tcbinfo,
		    &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src,
			    th->th_sport, &next_hop6->sin6_addr,
			    next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else if (isipv6) {
		inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src,
		    th->th_sport, &ip6->ip6_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
	}
#endif /* INET6 */
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	if (fwd_tag != NULL) {
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
		if (!inp) {
			/*
			 * It's new.  Try to find the ambushing socket.
			 * Because we've rewritten the destination address,
			 * any hardware-generated hash is ignored.
			 */
			inp = in_pcblookup(&V_tcbinfo, ip->ip_src,
			    th->th_sport, next_hop->sin_addr,
			    next_hop->sin_port ? ntohs(next_hop->sin_port) :
			    th->th_dport, INPLOOKUP_WILDCARD |
			    INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif);
		}
	} else
		inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src,
		    th->th_sport, ip->ip_dst, th->th_dport,
		    INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB,
		    m->m_pkthdr.rcvif, m);
#endif /* INET */

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
			if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
		    V_blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_WLOCK_ASSERT(inp);
	if (!(inp->inp_flags & INP_HW_FLOWID)
	    && (m->m_flags & M_FLOWID)
	    && ((inp->inp_socket == NULL)
		|| !(inp->inp_socket->so_options & SO_ACCEPTCONN))) {
		inp->inp_flags |= INP_HW_FLOWID;
		inp->inp_flags &= ~INP_SW_FLOWID;
		inp->inp_flowid = m->m_pkthdr.flowid;
	}
#ifdef IPSEC
#ifdef INET6
	if (isipv6 && ipsec6_in_reject(m, inp)) {
		IPSEC6STAT_INC(ips_in_polvio);
		goto dropunlock;
	} else
#endif /* INET6 */
	if (ipsec4_in_reject(m, inp) != 0) {
		IPSECSTAT_INC(ips_in_polvio);
		goto dropunlock;
	}
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
			goto dropunlock;
		else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch stray
	 * or duplicate segments arriving late.  If this segment was a
	 * legitimate new connection attempt, the old INPCB gets removed and
	 * we can try again to find a listening socket.
	 *
	 * At this point, due to earlier optimism, we may hold only an inpcb
	 * lock, and not the inpcbinfo write lock.  If so, we need to try to
	 * acquire it, or if that fails, acquire a reference on the inpcb,
	 * drop all locks, acquire a global write lock, and then re-acquire
	 * the inpcb lock.  We may at that point discover that another thread
	 * has tried to free the inpcb, in which case we need to loop back
	 * and try to find a new inpcb to deliver to.
	 *
	 * XXXRW: It may be time to rethink timewait locking.
	 */
relocked:
	if (inp->inp_flags & INP_TIMEWAIT) {
		if (ti_locked == TI_UNLOCKED) {
			if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_WLOCK(&V_tcbinfo);
				ti_locked = TI_WLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele_wlocked(inp)) {
					inp = NULL;
					goto findpcb;
				}
			} else
				ti_locked = TI_WLOCKED;
		}
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);

		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		INP_INFO_WUNLOCK(&V_tcbinfo);
		return;
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef TCP_OFFLOAD
	if (tp->t_flags & TF_TOE) {
		tcp_offload_input(tp, m);
		m = NULL;	/* consumed by the TOE driver */
		goto dropunlock;
	}
#endif

	/*
	 * We've identified a valid inpcb, but it could be that we need an
	 * inpcbinfo write lock but don't hold it.  In this case, attempt to
	 * acquire using the same strategy as the TIMEWAIT case above.  If we
	 * relock, we have to jump back to 'relocked' as the connection might
	 * now be in TIMEWAIT.
	 */
#ifdef INVARIANTS
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0)
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
#endif
	if (tp->t_state != TCPS_ESTABLISHED) {
		if (ti_locked == TI_UNLOCKED) {
			if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_WLOCK(&V_tcbinfo);
				ti_locked = TI_WLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele_wlocked(inp)) {
					inp = NULL;
					goto findpcb;
				}
				goto relocked;
			} else
				ti_locked = TI_WLOCKED;
		}
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	}

#ifdef MAC
	INP_WLOCK_ASSERT(inp);
	if (mac_inpcb_check_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
#ifdef INET6
		if (isipv6) {
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
		} else
#endif
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif /* TCPDEBUG */
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.  Because listen
	 * sockets are never in TCPS_ESTABLISHED, the V_tcbinfo lock will be
	 * held in this case.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

		KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
		    "tp not listening", __func__));
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);

		bzero(&inc, sizeof(inc));
#ifdef INET6
		if (isipv6) {
			inc.inc_flags |= INC_ISIPV6;
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;
		inc.inc_fibnum = so->so_fibnum;

		/*
		 * Check for an existing connection attempt in syncache if
		 * the flag is only ACK.  A successful lookup creates a new
		 * socket appended to the listen queue in SYN_RECEIVED state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * inp and tcpinfo locks.
			 */
			if (!syncache_expand(&inc, &to, th, &so, m)) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__,
					    V_tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again");
				if (V_tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_WUNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			INP_WLOCK(inp);		/* new connection */
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: ", __func__));
#ifdef TCP_SIGNATURE
			if (sig_checked == 0) {
				tcp_dooptions(&to, optp, optlen,
				    (thflags & TH_SYN) ? TO_SYN : 0);
				if (!tcp_signature_verify_input(m, off0, tlen,
				    optlen, &to, th, tp->t_flags)) {

					/*
					 * A RST received in SYN_SENT state is
					 * allowed for further processing.
					 */
					if ((thflags & TH_RST) == 0 ||
					    (tp->t_state == TCPS_SYN_SENT) == 0)
						goto dropunlock;
				}
				sig_checked = 1;
			}
#endif

			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
			    iptos, ti_locked);
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
			return;
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			TCPSTAT_INC(tcps_badsyn);
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && V_drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated addresses are forbidden, we do not accept
		 * SYNs to a deprecated interface address, to prevent any
		 * new inbound connection from getting established.
		 * When we do not accept the SYN, we send a TCP RST with
		 * the deprecated source address (instead of dropping the
		 * segment).  We accept this compromise as it is much
		 * better for the peer to receive a RST, and the RST will
		 * be the final packet of the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says like this:
		 *   1. use of deprecated addr with existing
		 *      communication is okay - "SHOULD continue to be
		 *      used"
		 *   2. use of it with new communication:
		 *     (2a) "SHOULD NOT be used if alternate address
		 *          with sufficient scope is available"
		 *     (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple descriptions of deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !V_ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			ia6 = ip6_getdstifaddr(m);
			if (ia6 != NULL &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				ifa_free(&ia6->ia_ifa);
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to deprecated "
					"IPv6 address rejected\n",
					s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (ia6)
				ifa_free(&ia6->ia_ifa);
		}
#endif /* INET6 */
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address.  Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
			    log(LOG_DEBUG, "%s; %s: Listen socket: "
				"Connection attempt from broad- or multicast "
				"link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
#ifdef INET6
		if (isipv6) {
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to/from self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to multicast "
					"address ignored\n", s, __func__);
				goto dropunlock;
			}
		}
#endif
#if defined(INET) && defined(INET6)
		else
#endif
#ifdef INET
		{
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to broad- "
					"or multicast address ignored\n",
					s, __func__);
				goto dropunlock;
			}
		}
#endif
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		tcp_dooptions(&to, optp, optlen, TO_SYN);
		syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL);
		/*
		 * Entry added to syncache and mbuf consumed.
		 * Everything already unlocked by syncache_add().
		 */
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		return;
	} else if (tp->t_state == TCPS_LISTEN) {
		/*
		 * When a listen socket is torn down the SO_ACCEPTCONN
		 * flag is removed first while connections are drained
		 * from the accept queue in an unlock/lock cycle of the
		 * ACCEPT_LOCK, opening a race condition allowing a SYN
		 * attempt to go through unhandled.
		 */
		goto dropunlock;
	}

#ifdef TCP_SIGNATURE
	if (sig_checked == 0)  {
		tcp_dooptions(&to, optp, optlen,
		    (thflags & TH_SYN) ? TO_SYN : 0);
		if (!tcp_signature_verify_input(m, off0, tlen, optlen, &to,
		    th, tp->t_flags)) {

			/*
			 * A RST received in SYN_SENT state is allowed for
			 * further processing.
			 */
			if ((thflags & TH_RST) == 0 ||
			    (tp->t_state == TCPS_SYN_SENT) == 0)
				goto dropunlock;
		}
		sig_checked = 1;
	}
#endif

	TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	return;

dropwithreset:
	TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

	if (ti_locked == TI_WLOCKED) {
		INP_INFO_WUNLOCK(&V_tcbinfo);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	if (m != NULL)
		TCP_PROBE5(receive, NULL, tp, mtod(m, const char *), tp, th);

	if (ti_locked == TI_WLOCKED) {
		INP_INFO_WUNLOCK(&V_tcbinfo);
		ti_locked = TI_UNLOCKED;
	}
#ifdef INVARIANTS
	else {
		KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock "
		    "ti_locked: %d", __func__, ti_locked));
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	if (inp != NULL)
		INP_WUNLOCK(inp);

drop:
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
}

static void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
    int ti_locked)
{
	int thflags, acked, ourfinisacked, needoutput = 0;
	int rstreason, todrop, win;
	u_long tiwin;
	char *s;
	struct in_conninfo *inc;
	struct mbuf *mfree;
	struct tcpopt to;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;
	inc = &tp->t_inpcb->inp_inc;
	tp->sackhint.last_sack_ack = 0;

	/*
	 * If this is either a state-changing packet or current state isn't
	 * established, we require a write lock on tcbinfo.
	 * Otherwise, we allow the tcbinfo to be either locked or unlocked,
	 * as the caller may have unnecessarily acquired a write lock due
	 * to a race.
	 */
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
	    tp->t_state != TCPS_ESTABLISHED) {
		KASSERT(ti_locked == TI_WLOCKED, ("%s ti_locked %d for "
		    "SYN/FIN/RST/!EST", __func__, ti_locked));
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	} else {
#ifdef INVARIANTS
		if (ti_locked == TI_WLOCKED)
			INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
		else {
			KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST "
			    "ti_locked: %d", __func__, ti_locked));
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		}
#endif
	}
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp));

	/*
	 * Unscale the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;

	/*
	 * TCP ECN processing.
	 */
	if (tp->t_flags & TF_ECN_PERMIT) {
		if (thflags & TH_CWR)
			tp->t_flags &= ~TF_ECN_SND_ECE;
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_flags |= TF_ECN_SND_ECE;
			TCPSTAT_INC(tcps_ecn_ce);
			break;
		case IPTOS_ECN_ECT0:
			TCPSTAT_INC(tcps_ecn_ect0);
			break;
		case IPTOS_ECN_ECT1:
			TCPSTAT_INC(tcps_ecn_ect1);
			break;
		}
		/* Congestion experienced. */
		if (thflags & TH_ECE) {
			cc_cong_signal(tp, th, CC_ECN);
		}
	}

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks()))
			to.to_tsecr = 0;
	}
	/*
	 * If timestamps were negotiated during SYN/ACK they should
	 * appear on every segment during this session and vice versa.
	 */
	if ((tp->t_flags & TF_RCVD_TSTMP) && !(to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp missing, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}
	if (!(tp->t_flags & TF_RCVD_TSTMP) && (to.to_flags & TOF_TS)) {
		if ((s = tcp_log_addrs(inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: Timestamp not expected, "
			    "no action\n", s, __func__);
			free(s, M_TCPLOG);
		}
	}

	/*
	 * Process options only when we get SYN/ACK back.  The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		}
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = tcp_ts_getticks();
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    LIST_EMPTY(&tp->t_segq) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = tcp_ts_getticks();
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    !IN_RECOVERY(tp->t_flags) &&
			    (to.to_flags & TOF_SACK) == 0 &&
			    TAILQ_EMPTY(&tp->snd_holes)) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				if (ti_locked == TI_WLOCKED)
					INP_INFO_WUNLOCK(&V_tcbinfo);
				ti_locked = TI_UNLOCKED;

				TCPSTAT_INC(tcps_predack);

				/*
				 * "bad retransmit" recovery.
				 */
				if (tp->t_rxtshift == 1 &&
				    tp->t_flags & TF_PREVVALID &&
				    (int)(ticks - tp->t_badrxtwin) < 0) {
					cc_cong_signal(tp, th, CC_RTO_ERR);
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
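				 *
				 * The RTT sample taken below is
				 * tcp_ts_getticks() - to.to_tsecr, converted
				 * from timestamp ticks to hz ticks by
				 * TCP_TS_TO_TICKS() before it is fed to
				 * tcp_xmit_timer().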
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					u_int t;

					t = tcp_ts_getticks() - to.to_tsecr;
					if (!tp->t_rttlow || tp->t_rttlow > t)
						tp->t_rttlow = t;
					tcp_xmit_timer(tp,
					    TCP_TS_TO_TICKS(t) + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
				acked = BYTES_THIS_ACK(tp, th);

				/* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */
				hhook_run_tcp_est_in(tp, th, &to);

				TCPSTAT_INC(tcps_rcvackpack);
				TCPSTAT_ADD(tcps_rcvackbyte, acked);
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;

				/*
				 * Let the congestion control algorithm update
				 * congestion control related information. This
				 * typically means increasing the congestion
				 * window.
				 */
				cc_ack_received(tp, th, CC_ACK);

				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);
				ND6_HINT(tp); /* Some progress has been made. */

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);
				sowwakeup(so);
				if (so->so_snd.sb_cc)
					(void) tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			/*
			 * This is a pure, in-sequence data packet with
			 * nothing on the reassembly queue and we have enough
			 * buffer space to take it.
			 */
			if (ti_locked == TI_WLOCKED)
				INP_INFO_WUNLOCK(&V_tcbinfo);
			ti_locked = TI_UNLOCKED;

			/* Clean receiver SACK report if present */
			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			TCPSTAT_INC(tcps_preddat);
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			TCPSTAT_INC(tcps_rcvpack);
			TCPSTAT_ADD(tcps_rcvbyte, tlen);
			ND6_HINT(tp);	/* Some progress has been made */
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
			/*
			 * Automatic sizing of receive socket buffer.  Often
			 * the send buffer size is not optimally adjusted to
			 * the actual network conditions at hand (delay
			 * bandwidth product).  Setting the buffer size too
			 * small limits throughput on links with high
			 * bandwidth and high delay (e.g.
			 * trans-continental/oceanic links).
			 *
			 * On the receive side the socket buffer memory is
			 * only rarely used to any significant extent.  This
			 * allows us to be much more aggressive in scaling
			 * the receive socket buffer.  For the case that the
			 * buffer space is actually used to a large extent
			 * and we run out of kernel memory we can simply drop
			 * the new segments; TCP on the sender will just
			 * retransmit it later.  Setting the buffer size too
			 * big may only consume too much kernel memory if the
			 * application doesn't read() from the socket or
			 * packet loss or reordering makes use of the
			 * reassembly queue.
			 *
			 * The criteria to step up the receive buffer one
			 * notch are:
			 *  1. the number of bytes received during the time
			 *     it takes one timestamp to be reflected back to
			 *     us (the RTT);
			 *  2. received bytes per RTT is within seven eighths
			 *     of the current socket buffer size;
			 *  3. receive buffer size has not hit maximal
			 *     automatic size;
			 *
			 * This algorithm does one step per RTT at most and
			 * only if we receive a bulk stream w/o packet losses
			 * or reorderings.  Shrinking the buffer during idle
			 * times is not necessary as it doesn't consume any
			 * memory when idle.
			 *
			 * TODO: Only step up if the application is actually
			 * serving the buffer to better manage the socket
			 * buffer resources.
			 */
			if (V_tcp_do_autorcvbuf &&
			    to.to_tsecr &&
			    (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
				if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) &&
				    to.to_tsecr - tp->rfbuf_ts < hz) {
					if (tp->rfbuf_cnt >
					    (so->so_rcv.sb_hiwat / 8 * 7) &&
					    so->so_rcv.sb_hiwat <
					    V_tcp_autorcvbuf_max) {
						newsize =
						    min(so->so_rcv.sb_hiwat +
						    V_tcp_autorcvbuf_inc,
						    V_tcp_autorcvbuf_max);
					}
					/* Start over with next RTT. */
					tp->rfbuf_ts = 0;
					tp->rfbuf_cnt = 0;
				} else
					tp->rfbuf_cnt += tlen;	/* add up */
			}

			/* Add data to socket buffer. */
			SOCKBUF_LOCK(&so->so_rcv);
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
				m_freem(m);
			} else {
				/*
				 * Set new socket buffer size.
				 * Give up when limit is reached.
				 */
				if (newsize)
					if (!sbreserve_locked(&so->so_rcv,
					    newsize, so, NULL))
						so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
				m_adj(m, drop_hdrlen);	/* delayed header drop */
				sbappendstream_locked(&so->so_rcv, m);
			}
			/* NB: sorwakeup_locked() does an implicit unlock. */
			sorwakeup_locked(so);
			if (DELAY_ACK(tp, tlen)) {
				tp->t_flags |= TF_DELACK;
			} else {
				tp->t_flags |= TF_ACKNOW;
				tcp_output(tp);
			}
			goto check_delack;
		}
	}

	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	win = sbspace(&so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));

	/* Reset receive buffer auto scaling when not in bulk receive mode. */
	tp->rfbuf_ts = 0;
	tp->rfbuf_cnt = 0;

	switch (tp->t_state) {

	/*
	 * If the state is SYN_RECEIVED:
	 *	if seg contains an ACK, but not for our SYN/ACK, send a RST.
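	 *	(The ACK is acceptable only if it lies in the range
	 *	snd_una < th_ack <= snd_max; anything else gets a RST below.)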
1877 */ 1878 case TCPS_SYN_RECEIVED: 1879 if ((thflags & TH_ACK) && 1880 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1881 SEQ_GT(th->th_ack, tp->snd_max))) { 1882 rstreason = BANDLIM_RST_OPENPORT; 1883 goto dropwithreset; 1884 } 1885 break; 1886 1887 /* 1888 * If the state is SYN_SENT: 1889 * if seg contains an ACK, but not for our SYN, drop the input. 1890 * if seg contains a RST, then drop the connection. 1891 * if seg does not contain SYN, then drop it. 1892 * Otherwise this is an acceptable SYN segment 1893 * initialize tp->rcv_nxt and tp->irs 1894 * if seg contains ack then advance tp->snd_una 1895 * if seg contains an ECE and ECN support is enabled, the stream 1896 * is ECN capable. 1897 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1898 * arrange for segment to be acked (eventually) 1899 * continue processing rest of data/controls, beginning with URG 1900 */ 1901 case TCPS_SYN_SENT: 1902 if ((thflags & TH_ACK) && 1903 (SEQ_LEQ(th->th_ack, tp->iss) || 1904 SEQ_GT(th->th_ack, tp->snd_max))) { 1905 rstreason = BANDLIM_UNLIMITED; 1906 goto dropwithreset; 1907 } 1908 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) { 1909 TCP_PROBE5(connect__refused, NULL, tp, 1910 mtod(m, const char *), tp, th); 1911 tp = tcp_drop(tp, ECONNREFUSED); 1912 } 1913 if (thflags & TH_RST) 1914 goto drop; 1915 if (!(thflags & TH_SYN)) 1916 goto drop; 1917 1918 tp->irs = th->th_seq; 1919 tcp_rcvseqinit(tp); 1920 if (thflags & TH_ACK) { 1921 TCPSTAT_INC(tcps_connects); 1922 soisconnected(so); 1923#ifdef MAC 1924 mac_socketpeer_set_from_mbuf(m, so); 1925#endif 1926 /* Do window scaling on this connection? */ 1927 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1928 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1929 tp->rcv_scale = tp->request_r_scale; 1930 } 1931 tp->rcv_adv += imin(tp->rcv_wnd, 1932 TCP_MAXWIN << tp->rcv_scale); 1933 tp->snd_una++; /* SYN is acked */ 1934 /* 1935 * If there's data, delay ACK; if there's also a FIN 1936 * ACKNOW will be turned on later. 1937 */ 1938 if (DELAY_ACK(tp, tlen) && tlen != 0) 1939 tcp_timer_activate(tp, TT_DELACK, 1940 tcp_delacktime); 1941 else 1942 tp->t_flags |= TF_ACKNOW; 1943 1944 if ((thflags & TH_ECE) && V_tcp_do_ecn) { 1945 tp->t_flags |= TF_ECN_PERMIT; 1946 TCPSTAT_INC(tcps_ecn_shs); 1947 } 1948 1949 /* 1950 * Received <SYN,ACK> in SYN_SENT[*] state. 1951 * Transitions: 1952 * SYN_SENT --> ESTABLISHED 1953 * SYN_SENT* --> FIN_WAIT_1 1954 */ 1955 tp->t_starttime = ticks; 1956 if (tp->t_flags & TF_NEEDFIN) { 1957 tcp_state_change(tp, TCPS_FIN_WAIT_1); 1958 tp->t_flags &= ~TF_NEEDFIN; 1959 thflags &= ~TH_SYN; 1960 } else { 1961 tcp_state_change(tp, TCPS_ESTABLISHED); 1962 TCP_PROBE5(connect__established, NULL, tp, 1963 mtod(m, const char *), tp, th); 1964 cc_conn_init(tp); 1965 tcp_timer_activate(tp, TT_KEEP, 1966 TP_KEEPIDLE(tp)); 1967 } 1968 } else { 1969 /* 1970 * Received initial SYN in SYN-SENT[*] state => 1971 * simultaneous open. If segment contains CC option 1972 * and there is a cached CC, apply TAO test. 1973 * If it succeeds, connection is * half-synchronized. 1974 * Otherwise, do 3-way handshake: 1975 * SYN-SENT -> SYN-RECEIVED 1976 * SYN-SENT* -> SYN-RECEIVED* 1977 * If there was no CC option, clear cached CC value. 
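			 * (The CC/TAO discussion above is RFC 1644 T/TCP
			 * legacy; the TAO test is no longer performed, and
			 * the code below unconditionally falls back to the
			 * 3-way handshake.)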
1978 */ 1979 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 1980 tcp_timer_activate(tp, TT_REXMT, 0); 1981 tcp_state_change(tp, TCPS_SYN_RECEIVED); 1982 } 1983 1984 KASSERT(ti_locked == TI_WLOCKED, ("%s: trimthenstep6: " 1985 "ti_locked %d", __func__, ti_locked)); 1986 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 1987 INP_WLOCK_ASSERT(tp->t_inpcb); 1988 1989 /* 1990 * Advance th->th_seq to correspond to first data byte. 1991 * If data, trim to stay within window, 1992 * dropping FIN if necessary. 1993 */ 1994 th->th_seq++; 1995 if (tlen > tp->rcv_wnd) { 1996 todrop = tlen - tp->rcv_wnd; 1997 m_adj(m, -todrop); 1998 tlen = tp->rcv_wnd; 1999 thflags &= ~TH_FIN; 2000 TCPSTAT_INC(tcps_rcvpackafterwin); 2001 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2002 } 2003 tp->snd_wl1 = th->th_seq - 1; 2004 tp->rcv_up = th->th_seq; 2005 /* 2006 * Client side of transaction: already sent SYN and data. 2007 * If the remote host used T/TCP to validate the SYN, 2008 * our data will be ACK'd; if so, enter normal data segment 2009 * processing in the middle of step 5, ack processing. 2010 * Otherwise, goto step 6. 2011 */ 2012 if (thflags & TH_ACK) 2013 goto process_ACK; 2014 2015 goto step6; 2016 2017 /* 2018 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 2019 * do normal processing. 2020 * 2021 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later. 2022 */ 2023 case TCPS_LAST_ACK: 2024 case TCPS_CLOSING: 2025 break; /* continue normal processing */ 2026 } 2027 2028 /* 2029 * States other than LISTEN or SYN_SENT. 2030 * First check the RST flag and sequence number since reset segments 2031 * are exempt from the timestamp and connection count tests. This 2032 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 2033 * below which allowed reset segments in half the sequence space 2034 * to fall though and be processed (which gives forged reset 2035 * segments with a random sequence number a 50 percent chance of 2036 * killing a connection). 2037 * Then check timestamp, if present. 2038 * Then check the connection count, if present. 2039 * Then check that at least some bytes of segment are within 2040 * receive window. If segment begins before rcv_nxt, 2041 * drop leading data (and SYN); if nothing left, just ack. 2042 * 2043 * 2044 * If the RST bit is set, check the sequence number to see 2045 * if this is a valid reset segment. 2046 * RFC 793 page 37: 2047 * In all states except SYN-SENT, all reset (RST) segments 2048 * are validated by checking their SEQ-fields. A reset is 2049 * valid if its sequence number is in the window. 2050 * Note: this does not take into account delayed ACKs, so 2051 * we should test against last_ack_sent instead of rcv_nxt. 2052 * The sequence number in the reset segment is normally an 2053 * echo of our outgoing acknowlegement numbers, but some hosts 2054 * send a reset with the sequence number at the rightmost edge 2055 * of our receive window, and we have to handle this case. 2056 * Note 2: Paul Watson's paper "Slipping in the Window" has shown 2057 * that brute force RST attacks are possible. To combat this, 2058 * we use a much stricter check while in the ESTABLISHED state, 2059 * only accepting RSTs where the sequence number is equal to 2060 * last_ack_sent. In all other states (the states in which a 2061 * RST is more likely), the more permissive check is used. 2062 * If we have multiple segments in flight, the initial reset 2063 * segment sequence numbers will be to the left of last_ack_sent, 2064 * but they will eventually catch up. 
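	 * As implemented below, a RST is accepted only if th_seq lies in
	 * [last_ack_sent - 1, last_ack_sent + rcv_wnd]; in ESTABLISHED it
	 * must further fall within one of rcv_nxt or last_ack_sent, unless
	 * the V_tcp_insecure_rst knob relaxes that stricter check.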
2065 * In any case, it never made sense to trim reset segments to 2066 * fit the receive window since RFC 1122 says: 2067 * 4.2.2.12 RST Segment: RFC-793 Section 3.4 2068 * 2069 * A TCP SHOULD allow a received RST segment to include data. 2070 * 2071 * DISCUSSION 2072 * It has been suggested that a RST segment could contain 2073 * ASCII text that encoded and explained the cause of the 2074 * RST. No standard has yet been established for such 2075 * data. 2076 * 2077 * If the reset segment passes the sequence number test examine 2078 * the state: 2079 * SYN_RECEIVED STATE: 2080 * If passive open, return to LISTEN state. 2081 * If active open, inform user that connection was refused. 2082 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES: 2083 * Inform user that connection was reset, and close tcb. 2084 * CLOSING, LAST_ACK STATES: 2085 * Close the tcb. 2086 * TIME_WAIT STATE: 2087 * Drop the segment - see Stevens, vol. 2, p. 964 and 2088 * RFC 1337. 2089 */ 2090 if (thflags & TH_RST) { 2091 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) && 2092 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 2093 switch (tp->t_state) { 2094 2095 case TCPS_SYN_RECEIVED: 2096 so->so_error = ECONNREFUSED; 2097 goto close; 2098 2099 case TCPS_ESTABLISHED: 2100 if (V_tcp_insecure_rst == 0 && 2101 !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) && 2102 SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) && 2103 !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) && 2104 SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) { 2105 TCPSTAT_INC(tcps_badrst); 2106 goto drop; 2107 } 2108 /* FALLTHROUGH */ 2109 case TCPS_FIN_WAIT_1: 2110 case TCPS_FIN_WAIT_2: 2111 case TCPS_CLOSE_WAIT: 2112 so->so_error = ECONNRESET; 2113 close: 2114 KASSERT(ti_locked == TI_WLOCKED, 2115 ("tcp_do_segment: TH_RST 1 ti_locked %d", 2116 ti_locked)); 2117 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2118 2119 tcp_state_change(tp, TCPS_CLOSED); 2120 TCPSTAT_INC(tcps_drops); 2121 tp = tcp_close(tp); 2122 break; 2123 2124 case TCPS_CLOSING: 2125 case TCPS_LAST_ACK: 2126 KASSERT(ti_locked == TI_WLOCKED, 2127 ("tcp_do_segment: TH_RST 2 ti_locked %d", 2128 ti_locked)); 2129 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2130 2131 tp = tcp_close(tp); 2132 break; 2133 } 2134 } 2135 goto drop; 2136 } 2137 2138 /* 2139 * RFC 1323 PAWS: If we have a timestamp reply on this segment 2140 * and it's less than ts_recent, drop it. 2141 */ 2142 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 2143 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 2144 2145 /* Check to see if ts_recent is over 24 days old. */ 2146 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) { 2147 /* 2148 * Invalidate ts_recent. If this segment updates 2149 * ts_recent, the age will be reset later and ts_recent 2150 * will get a valid value. If it does not, setting 2151 * ts_recent to zero will at least satisfy the 2152 * requirement that zero be placed in the timestamp 2153 * echo reply when ts_recent isn't valid. The 2154 * age isn't reset until we get a valid ts_recent 2155 * because we don't want out-of-order segments to be 2156 * dropped when ts_recent is old. 2157 */ 2158 tp->ts_recent = 0; 2159 } else { 2160 TCPSTAT_INC(tcps_rcvduppack); 2161 TCPSTAT_ADD(tcps_rcvdupbyte, tlen); 2162 TCPSTAT_INC(tcps_pawsdrop); 2163 if (tlen) 2164 goto dropafterack; 2165 goto drop; 2166 } 2167 } 2168 2169 /* 2170 * In the SYN-RECEIVED state, validate that the packet belongs to 2171 * this connection before trimming the data to fit the receive 2172 * window. 
Check the sequence number versus IRS since we know 2173 * the sequence numbers haven't wrapped. This is a partial fix 2174 * for the "LAND" DoS attack. 2175 */ 2176 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 2177 rstreason = BANDLIM_RST_OPENPORT; 2178 goto dropwithreset; 2179 } 2180 2181 todrop = tp->rcv_nxt - th->th_seq; 2182 if (todrop > 0) { 2183 /* 2184 * If this is a duplicate SYN for our current connection, 2185 * advance over it and pretend and it's not a SYN. 2186 */ 2187 if (thflags & TH_SYN && th->th_seq == tp->irs) { 2188 thflags &= ~TH_SYN; 2189 th->th_seq++; 2190 if (th->th_urp > 1) 2191 th->th_urp--; 2192 else 2193 thflags &= ~TH_URG; 2194 todrop--; 2195 } 2196 /* 2197 * Following if statement from Stevens, vol. 2, p. 960. 2198 */ 2199 if (todrop > tlen 2200 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 2201 /* 2202 * Any valid FIN must be to the left of the window. 2203 * At this point the FIN must be a duplicate or out 2204 * of sequence; drop it. 2205 */ 2206 thflags &= ~TH_FIN; 2207 2208 /* 2209 * Send an ACK to resynchronize and drop any data. 2210 * But keep on processing for RST or ACK. 2211 */ 2212 tp->t_flags |= TF_ACKNOW; 2213 todrop = tlen; 2214 TCPSTAT_INC(tcps_rcvduppack); 2215 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 2216 } else { 2217 TCPSTAT_INC(tcps_rcvpartduppack); 2218 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 2219 } 2220 drop_hdrlen += todrop; /* drop from the top afterwards */ 2221 th->th_seq += todrop; 2222 tlen -= todrop; 2223 if (th->th_urp > todrop) 2224 th->th_urp -= todrop; 2225 else { 2226 thflags &= ~TH_URG; 2227 th->th_urp = 0; 2228 } 2229 } 2230 2231 /* 2232 * If new data are received on a connection after the 2233 * user processes are gone, then RST the other end. 2234 */ 2235 if ((so->so_state & SS_NOFDREF) && 2236 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 2237 KASSERT(ti_locked == TI_WLOCKED, ("%s: SS_NOFDEREF && " 2238 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked)); 2239 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2240 2241 if ((s = tcp_log_addrs(inc, th, NULL, NULL))) { 2242 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data " 2243 "after socket was closed, " 2244 "sending RST and removing tcpcb\n", 2245 s, __func__, tcpstates[tp->t_state], tlen); 2246 free(s, M_TCPLOG); 2247 } 2248 tp = tcp_close(tp); 2249 TCPSTAT_INC(tcps_rcvafterclose); 2250 rstreason = BANDLIM_UNLIMITED; 2251 goto dropwithreset; 2252 } 2253 2254 /* 2255 * If segment ends after window, drop trailing data 2256 * (and PUSH and FIN); if nothing left, just ACK. 2257 */ 2258 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 2259 if (todrop > 0) { 2260 TCPSTAT_INC(tcps_rcvpackafterwin); 2261 if (todrop >= tlen) { 2262 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 2263 /* 2264 * If window is closed can only take segments at 2265 * window edge, and have to drop data and PUSH from 2266 * incoming segments. Continue processing, but 2267 * remember to ack. Otherwise, drop segment 2268 * and ack. 2269 */ 2270 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 2271 tp->t_flags |= TF_ACKNOW; 2272 TCPSTAT_INC(tcps_rcvwinprobe); 2273 } else 2274 goto dropafterack; 2275 } else 2276 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2277 m_adj(m, -todrop); 2278 tlen -= todrop; 2279 thflags &= ~(TH_PUSH|TH_FIN); 2280 } 2281 2282 /* 2283 * If last ACK falls within this segment's sequence numbers, 2284 * record its timestamp. 
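	 * (ts_recent is the value we echo back in the TSecr field of the
	 * segments we send.)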
2285 * NOTE: 2286 * 1) That the test incorporates suggestions from the latest 2287 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 2288 * 2) That updating only on newer timestamps interferes with 2289 * our earlier PAWS tests, so this check should be solely 2290 * predicated on the sequence space of this segment. 2291 * 3) That we modify the segment boundary check to be 2292 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 2293 * instead of RFC1323's 2294 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 2295 * This modified check allows us to overcome RFC1323's 2296 * limitations as described in Stevens TCP/IP Illustrated 2297 * Vol. 2 p.869. In such cases, we can still calculate the 2298 * RTT correctly when RCV.NXT == Last.ACK.Sent. 2299 */ 2300 if ((to.to_flags & TOF_TS) != 0 && 2301 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 2302 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 2303 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 2304 tp->ts_recent_age = tcp_ts_getticks(); 2305 tp->ts_recent = to.to_tsval; 2306 } 2307 2308 /* 2309 * If a SYN is in the window, then this is an 2310 * error and we send an RST and drop the connection. 2311 */ 2312 if (thflags & TH_SYN) { 2313 KASSERT(ti_locked == TI_WLOCKED, 2314 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked)); 2315 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2316 2317 tp = tcp_drop(tp, ECONNRESET); 2318 rstreason = BANDLIM_UNLIMITED; 2319 goto drop; 2320 } 2321 2322 /* 2323 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 2324 * flag is on (half-synchronized state), then queue data for 2325 * later processing; else drop segment and return. 2326 */ 2327 if ((thflags & TH_ACK) == 0) { 2328 if (tp->t_state == TCPS_SYN_RECEIVED || 2329 (tp->t_flags & TF_NEEDSYN)) 2330 goto step6; 2331 else if (tp->t_flags & TF_ACKNOW) 2332 goto dropafterack; 2333 else 2334 goto drop; 2335 } 2336 2337 /* 2338 * Ack processing. 2339 */ 2340 switch (tp->t_state) { 2341 2342 /* 2343 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 2344 * ESTABLISHED state and continue processing. 2345 * The ACK was checked above. 2346 */ 2347 case TCPS_SYN_RECEIVED: 2348 2349 TCPSTAT_INC(tcps_connects); 2350 soisconnected(so); 2351 /* Do window scaling? */ 2352 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2353 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2354 tp->rcv_scale = tp->request_r_scale; 2355 tp->snd_wnd = tiwin; 2356 } 2357 /* 2358 * Make transitions: 2359 * SYN-RECEIVED -> ESTABLISHED 2360 * SYN-RECEIVED* -> FIN-WAIT-1 2361 */ 2362 tp->t_starttime = ticks; 2363 if (tp->t_flags & TF_NEEDFIN) { 2364 tcp_state_change(tp, TCPS_FIN_WAIT_1); 2365 tp->t_flags &= ~TF_NEEDFIN; 2366 } else { 2367 tcp_state_change(tp, TCPS_ESTABLISHED); 2368 TCP_PROBE5(accept__established, NULL, tp, 2369 mtod(m, const char *), tp, th); 2370 cc_conn_init(tp); 2371 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp)); 2372 } 2373 /* 2374 * If segment contains data or ACK, will call tcp_reass() 2375 * later; if not, do so now to pass queued data to user. 2376 */ 2377 if (tlen == 0 && (thflags & TH_FIN) == 0) 2378 (void) tcp_reass(tp, (struct tcphdr *)0, 0, 2379 (struct mbuf *)0); 2380 tp->snd_wl1 = th->th_seq - 1; 2381 /* FALLTHROUGH */ 2382 2383 /* 2384 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 2385 * ACKs. If the ack is in the range 2386 * tp->snd_una < th->th_ack <= tp->snd_max 2387 * then advance tp->snd_una to th->th_ack and drop 2388 * data from the retransmission queue. If this ACK reflects 2389 * more up to date window information we update our window information. 
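	 * Below, the first two duplicate ACKs may trigger RFC 3042
	 * limited transmit, while the third (tcprexmtthresh) enters
	 * fast retransmit and fast recovery.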
2390 */ 2391 case TCPS_ESTABLISHED: 2392 case TCPS_FIN_WAIT_1: 2393 case TCPS_FIN_WAIT_2: 2394 case TCPS_CLOSE_WAIT: 2395 case TCPS_CLOSING: 2396 case TCPS_LAST_ACK: 2397 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2398 TCPSTAT_INC(tcps_rcvacktoomuch); 2399 goto dropafterack; 2400 } 2401 if ((tp->t_flags & TF_SACK_PERMIT) && 2402 ((to.to_flags & TOF_SACK) || 2403 !TAILQ_EMPTY(&tp->snd_holes))) 2404 tcp_sack_doack(tp, &to, th->th_ack); 2405 2406 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 2407 hhook_run_tcp_est_in(tp, th, &to); 2408 2409 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2410 if (tlen == 0 && tiwin == tp->snd_wnd) { 2411 TCPSTAT_INC(tcps_rcvdupack); 2412 /* 2413 * If we have outstanding data (other than 2414 * a window probe), this is a completely 2415 * duplicate ack (ie, window info didn't 2416 * change), the ack is the biggest we've 2417 * seen and we've seen exactly our rexmt 2418 * threshhold of them, assume a packet 2419 * has been dropped and retransmit it. 2420 * Kludge snd_nxt & the congestion 2421 * window so we send only this one 2422 * packet. 2423 * 2424 * We know we're losing at the current 2425 * window size so do congestion avoidance 2426 * (set ssthresh to half the current window 2427 * and pull our congestion window back to 2428 * the new ssthresh). 2429 * 2430 * Dup acks mean that packets have left the 2431 * network (they're now cached at the receiver) 2432 * so bump cwnd by the amount in the receiver 2433 * to keep a constant cwnd packets in the 2434 * network. 2435 * 2436 * When using TCP ECN, notify the peer that 2437 * we reduced the cwnd. 2438 */ 2439 if (!tcp_timer_active(tp, TT_REXMT) || 2440 th->th_ack != tp->snd_una) 2441 tp->t_dupacks = 0; 2442 else if (++tp->t_dupacks > tcprexmtthresh || 2443 IN_FASTRECOVERY(tp->t_flags)) { 2444 cc_ack_received(tp, th, CC_DUPACK); 2445 if ((tp->t_flags & TF_SACK_PERMIT) && 2446 IN_FASTRECOVERY(tp->t_flags)) { 2447 int awnd; 2448 2449 /* 2450 * Compute the amount of data in flight first. 2451 * We can inject new data into the pipe iff 2452 * we have less than 1/2 the original window's 2453 * worth of data in flight. 2454 */ 2455 awnd = (tp->snd_nxt - tp->snd_fack) + 2456 tp->sackhint.sack_bytes_rexmit; 2457 if (awnd < tp->snd_ssthresh) { 2458 tp->snd_cwnd += tp->t_maxseg; 2459 if (tp->snd_cwnd > tp->snd_ssthresh) 2460 tp->snd_cwnd = tp->snd_ssthresh; 2461 } 2462 } else 2463 tp->snd_cwnd += tp->t_maxseg; 2464 if ((thflags & TH_FIN) && 2465 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2466 /* 2467 * If its a fin we need to process 2468 * it to avoid a race where both 2469 * sides enter FIN-WAIT and send FIN|ACK 2470 * at the same time. 2471 */ 2472 break; 2473 } 2474 (void) tcp_output(tp); 2475 goto drop; 2476 } else if (tp->t_dupacks == tcprexmtthresh) { 2477 tcp_seq onxt = tp->snd_nxt; 2478 2479 /* 2480 * If we're doing sack, check to 2481 * see if we're already in sack 2482 * recovery. If we're not doing sack, 2483 * check to see if we're in newreno 2484 * recovery. 2485 */ 2486 if (tp->t_flags & TF_SACK_PERMIT) { 2487 if (IN_FASTRECOVERY(tp->t_flags)) { 2488 tp->t_dupacks = 0; 2489 break; 2490 } 2491 } else { 2492 if (SEQ_LEQ(th->th_ack, 2493 tp->snd_recover)) { 2494 tp->t_dupacks = 0; 2495 break; 2496 } 2497 } 2498 /* Congestion signal before ack. 
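				 * (give the congestion control module a
				 * chance to reduce ssthresh and cwnd for the
				 * inferred loss before this ACK is acted on)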
*/ 2499 cc_cong_signal(tp, th, CC_NDUPACK); 2500 cc_ack_received(tp, th, CC_DUPACK); 2501 tcp_timer_activate(tp, TT_REXMT, 0); 2502 tp->t_rtttime = 0; 2503 if (tp->t_flags & TF_SACK_PERMIT) { 2504 TCPSTAT_INC( 2505 tcps_sack_recovery_episode); 2506 tp->sack_newdata = tp->snd_nxt; 2507 tp->snd_cwnd = tp->t_maxseg; 2508 (void) tcp_output(tp); 2509 goto drop; 2510 } 2511 tp->snd_nxt = th->th_ack; 2512 tp->snd_cwnd = tp->t_maxseg; 2513 if ((thflags & TH_FIN) && 2514 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2515 /* 2516 * If its a fin we need to process 2517 * it to avoid a race where both 2518 * sides enter FIN-WAIT and send FIN|ACK 2519 * at the same time. 2520 */ 2521 break; 2522 } 2523 (void) tcp_output(tp); 2524 KASSERT(tp->snd_limited <= 2, 2525 ("%s: tp->snd_limited too big", 2526 __func__)); 2527 tp->snd_cwnd = tp->snd_ssthresh + 2528 tp->t_maxseg * 2529 (tp->t_dupacks - tp->snd_limited); 2530 if (SEQ_GT(onxt, tp->snd_nxt)) 2531 tp->snd_nxt = onxt; 2532 goto drop; 2533 } else if (V_tcp_do_rfc3042) { 2534 cc_ack_received(tp, th, CC_DUPACK); 2535 u_long oldcwnd = tp->snd_cwnd; 2536 tcp_seq oldsndmax = tp->snd_max; 2537 u_int sent; 2538 int avail; 2539 2540 KASSERT(tp->t_dupacks == 1 || 2541 tp->t_dupacks == 2, 2542 ("%s: dupacks not 1 or 2", 2543 __func__)); 2544 if (tp->t_dupacks == 1) 2545 tp->snd_limited = 0; 2546 tp->snd_cwnd = 2547 (tp->snd_nxt - tp->snd_una) + 2548 (tp->t_dupacks - tp->snd_limited) * 2549 tp->t_maxseg; 2550 if ((thflags & TH_FIN) && 2551 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2552 /* 2553 * If its a fin we need to process 2554 * it to avoid a race where both 2555 * sides enter FIN-WAIT and send FIN|ACK 2556 * at the same time. 2557 */ 2558 break; 2559 } 2560 /* 2561 * Only call tcp_output when there 2562 * is new data available to be sent. 2563 * Otherwise we would send pure ACKs. 2564 */ 2565 SOCKBUF_LOCK(&so->so_snd); 2566 avail = so->so_snd.sb_cc - 2567 (tp->snd_nxt - tp->snd_una); 2568 SOCKBUF_UNLOCK(&so->so_snd); 2569 if (avail > 0) 2570 (void) tcp_output(tp); 2571 sent = tp->snd_max - oldsndmax; 2572 if (sent > tp->t_maxseg) { 2573 KASSERT((tp->t_dupacks == 2 && 2574 tp->snd_limited == 0) || 2575 (sent == tp->t_maxseg + 1 && 2576 tp->t_flags & TF_SENTFIN), 2577 ("%s: sent too much", 2578 __func__)); 2579 tp->snd_limited = 2; 2580 } else if (sent > 0) 2581 ++tp->snd_limited; 2582 tp->snd_cwnd = oldcwnd; 2583 goto drop; 2584 } 2585 } else 2586 tp->t_dupacks = 0; 2587 break; 2588 } 2589 2590 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2591 ("%s: th_ack <= snd_una", __func__)); 2592 2593 /* 2594 * If the congestion window was inflated to account 2595 * for the other side's cached packets, retract it. 2596 */ 2597 if (IN_FASTRECOVERY(tp->t_flags)) { 2598 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2599 if (tp->t_flags & TF_SACK_PERMIT) 2600 tcp_sack_partialack(tp, th); 2601 else 2602 tcp_newreno_partial_ack(tp, th); 2603 } else 2604 cc_post_recovery(tp, th); 2605 } 2606 tp->t_dupacks = 0; 2607 /* 2608 * If we reach this point, ACK is not a duplicate, 2609 * i.e., it ACKs something we sent. 2610 */ 2611 if (tp->t_flags & TF_NEEDSYN) { 2612 /* 2613 * T/TCP: Connection was half-synchronized, and our 2614 * SYN has been ACK'd (so connection is now fully 2615 * synchronized). Go to non-starred state, 2616 * increment snd_una for ACK of SYN, and check if 2617 * we can do window scaling. 2618 */ 2619 tp->t_flags &= ~TF_NEEDSYN; 2620 tp->snd_una++; 2621 /* Do window scaling? 
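			 * (only if the scale option was both requested
			 * and received)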
*/ 2622 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2623 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2624 tp->rcv_scale = tp->request_r_scale; 2625 /* Send window already scaled. */ 2626 } 2627 } 2628 2629process_ACK: 2630 INP_WLOCK_ASSERT(tp->t_inpcb); 2631 2632 acked = BYTES_THIS_ACK(tp, th); 2633 TCPSTAT_INC(tcps_rcvackpack); 2634 TCPSTAT_ADD(tcps_rcvackbyte, acked); 2635 2636 /* 2637 * If we just performed our first retransmit, and the ACK 2638 * arrives within our recovery window, then it was a mistake 2639 * to do the retransmit in the first place. Recover our 2640 * original cwnd and ssthresh, and proceed to transmit where 2641 * we left off. 2642 */ 2643 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID && 2644 (int)(ticks - tp->t_badrxtwin) < 0) 2645 cc_cong_signal(tp, th, CC_RTO_ERR); 2646 2647 /* 2648 * If we have a timestamp reply, update smoothed 2649 * round trip time. If no timestamp is present but 2650 * transmit timer is running and timed sequence 2651 * number was acked, update smoothed round trip time. 2652 * Since we now have an rtt measurement, cancel the 2653 * timer backoff (cf., Phil Karn's retransmit alg.). 2654 * Recompute the initial retransmit timer. 2655 * 2656 * Some boxes send broken timestamp replies 2657 * during the SYN+ACK phase, ignore 2658 * timestamps of 0 or we could calculate a 2659 * huge RTT and blow up the retransmit timer. 2660 */ 2661 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) { 2662 u_int t; 2663 2664 t = tcp_ts_getticks() - to.to_tsecr; 2665 if (!tp->t_rttlow || tp->t_rttlow > t) 2666 tp->t_rttlow = t; 2667 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1); 2668 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2669 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2670 tp->t_rttlow = ticks - tp->t_rtttime; 2671 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2672 } 2673 2674 /* 2675 * If all outstanding data is acked, stop retransmit 2676 * timer and remember to restart (more output or persist). 2677 * If there is more data to be acked, restart retransmit 2678 * timer, using current (possibly backed-off) value. 2679 */ 2680 if (th->th_ack == tp->snd_max) { 2681 tcp_timer_activate(tp, TT_REXMT, 0); 2682 needoutput = 1; 2683 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2684 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2685 2686 /* 2687 * If no data (only SYN) was ACK'd, 2688 * skip rest of ACK processing. 2689 */ 2690 if (acked == 0) 2691 goto step6; 2692 2693 /* 2694 * Let the congestion control algorithm update congestion 2695 * control related information. This typically means increasing 2696 * the congestion window. 2697 */ 2698 cc_ack_received(tp, th, CC_ACK); 2699 2700 SOCKBUF_LOCK(&so->so_snd); 2701 if (acked > so->so_snd.sb_cc) { 2702 tp->snd_wnd -= so->so_snd.sb_cc; 2703 mfree = sbcut_locked(&so->so_snd, 2704 (int)so->so_snd.sb_cc); 2705 ourfinisacked = 1; 2706 } else { 2707 mfree = sbcut_locked(&so->so_snd, acked); 2708 tp->snd_wnd -= acked; 2709 ourfinisacked = 0; 2710 } 2711 /* NB: sowwakeup_locked() does an implicit unlock. */ 2712 sowwakeup_locked(so); 2713 m_freem(mfree); 2714 /* Detect una wraparound. */ 2715 if (!IN_RECOVERY(tp->t_flags) && 2716 SEQ_GT(tp->snd_una, tp->snd_recover) && 2717 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2718 tp->snd_recover = th->th_ack - 1; 2719 /* XXXLAS: Can this be moved up into cc_post_recovery? 
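		 * (EXIT_RECOVERY below clears the fast recovery flags once
		 * th_ack reaches snd_recover)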
*/ 2720 if (IN_RECOVERY(tp->t_flags) && 2721 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2722 EXIT_RECOVERY(tp->t_flags); 2723 } 2724 tp->snd_una = th->th_ack; 2725 if (tp->t_flags & TF_SACK_PERMIT) { 2726 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2727 tp->snd_recover = tp->snd_una; 2728 } 2729 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2730 tp->snd_nxt = tp->snd_una; 2731 2732 switch (tp->t_state) { 2733 2734 /* 2735 * In FIN_WAIT_1 STATE in addition to the processing 2736 * for the ESTABLISHED state if our FIN is now acknowledged 2737 * then enter FIN_WAIT_2. 2738 */ 2739 case TCPS_FIN_WAIT_1: 2740 if (ourfinisacked) { 2741 /* 2742 * If we can't receive any more 2743 * data, then closing user can proceed. 2744 * Starting the timer is contrary to the 2745 * specification, but if we don't get a FIN 2746 * we'll hang forever. 2747 * 2748 * XXXjl: 2749 * we should release the tp also, and use a 2750 * compressed state. 2751 */ 2752 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2753 soisdisconnected(so); 2754 tcp_timer_activate(tp, TT_2MSL, 2755 (tcp_fast_finwait2_recycle ? 2756 tcp_finwait2_timeout : 2757 TP_MAXIDLE(tp))); 2758 } 2759 tcp_state_change(tp, TCPS_FIN_WAIT_2); 2760 } 2761 break; 2762 2763 /* 2764 * In CLOSING STATE in addition to the processing for 2765 * the ESTABLISHED state if the ACK acknowledges our FIN 2766 * then enter the TIME-WAIT state, otherwise ignore 2767 * the segment. 2768 */ 2769 case TCPS_CLOSING: 2770 if (ourfinisacked) { 2771 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2772 tcp_twstart(tp); 2773 INP_INFO_WUNLOCK(&V_tcbinfo); 2774 m_freem(m); 2775 return; 2776 } 2777 break; 2778 2779 /* 2780 * In LAST_ACK, we may still be waiting for data to drain 2781 * and/or to be acked, as well as for the ack of our FIN. 2782 * If our FIN is now acknowledged, delete the TCB, 2783 * enter the closed state and return. 2784 */ 2785 case TCPS_LAST_ACK: 2786 if (ourfinisacked) { 2787 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2788 tp = tcp_close(tp); 2789 goto drop; 2790 } 2791 break; 2792 } 2793 } 2794 2795step6: 2796 INP_WLOCK_ASSERT(tp->t_inpcb); 2797 2798 /* 2799 * Update window information. 2800 * Don't look at window if no ACK: TAC's send garbage on first SYN. 2801 */ 2802 if ((thflags & TH_ACK) && 2803 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2804 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2805 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2806 /* keep track of pure window updates */ 2807 if (tlen == 0 && 2808 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2809 TCPSTAT_INC(tcps_rcvwinupd); 2810 tp->snd_wnd = tiwin; 2811 tp->snd_wl1 = th->th_seq; 2812 tp->snd_wl2 = th->th_ack; 2813 if (tp->snd_wnd > tp->max_sndwnd) 2814 tp->max_sndwnd = tp->snd_wnd; 2815 needoutput = 1; 2816 } 2817 2818 /* 2819 * Process segments with URG. 2820 */ 2821 if ((thflags & TH_URG) && th->th_urp && 2822 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2823 /* 2824 * This is a kludge, but if we receive and accept 2825 * random urgent pointers, we'll crash in 2826 * soreceive. It's hard to imagine someone 2827 * actually wanting to send this much urgent data. 2828 */ 2829 SOCKBUF_LOCK(&so->so_rcv); 2830 if (th->th_urp + so->so_rcv.sb_cc > sb_max) { 2831 th->th_urp = 0; /* XXX */ 2832 thflags &= ~TH_URG; /* XXX */ 2833 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2834 goto dodata; /* XXX */ 2835 } 2836 /* 2837 * If this segment advances the known urgent pointer, 2838 * then mark the data stream. 
This should not happen 2839 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2840 * a FIN has been received from the remote side. 2841 * In these states we ignore the URG. 2842 * 2843 * According to RFC961 (Assigned Protocols), 2844 * the urgent pointer points to the last octet 2845 * of urgent data. We continue, however, 2846 * to consider it to indicate the first octet 2847 * of data past the urgent section as the original 2848 * spec states (in one of two places). 2849 */ 2850 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2851 tp->rcv_up = th->th_seq + th->th_urp; 2852 so->so_oobmark = so->so_rcv.sb_cc + 2853 (tp->rcv_up - tp->rcv_nxt) - 1; 2854 if (so->so_oobmark == 0) 2855 so->so_rcv.sb_state |= SBS_RCVATMARK; 2856 sohasoutofband(so); 2857 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2858 } 2859 SOCKBUF_UNLOCK(&so->so_rcv); 2860 /* 2861 * Remove out of band data so doesn't get presented to user. 2862 * This can happen independent of advancing the URG pointer, 2863 * but if two URG's are pending at once, some out-of-band 2864 * data may creep in... ick. 2865 */ 2866 if (th->th_urp <= (u_long)tlen && 2867 !(so->so_options & SO_OOBINLINE)) { 2868 /* hdr drop is delayed */ 2869 tcp_pulloutofband(so, th, m, drop_hdrlen); 2870 } 2871 } else { 2872 /* 2873 * If no out of band data is expected, 2874 * pull receive urgent pointer along 2875 * with the receive window. 2876 */ 2877 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2878 tp->rcv_up = tp->rcv_nxt; 2879 } 2880dodata: /* XXX */ 2881 INP_WLOCK_ASSERT(tp->t_inpcb); 2882 2883 /* 2884 * Process the segment text, merging it into the TCP sequencing queue, 2885 * and arranging for acknowledgment of receipt if necessary. 2886 * This process logically involves adjusting tp->rcv_wnd as data 2887 * is presented to the user (this happens in tcp_usrreq.c, 2888 * case PRU_RCVD). If a FIN has already been received on this 2889 * connection then we just ignore the text. 2890 */ 2891 if ((tlen || (thflags & TH_FIN)) && 2892 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2893 tcp_seq save_start = th->th_seq; 2894 m_adj(m, drop_hdrlen); /* delayed header drop */ 2895 /* 2896 * Insert segment which includes th into TCP reassembly queue 2897 * with control block tp. Set thflags to whether reassembly now 2898 * includes a segment with FIN. This handles the common case 2899 * inline (segment is the next to be received on an established 2900 * connection, and the queue is empty), avoiding linkage into 2901 * and removal from the queue and repetition of various 2902 * conversions. 2903 * Set DELACK for segments received in order, but ack 2904 * immediately when segments are out of order (so 2905 * fast retransmit can work). 2906 */ 2907 if (th->th_seq == tp->rcv_nxt && 2908 LIST_EMPTY(&tp->t_segq) && 2909 TCPS_HAVEESTABLISHED(tp->t_state)) { 2910 if (DELAY_ACK(tp, tlen)) 2911 tp->t_flags |= TF_DELACK; 2912 else 2913 tp->t_flags |= TF_ACKNOW; 2914 tp->rcv_nxt += tlen; 2915 thflags = th->th_flags & TH_FIN; 2916 TCPSTAT_INC(tcps_rcvpack); 2917 TCPSTAT_ADD(tcps_rcvbyte, tlen); 2918 ND6_HINT(tp); 2919 SOCKBUF_LOCK(&so->so_rcv); 2920 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 2921 m_freem(m); 2922 else 2923 sbappendstream_locked(&so->so_rcv, m); 2924 /* NB: sorwakeup_locked() does an implicit unlock. */ 2925 sorwakeup_locked(so); 2926 } else { 2927 /* 2928 * XXX: Due to the header drop above "th" is 2929 * theoretically invalid by now. Fortunately 2930 * m_adj() doesn't actually frees any mbufs 2931 * when trimming from the head. 
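			 * The trimmed bytes remain allocated, so reading
			 * th's fields in the call below is still safe.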
2932 */ 2933 thflags = tcp_reass(tp, th, &tlen, m); 2934 tp->t_flags |= TF_ACKNOW; 2935 } 2936 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT)) 2937 tcp_update_sack_list(tp, save_start, save_start + tlen); 2938#if 0 2939 /* 2940 * Note the amount of data that peer has sent into 2941 * our window, in order to estimate the sender's 2942 * buffer size. 2943 * XXX: Unused. 2944 */ 2945 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) 2946 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 2947 else 2948 len = so->so_rcv.sb_hiwat; 2949#endif 2950 } else { 2951 m_freem(m); 2952 thflags &= ~TH_FIN; 2953 } 2954 2955 /* 2956 * If FIN is received ACK the FIN and let the user know 2957 * that the connection is closing. 2958 */ 2959 if (thflags & TH_FIN) { 2960 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2961 socantrcvmore(so); 2962 /* 2963 * If connection is half-synchronized 2964 * (ie NEEDSYN flag on) then delay ACK, 2965 * so it may be piggybacked when SYN is sent. 2966 * Otherwise, since we received a FIN then no 2967 * more input can be expected, send ACK now. 2968 */ 2969 if (tp->t_flags & TF_NEEDSYN) 2970 tp->t_flags |= TF_DELACK; 2971 else 2972 tp->t_flags |= TF_ACKNOW; 2973 tp->rcv_nxt++; 2974 } 2975 switch (tp->t_state) { 2976 2977 /* 2978 * In SYN_RECEIVED and ESTABLISHED STATES 2979 * enter the CLOSE_WAIT state. 2980 */ 2981 case TCPS_SYN_RECEIVED: 2982 tp->t_starttime = ticks; 2983 /* FALLTHROUGH */ 2984 case TCPS_ESTABLISHED: 2985 tcp_state_change(tp, TCPS_CLOSE_WAIT); 2986 break; 2987 2988 /* 2989 * If still in FIN_WAIT_1 STATE FIN has not been acked so 2990 * enter the CLOSING state. 2991 */ 2992 case TCPS_FIN_WAIT_1: 2993 tcp_state_change(tp, TCPS_CLOSING); 2994 break; 2995 2996 /* 2997 * In FIN_WAIT_2 state enter the TIME_WAIT state, 2998 * starting the time-wait timer, turning off the other 2999 * standard timers. 3000 */ 3001 case TCPS_FIN_WAIT_2: 3002 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 3003 KASSERT(ti_locked == TI_WLOCKED, ("%s: dodata " 3004 "TCP_FIN_WAIT_2 ti_locked: %d", __func__, 3005 ti_locked)); 3006 3007 tcp_twstart(tp); 3008 INP_INFO_WUNLOCK(&V_tcbinfo); 3009 return; 3010 } 3011 } 3012 if (ti_locked == TI_WLOCKED) 3013 INP_INFO_WUNLOCK(&V_tcbinfo); 3014 ti_locked = TI_UNLOCKED; 3015 3016#ifdef TCPDEBUG 3017 if (so->so_options & SO_DEBUG) 3018 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 3019 &tcp_savetcp, 0); 3020#endif 3021 3022 /* 3023 * Return any desired output. 3024 */ 3025 if (needoutput || (tp->t_flags & TF_ACKNOW)) 3026 (void) tcp_output(tp); 3027 3028check_delack: 3029 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d", 3030 __func__, ti_locked)); 3031 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 3032 INP_WLOCK_ASSERT(tp->t_inpcb); 3033 3034 if (tp->t_flags & TF_DELACK) { 3035 tp->t_flags &= ~TF_DELACK; 3036 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 3037 } 3038 INP_WUNLOCK(tp->t_inpcb); 3039 return; 3040 3041dropafterack: 3042 /* 3043 * Generate an ACK dropping incoming segment if it occupies 3044 * sequence space, where the ACK reflects our state. 3045 * 3046 * We can now skip the test for the RST flag since all 3047 * paths to this code happen after packets containing 3048 * RST have been dropped. 3049 * 3050 * In the SYN-RECEIVED state, don't send an ACK unless the 3051 * segment we received passes the SYN-RECEIVED ACK test. 3052 * If it fails send a RST. 
This breaks the loop in the 3053 * "LAND" DoS attack, and also prevents an ACK storm 3054 * between two listening ports that have been sent forged 3055 * SYN segments, each with the source address of the other. 3056 */ 3057 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 3058 (SEQ_GT(tp->snd_una, th->th_ack) || 3059 SEQ_GT(th->th_ack, tp->snd_max)) ) { 3060 rstreason = BANDLIM_RST_OPENPORT; 3061 goto dropwithreset; 3062 } 3063#ifdef TCPDEBUG 3064 if (so->so_options & SO_DEBUG) 3065 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3066 &tcp_savetcp, 0); 3067#endif 3068 if (ti_locked == TI_WLOCKED) 3069 INP_INFO_WUNLOCK(&V_tcbinfo); 3070 ti_locked = TI_UNLOCKED; 3071 3072 tp->t_flags |= TF_ACKNOW; 3073 (void) tcp_output(tp); 3074 INP_WUNLOCK(tp->t_inpcb); 3075 m_freem(m); 3076 return; 3077 3078dropwithreset: 3079 if (ti_locked == TI_WLOCKED) 3080 INP_INFO_WUNLOCK(&V_tcbinfo); 3081 ti_locked = TI_UNLOCKED; 3082 3083 if (tp != NULL) { 3084 tcp_dropwithreset(m, th, tp, tlen, rstreason); 3085 INP_WUNLOCK(tp->t_inpcb); 3086 } else 3087 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 3088 return; 3089 3090drop: 3091 if (ti_locked == TI_WLOCKED) { 3092 INP_INFO_WUNLOCK(&V_tcbinfo); 3093 ti_locked = TI_UNLOCKED; 3094 } 3095#ifdef INVARIANTS 3096 else 3097 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 3098#endif 3099 3100 /* 3101 * Drop space held by incoming segment and return. 3102 */ 3103#ifdef TCPDEBUG 3104 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 3105 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3106 &tcp_savetcp, 0); 3107#endif 3108 if (tp != NULL) 3109 INP_WUNLOCK(tp->t_inpcb); 3110 m_freem(m); 3111} 3112 3113/* 3114 * Issue RST and make ACK acceptable to originator of segment. 3115 * The mbuf must still include the original packet header. 3116 * tp may be NULL. 3117 */ 3118static void 3119tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 3120 int tlen, int rstreason) 3121{ 3122#ifdef INET 3123 struct ip *ip; 3124#endif 3125#ifdef INET6 3126 struct ip6_hdr *ip6; 3127#endif 3128 3129 if (tp != NULL) { 3130 INP_WLOCK_ASSERT(tp->t_inpcb); 3131 } 3132 3133 /* Don't bother if destination was broadcast/multicast. */ 3134 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 3135 goto drop; 3136#ifdef INET6 3137 if (mtod(m, struct ip *)->ip_v == 6) { 3138 ip6 = mtod(m, struct ip6_hdr *); 3139 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 3140 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 3141 goto drop; 3142 /* IPv6 anycast check is done at tcp6_input() */ 3143 } 3144#endif 3145#if defined(INET) && defined(INET6) 3146 else 3147#endif 3148#ifdef INET 3149 { 3150 ip = mtod(m, struct ip *); 3151 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 3152 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 3153 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 3154 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 3155 goto drop; 3156 } 3157#endif 3158 3159 /* Perform bandwidth limiting. */ 3160 if (badport_bandlim(rstreason) < 0) 3161 goto drop; 3162 3163 /* tcp_respond consumes the mbuf chain. */ 3164 if (th->th_flags & TH_ACK) { 3165 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 3166 th->th_ack, TH_RST); 3167 } else { 3168 if (th->th_flags & TH_SYN) 3169 tlen++; 3170 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 3171 (tcp_seq)0, TH_RST|TH_ACK); 3172 } 3173 return; 3174drop: 3175 m_freem(m); 3176} 3177 3178/* 3179 * Parse TCP options and place in tcpopt. 
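 * Each option's length is validated against the remaining option space,
 * and a malformed length terminates parsing.  Options that are only legal
 * on SYN segments (MSS, window scale, SACK permitted) are ignored unless
 * TO_SYN is set.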
3180 */ 3181static void 3182tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 3183{ 3184 int opt, optlen; 3185 3186 to->to_flags = 0; 3187 for (; cnt > 0; cnt -= optlen, cp += optlen) { 3188 opt = cp[0]; 3189 if (opt == TCPOPT_EOL) 3190 break; 3191 if (opt == TCPOPT_NOP) 3192 optlen = 1; 3193 else { 3194 if (cnt < 2) 3195 break; 3196 optlen = cp[1]; 3197 if (optlen < 2 || optlen > cnt) 3198 break; 3199 } 3200 switch (opt) { 3201 case TCPOPT_MAXSEG: 3202 if (optlen != TCPOLEN_MAXSEG) 3203 continue; 3204 if (!(flags & TO_SYN)) 3205 continue; 3206 to->to_flags |= TOF_MSS; 3207 bcopy((char *)cp + 2, 3208 (char *)&to->to_mss, sizeof(to->to_mss)); 3209 to->to_mss = ntohs(to->to_mss); 3210 break; 3211 case TCPOPT_WINDOW: 3212 if (optlen != TCPOLEN_WINDOW) 3213 continue; 3214 if (!(flags & TO_SYN)) 3215 continue; 3216 to->to_flags |= TOF_SCALE; 3217 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 3218 break; 3219 case TCPOPT_TIMESTAMP: 3220 if (optlen != TCPOLEN_TIMESTAMP) 3221 continue; 3222 to->to_flags |= TOF_TS; 3223 bcopy((char *)cp + 2, 3224 (char *)&to->to_tsval, sizeof(to->to_tsval)); 3225 to->to_tsval = ntohl(to->to_tsval); 3226 bcopy((char *)cp + 6, 3227 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 3228 to->to_tsecr = ntohl(to->to_tsecr); 3229 break; 3230#ifdef TCP_SIGNATURE 3231 /* 3232 * XXX In order to reply to a host which has set the 3233 * TCP_SIGNATURE option in its initial SYN, we have to 3234 * record the fact that the option was observed here 3235 * for the syncache code to perform the correct response. 3236 */ 3237 case TCPOPT_SIGNATURE: 3238 if (optlen != TCPOLEN_SIGNATURE) 3239 continue; 3240 to->to_flags |= TOF_SIGNATURE; 3241 to->to_signature = cp + 2; 3242 break; 3243#endif 3244 case TCPOPT_SACK_PERMITTED: 3245 if (optlen != TCPOLEN_SACK_PERMITTED) 3246 continue; 3247 if (!(flags & TO_SYN)) 3248 continue; 3249 if (!V_tcp_do_sack) 3250 continue; 3251 to->to_flags |= TOF_SACKPERM; 3252 break; 3253 case TCPOPT_SACK: 3254 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 3255 continue; 3256 if (flags & TO_SYN) 3257 continue; 3258 to->to_flags |= TOF_SACK; 3259 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 3260 to->to_sacks = cp + 2; 3261 TCPSTAT_INC(tcps_sack_rcv_blocks); 3262 break; 3263 default: 3264 continue; 3265 } 3266 } 3267} 3268 3269/* 3270 * Pull out of band byte out of a segment so 3271 * it doesn't appear in the user's data queue. 3272 * It is still reflected in the segment length for 3273 * sequencing purposes. 3274 */ 3275static void 3276tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 3277 int off) 3278{ 3279 int cnt = off + th->th_urp - 1; 3280 3281 while (cnt >= 0) { 3282 if (m->m_len > cnt) { 3283 char *cp = mtod(m, caddr_t) + cnt; 3284 struct tcpcb *tp = sototcpcb(so); 3285 3286 INP_WLOCK_ASSERT(tp->t_inpcb); 3287 3288 tp->t_iobc = *cp; 3289 tp->t_oobflags |= TCPOOB_HAVEDATA; 3290 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 3291 m->m_len--; 3292 if (m->m_flags & M_PKTHDR) 3293 m->m_pkthdr.len--; 3294 return; 3295 } 3296 cnt -= m->m_len; 3297 m = m->m_next; 3298 if (m == NULL) 3299 break; 3300 } 3301 panic("tcp_pulloutofband"); 3302} 3303 3304/* 3305 * Collect new round-trip time estimate 3306 * and update averages and current timeout. 
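 *
 * In conventional terms the update below computes
 *
 *	srtt   = srtt + (rtt - srtt) / 8
 *	rttvar = rttvar + (|rtt - srtt| - rttvar) / 4
 *	rto    = srtt + 4 * rttvar
 *
 * carried out in fixed point as described in the body.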
3307 */ 3308static void 3309tcp_xmit_timer(struct tcpcb *tp, int rtt) 3310{ 3311 int delta; 3312 3313 INP_WLOCK_ASSERT(tp->t_inpcb); 3314 3315 TCPSTAT_INC(tcps_rttupdated); 3316 tp->t_rttupdated++; 3317 if (tp->t_srtt != 0) { 3318 /* 3319 * srtt is stored as fixed point with 5 bits after the 3320 * binary point (i.e., scaled by 8). The following magic 3321 * is equivalent to the smoothing algorithm in rfc793 with 3322 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 3323 * point). Adjust rtt to origin 0. 3324 */ 3325 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3326 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3327 3328 if ((tp->t_srtt += delta) <= 0) 3329 tp->t_srtt = 1; 3330 3331 /* 3332 * We accumulate a smoothed rtt variance (actually, a 3333 * smoothed mean difference), then set the retransmit 3334 * timer to smoothed rtt + 4 times the smoothed variance. 3335 * rttvar is stored as fixed point with 4 bits after the 3336 * binary point (scaled by 16). The following is 3337 * equivalent to rfc793 smoothing with an alpha of .75 3338 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 3339 * rfc793's wired-in beta. 3340 */ 3341 if (delta < 0) 3342 delta = -delta; 3343 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3344 if ((tp->t_rttvar += delta) <= 0) 3345 tp->t_rttvar = 1; 3346 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 3347 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3348 } else { 3349 /* 3350 * No rtt measurement yet - use the unsmoothed rtt. 3351 * Set the variance to half the rtt (so our first 3352 * retransmit happens at 3*rtt). 3353 */ 3354 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3355 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3356 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3357 } 3358 tp->t_rtttime = 0; 3359 tp->t_rxtshift = 0; 3360 3361 /* 3362 * the retransmit should happen at rtt + 4 * rttvar. 3363 * Because of the way we do the smoothing, srtt and rttvar 3364 * will each average +1/2 tick of bias. When we compute 3365 * the retransmit timer, we want 1/2 tick of rounding and 3366 * 1 extra tick because of +-1/2 tick uncertainty in the 3367 * firing of the timer. The bias will give us exactly the 3368 * 1.5 tick we need. But, because the bias is 3369 * statistical, we have to test that we don't drop below 3370 * the minimum feasible timer (which is 2 ticks). 3371 */ 3372 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3373 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 3374 3375 /* 3376 * We received an ack for a packet that wasn't retransmitted; 3377 * it is probably safe to discard any error indications we've 3378 * received recently. This isn't quite right, but close enough 3379 * for now (a route might have failed after we sent a segment, 3380 * and the return path might not be symmetrical). 3381 */ 3382 tp->t_softerror = 0; 3383} 3384 3385/* 3386 * Determine a reasonable value for maxseg size. 3387 * If the route is known, check route for mtu. 3388 * If none, use an mss that can be handled on the outgoing interface 3389 * without forcing IP to fragment. If no route is found, route has no mtu, 3390 * or the destination isn't local, use a default, hopefully conservative 3391 * size (usually 512 or the default IP max size, but no more than the mtu 3392 * of the interface), as we can't discover anything about intervening 3393 * gateways or networks. We also initialize the congestion/slow start 3394 * window to be a single segment if the destination isn't local. 
3395 * While looking at the routing entry, we also initialize other path-dependent 3396 * parameters from pre-set or cached values in the routing entry. 3397 * 3398 * Also take into account the space needed for options that we 3399 * send regularly. Make maxseg shorter by that amount to assure 3400 * that we can send maxseg amount of data even when the options 3401 * are present. Store the upper limit of the length of options plus 3402 * data in maxopd. 3403 * 3404 * NOTE that this routine is only called when we process an incoming 3405 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS 3406 * settings are handled in tcp_mssopt(). 3407 */ 3408void 3409tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer, 3410 struct hc_metrics_lite *metricptr, struct tcp_ifcap *cap) 3411{ 3412 int mss = 0; 3413 u_long maxmtu = 0; 3414 struct inpcb *inp = tp->t_inpcb; 3415 struct hc_metrics_lite metrics; 3416 int origoffer; 3417#ifdef INET6 3418 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3419 size_t min_protoh = isipv6 ? 3420 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 3421 sizeof (struct tcpiphdr); 3422#else 3423 const size_t min_protoh = sizeof(struct tcpiphdr); 3424#endif 3425 3426 INP_WLOCK_ASSERT(tp->t_inpcb); 3427 3428 if (mtuoffer != -1) { 3429 KASSERT(offer == -1, ("%s: conflict", __func__)); 3430 offer = mtuoffer - min_protoh; 3431 } 3432 origoffer = offer; 3433 3434 /* Initialize. */ 3435#ifdef INET6 3436 if (isipv6) { 3437 maxmtu = tcp_maxmtu6(&inp->inp_inc, cap); 3438 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt; 3439 } 3440#endif 3441#if defined(INET) && defined(INET6) 3442 else 3443#endif 3444#ifdef INET 3445 { 3446 maxmtu = tcp_maxmtu(&inp->inp_inc, cap); 3447 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt; 3448 } 3449#endif 3450 3451 /* 3452 * No route to sender, stay with default mss and return. 3453 */ 3454 if (maxmtu == 0) { 3455 /* 3456 * In case we return early we need to initialize metrics 3457 * to a defined state as tcp_hc_get() would do for us 3458 * if there was no cache hit. 3459 */ 3460 if (metricptr != NULL) 3461 bzero(metricptr, sizeof(struct hc_metrics_lite)); 3462 return; 3463 } 3464 3465 /* What have we got? */ 3466 switch (offer) { 3467 case 0: 3468 /* 3469 * Offer == 0 means that there was no MSS on the SYN 3470 * segment, in this case we use tcp_mssdflt as 3471 * already assigned to t_maxopd above. 3472 */ 3473 offer = tp->t_maxopd; 3474 break; 3475 3476 case -1: 3477 /* 3478 * Offer == -1 means that we didn't receive SYN yet. 3479 */ 3480 /* FALLTHROUGH */ 3481 3482 default: 3483 /* 3484 * Prevent DoS attack with too small MSS. Round up 3485 * to at least minmss. 3486 */ 3487 offer = max(offer, V_tcp_minmss); 3488 } 3489 3490 /* 3491 * rmx information is now retrieved from tcp_hostcache. 3492 */ 3493 tcp_hc_get(&inp->inp_inc, &metrics); 3494 if (metricptr != NULL) 3495 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite)); 3496 3497 /* 3498 * If there's a discovered mtu int tcp hostcache, use it 3499 * else, use the link mtu. 
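	 * (That is, prefer a path MTU cached in the hostcache, clamped to
	 * the link MTU; otherwise start from the link MTU determined above.)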
3500 */ 3501 if (metrics.rmx_mtu) 3502 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 3503 else { 3504#ifdef INET6 3505 if (isipv6) { 3506 mss = maxmtu - min_protoh; 3507 if (!V_path_mtu_discovery && 3508 !in6_localaddr(&inp->in6p_faddr)) 3509 mss = min(mss, V_tcp_v6mssdflt); 3510 } 3511#endif 3512#if defined(INET) && defined(INET6) 3513 else 3514#endif 3515#ifdef INET 3516 { 3517 mss = maxmtu - min_protoh; 3518 if (!V_path_mtu_discovery && 3519 !in_localaddr(inp->inp_faddr)) 3520 mss = min(mss, V_tcp_mssdflt); 3521 } 3522#endif 3523 /* 3524 * XXX - The above conditional (mss = maxmtu - min_protoh) 3525 * probably violates the TCP spec. 3526 * The problem is that, since we don't know the 3527 * other end's MSS, we are supposed to use a conservative 3528 * default. But, if we do that, then MTU discovery will 3529 * never actually take place, because the conservative 3530 * default is much less than the MTUs typically seen 3531 * on the Internet today. For the moment, we'll sweep 3532 * this under the carpet. 3533 * 3534 * The conservative default might not actually be a problem 3535 * if the only case this occurs is when sending an initial 3536 * SYN with options and data to a host we've never talked 3537 * to before. Then, they will reply with an MSS value which 3538 * will get recorded and the new parameters should get 3539 * recomputed. For Further Study. 3540 */ 3541 } 3542 mss = min(mss, offer); 3543 3544 /* 3545 * Sanity check: make sure that maxopd will be large 3546 * enough to allow some data on segments even if the 3547 * all the option space is used (40bytes). Otherwise 3548 * funny things may happen in tcp_output. 3549 */ 3550 mss = max(mss, 64); 3551 3552 /* 3553 * maxopd stores the maximum length of data AND options 3554 * in a segment; maxseg is the amount of data in a normal 3555 * segment. We need to store this value (maxopd) apart 3556 * from maxseg, because now every segment carries options 3557 * and thus we normally have somewhat less data in segments. 3558 */ 3559 tp->t_maxopd = mss; 3560 3561 /* 3562 * origoffer==-1 indicates that no segments were received yet. 3563 * In this case we just guess. 3564 */ 3565 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && 3566 (origoffer == -1 || 3567 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) 3568 mss -= TCPOLEN_TSTAMP_APPA; 3569 3570 tp->t_maxseg = mss; 3571} 3572 3573void 3574tcp_mss(struct tcpcb *tp, int offer) 3575{ 3576 int mss; 3577 u_long bufsize; 3578 struct inpcb *inp; 3579 struct socket *so; 3580 struct hc_metrics_lite metrics; 3581 struct tcp_ifcap cap; 3582 3583 KASSERT(tp != NULL, ("%s: tp == NULL", __func__)); 3584 3585 bzero(&cap, sizeof(cap)); 3586 tcp_mss_update(tp, offer, -1, &metrics, &cap); 3587 3588 mss = tp->t_maxseg; 3589 inp = tp->t_inpcb; 3590 3591 /* 3592 * If there's a pipesize, change the socket buffer to that size, 3593 * don't change if sb_hiwat is different than default (then it 3594 * has been changed on purpose with setsockopt). 3595 * Make the socket buffers an integral number of mss units; 3596 * if the mss is larger than the socket buffer, decrease the mss. 
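 * Both the send and receive buffers are rounded to a multiple of the mss
 * below, with growth capped at sb_max.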
3597 */ 3598 so = inp->inp_socket; 3599 SOCKBUF_LOCK(&so->so_snd); 3600 if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe) 3601 bufsize = metrics.rmx_sendpipe; 3602 else 3603 bufsize = so->so_snd.sb_hiwat; 3604 if (bufsize < mss) 3605 mss = bufsize; 3606 else { 3607 bufsize = roundup(bufsize, mss); 3608 if (bufsize > sb_max) 3609 bufsize = sb_max; 3610 if (bufsize > so->so_snd.sb_hiwat) 3611 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL); 3612 } 3613 SOCKBUF_UNLOCK(&so->so_snd); 3614 tp->t_maxseg = mss; 3615 3616 SOCKBUF_LOCK(&so->so_rcv); 3617 if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe) 3618 bufsize = metrics.rmx_recvpipe; 3619 else 3620 bufsize = so->so_rcv.sb_hiwat; 3621 if (bufsize > mss) { 3622 bufsize = roundup(bufsize, mss); 3623 if (bufsize > sb_max) 3624 bufsize = sb_max; 3625 if (bufsize > so->so_rcv.sb_hiwat) 3626 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL); 3627 } 3628 SOCKBUF_UNLOCK(&so->so_rcv); 3629 3630 /* Check the interface for TSO capabilities. */ 3631 if (cap.ifcap & CSUM_TSO) { 3632 tp->t_flags |= TF_TSO; 3633 tp->t_tsomax = cap.tsomax; 3634 } 3635} 3636 3637/* 3638 * Determine the MSS option to send on an outgoing SYN. 3639 */ 3640int 3641tcp_mssopt(struct in_conninfo *inc) 3642{ 3643 int mss = 0; 3644 u_long maxmtu = 0; 3645 u_long thcmtu = 0; 3646 size_t min_protoh; 3647 3648 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer")); 3649 3650#ifdef INET6 3651 if (inc->inc_flags & INC_ISIPV6) { 3652 mss = V_tcp_v6mssdflt; 3653 maxmtu = tcp_maxmtu6(inc, NULL); 3654 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 3655 } 3656#endif 3657#if defined(INET) && defined(INET6) 3658 else 3659#endif 3660#ifdef INET 3661 { 3662 mss = V_tcp_mssdflt; 3663 maxmtu = tcp_maxmtu(inc, NULL); 3664 min_protoh = sizeof(struct tcpiphdr); 3665 } 3666#endif 3667#if defined(INET6) || defined(INET) 3668 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3669#endif 3670 3671 if (maxmtu && thcmtu) 3672 mss = min(maxmtu, thcmtu) - min_protoh; 3673 else if (maxmtu || thcmtu) 3674 mss = max(maxmtu, thcmtu) - min_protoh; 3675 3676 return (mss); 3677} 3678 3679 3680/* 3681 * On a partial ack arrives, force the retransmission of the 3682 * next unacknowledged segment. Do not clear tp->t_dupacks. 3683 * By setting snd_nxt to ti_ack, this forces retransmission timer to 3684 * be started again. 3685 */ 3686static void 3687tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) 3688{ 3689 tcp_seq onxt = tp->snd_nxt; 3690 u_long ocwnd = tp->snd_cwnd; 3691 3692 INP_WLOCK_ASSERT(tp->t_inpcb); 3693 3694 tcp_timer_activate(tp, TT_REXMT, 0); 3695 tp->t_rtttime = 0; 3696 tp->snd_nxt = th->th_ack; 3697 /* 3698 * Set snd_cwnd to one segment beyond acknowledged offset. 3699 * (tp->snd_una has not yet been updated when this function is called.) 3700 */ 3701 tp->snd_cwnd = tp->t_maxseg + BYTES_THIS_ACK(tp, th); 3702 tp->t_flags |= TF_ACKNOW; 3703 (void) tcp_output(tp); 3704 tp->snd_cwnd = ocwnd; 3705 if (SEQ_GT(onxt, tp->snd_nxt)) 3706 tp->snd_nxt = onxt; 3707 /* 3708 * Partial window deflation. Relies on fact that tp->snd_una 3709 * not updated yet. 3710 */ 3711 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th)) 3712 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th); 3713 else 3714 tp->snd_cwnd = 0; 3715 tp->snd_cwnd += tp->t_maxseg; 3716} 3717
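
/*
 * For reference: the smoothing that tcp_xmit_timer() performs in fixed
 * point, restated in floating point.  This is an illustrative sketch only,
 * not part of the original sources; the function and variable names are
 * invented for the example, and the constants (gain 1/8 for srtt, 1/4 for
 * rttvar, rto = srtt + 4 * rttvar, first-sample rttvar = rtt / 2) follow
 * the comments in tcp_xmit_timer() above.  Guarded with #if 0 so it cannot
 * affect the build.
 */
#if 0
#include <math.h>	/* fabs(); userland reference only */

static void
rtt_reference_update(double rtt, double *srtt, double *rttvar, double *rto)
{

	if (*srtt == 0.0) {
		/* First measurement: unsmoothed rtt, variance of rtt/2. */
		*srtt = rtt;
		*rttvar = rtt / 2.0;
	} else {
		double delta = rtt - *srtt;

		*srtt += delta / 8.0;				/* gain 1/8 */
		*rttvar += (fabs(delta) - *rttvar) / 4.0;	/* gain 1/4 */
	}
	/* Retransmit at srtt + 4 * rttvar, as in TCP_REXMTVAL(). */
	*rto = *srtt + 4.0 * *rttvar;
}
#endif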