tcp_input.c revision 242255
1/*- 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995 3 * The Regents of the University of California. All rights reserved. 4 * Copyright (c) 2007-2008,2010 5 * Swinburne University of Technology, Melbourne, Australia. 6 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org> 7 * Copyright (c) 2010 The FreeBSD Foundation 8 * Copyright (c) 2010-2011 Juniper Networks, Inc. 9 * All rights reserved. 10 * 11 * Portions of this software were developed at the Centre for Advanced Internet 12 * Architectures, Swinburne University of Technology, by Lawrence Stewart, 13 * James Healy and David Hayes, made possible in part by a grant from the Cisco 14 * University Research Program Fund at Community Foundation Silicon Valley. 15 * 16 * Portions of this software were developed at the Centre for Advanced 17 * Internet Architectures, Swinburne University of Technology, Melbourne, 18 * Australia by David Hayes under sponsorship from the FreeBSD Foundation. 19 * 20 * Portions of this software were developed by Robert N. M. Watson under 21 * contract to Juniper Networks, Inc. 22 * 23 * Redistribution and use in source and binary forms, with or without 24 * modification, are permitted provided that the following conditions 25 * are met: 26 * 1. Redistributions of source code must retain the above copyright 27 * notice, this list of conditions and the following disclaimer. 28 * 2. Redistributions in binary form must reproduce the above copyright 29 * notice, this list of conditions and the following disclaimer in the 30 * documentation and/or other materials provided with the distribution. 31 * 4. Neither the name of the University nor the names of its contributors 32 * may be used to endorse or promote products derived from this software 33 * without specific prior written permission. 34 * 35 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 36 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 37 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 38 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 39 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 40 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 41 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 42 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 43 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 44 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 45 * SUCH DAMAGE. 46 * 47 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95 48 */ 49 50#include <sys/cdefs.h> 51__FBSDID("$FreeBSD: head/sys/netinet/tcp_input.c 242255 2012-10-28 18:33:52Z andre $"); 52 53#include "opt_ipfw.h" /* for ipfw_fwd */ 54#include "opt_inet.h" 55#include "opt_inet6.h" 56#include "opt_ipsec.h" 57#include "opt_tcpdebug.h" 58 59#include <sys/param.h> 60#include <sys/kernel.h> 61#include <sys/hhook.h> 62#include <sys/malloc.h> 63#include <sys/mbuf.h> 64#include <sys/proc.h> /* for proc0 declaration */ 65#include <sys/protosw.h> 66#include <sys/signalvar.h> 67#include <sys/socket.h> 68#include <sys/socketvar.h> 69#include <sys/sysctl.h> 70#include <sys/syslog.h> 71#include <sys/systm.h> 72 73#include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */ 74 75#include <vm/uma.h> 76 77#include <net/if.h> 78#include <net/pfil.h> 79#include <net/route.h> 80#include <net/vnet.h> 81 82#define TCPSTATES /* for logging */ 83 84#include <netinet/cc.h> 85#include <netinet/in.h> 86#include <netinet/in_pcb.h> 87#include <netinet/in_systm.h> 88#include <netinet/in_var.h> 89#include <netinet/ip.h> 90#include <netinet/ip_icmp.h> /* required for icmp_var.h */ 91#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 92#include <netinet/ip_var.h> 93#include <netinet/ip_options.h> 94#include <netinet/ip6.h> 95#include <netinet/icmp6.h> 96#include <netinet6/in6_pcb.h> 97#include <netinet6/ip6_var.h> 98#include <netinet6/nd6.h> 99#include <netinet/tcp_fsm.h> 100#include <netinet/tcp_seq.h> 101#include <netinet/tcp_timer.h> 102#include <netinet/tcp_var.h> 103#include <netinet6/tcp6_var.h> 104#include <netinet/tcpip.h> 105#include <netinet/tcp_syncache.h> 106#ifdef TCPDEBUG 107#include <netinet/tcp_debug.h> 108#endif /* TCPDEBUG */ 109#ifdef TCP_OFFLOAD 110#include <netinet/tcp_offload.h> 111#endif 112 113#ifdef IPSEC 114#include <netipsec/ipsec.h> 115#include <netipsec/ipsec6.h> 116#endif /*IPSEC*/ 117 118#include <machine/in_cksum.h> 119 120#include <security/mac/mac_framework.h> 121 122const int tcprexmtthresh = 3; 123 124VNET_DEFINE(struct tcpstat, tcpstat); 125SYSCTL_VNET_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW, 126 &VNET_NAME(tcpstat), tcpstat, 127 "TCP statistics (struct tcpstat, netinet/tcp_var.h)"); 128 129int tcp_log_in_vain = 0; 130SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW, 131 &tcp_log_in_vain, 0, 132 "Log all incoming TCP segments to closed ports"); 133 134VNET_DEFINE(int, blackhole) = 0; 135#define V_blackhole VNET(blackhole) 136SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW, 137 &VNET_NAME(blackhole), 0, 138 "Do not send RST on segments to closed ports"); 139 140VNET_DEFINE(int, tcp_delack_enabled) = 1; 141SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW, 142 &VNET_NAME(tcp_delack_enabled), 0, 143 "Delay ACK to try and piggyback it onto a data packet"); 144 145VNET_DEFINE(int, drop_synfin) = 0; 146#define V_drop_synfin VNET(drop_synfin) 147SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW, 148 &VNET_NAME(drop_synfin), 0, 149 "Drop TCP packets with SYN+FIN set"); 150 151VNET_DEFINE(int, tcp_do_rfc3042) = 1; 152#define V_tcp_do_rfc3042 VNET(tcp_do_rfc3042) 153SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW, 154 &VNET_NAME(tcp_do_rfc3042), 0, 155 "Enable RFC 3042 (Limited Transmit)"); 156 157VNET_DEFINE(int, tcp_do_rfc3390) = 1; 158SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW, 159 &VNET_NAME(tcp_do_rfc3390), 0, 160 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)"); 161 162VNET_DEFINE(int, tcp_do_rfc3465) = 1; 163SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_RW, 164 &VNET_NAME(tcp_do_rfc3465), 0, 165 "Enable RFC 3465 (Appropriate Byte Counting)"); 166 167VNET_DEFINE(int, tcp_abc_l_var) = 2; 168SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_RW, 169 &VNET_NAME(tcp_abc_l_var), 2, 170 "Cap the max cwnd increment during slow-start to this number of segments"); 171 172static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN"); 173 174VNET_DEFINE(int, tcp_do_ecn) = 0; 175SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_RW, 176 &VNET_NAME(tcp_do_ecn), 0, 177 "TCP ECN support"); 178 179VNET_DEFINE(int, tcp_ecn_maxretries) = 1; 180SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_RW, 181 &VNET_NAME(tcp_ecn_maxretries), 0, 182 "Max retries before giving up on ECN"); 183 184VNET_DEFINE(int, tcp_insecure_rst) = 0; 185#define V_tcp_insecure_rst VNET(tcp_insecure_rst) 186SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW, 187 &VNET_NAME(tcp_insecure_rst), 0, 188 "Follow the old (insecure) criteria for accepting RST packets"); 189 190VNET_DEFINE(int, tcp_recvspace) = 1024*64; 191#define V_tcp_recvspace VNET(tcp_recvspace) 192SYSCTL_VNET_INT(_net_inet_tcp, TCPCTL_RECVSPACE, recvspace, CTLFLAG_RW, 193 &VNET_NAME(tcp_recvspace), 0, "Initial receive socket buffer size"); 194 195VNET_DEFINE(int, tcp_do_autorcvbuf) = 1; 196#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf) 197SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW, 198 &VNET_NAME(tcp_do_autorcvbuf), 0, 199 "Enable automatic receive buffer sizing"); 200 201VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024; 202#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc) 203SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW, 204 &VNET_NAME(tcp_autorcvbuf_inc), 0, 205 "Incrementor step size of automatic receive buffer"); 206 207VNET_DEFINE(int, tcp_autorcvbuf_max) = 2*1024*1024; 208#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max) 209SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW, 210 &VNET_NAME(tcp_autorcvbuf_max), 0, 211 "Max size of automatic receive buffer"); 212 213VNET_DEFINE(struct inpcbhead, tcb); 214#define tcb6 tcb /* for KAME src sync over BSD*'s */ 215VNET_DEFINE(struct inpcbinfo, tcbinfo); 216 217static void tcp_dooptions(struct tcpopt *, u_char *, int, int); 218static void tcp_do_segment(struct mbuf *, struct tcphdr *, 219 struct socket *, struct tcpcb *, int, int, uint8_t, 220 int); 221static void tcp_dropwithreset(struct mbuf *, struct tcphdr *, 222 struct tcpcb *, int, int); 223static void tcp_pulloutofband(struct socket *, 224 struct tcphdr *, struct mbuf *, int); 225static void tcp_xmit_timer(struct tcpcb *, int); 226static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *); 227static void inline tcp_fields_to_host(struct tcphdr *); 228#ifdef TCP_SIGNATURE 229static void inline tcp_fields_to_net(struct tcphdr *); 230static int inline tcp_signature_verify_input(struct mbuf *, int, int, 231 int, struct tcpopt *, struct tcphdr *, u_int); 232#endif 233static void inline cc_ack_received(struct tcpcb *tp, struct tcphdr *th, 234 uint16_t type); 235static void inline cc_conn_init(struct tcpcb *tp); 236static void inline cc_post_recovery(struct tcpcb *tp, struct tcphdr *th); 237static void inline hhook_run_tcp_est_in(struct tcpcb *tp, 238 struct tcphdr *th, struct tcpopt *to); 239 240/* 241 * Kernel module interface for updating tcpstat. The argument is an index 242 * into tcpstat treated as an array of u_long. While this encodes the 243 * general layout of tcpstat into the caller, it doesn't encode its location, 244 * so that future changes to add, for example, per-CPU stats support won't 245 * cause binary compatibility problems for kernel modules. 246 */ 247void 248kmod_tcpstat_inc(int statnum) 249{ 250 251 (*((u_long *)&V_tcpstat + statnum))++; 252} 253 254/* 255 * Wrapper for the TCP established input helper hook. 256 */ 257static void inline 258hhook_run_tcp_est_in(struct tcpcb *tp, struct tcphdr *th, struct tcpopt *to) 259{ 260 struct tcp_hhook_data hhook_data; 261 262 if (V_tcp_hhh[HHOOK_TCP_EST_IN]->hhh_nhooks > 0) { 263 hhook_data.tp = tp; 264 hhook_data.th = th; 265 hhook_data.to = to; 266 267 hhook_run_hooks(V_tcp_hhh[HHOOK_TCP_EST_IN], &hhook_data, 268 tp->osd); 269 } 270} 271 272/* 273 * CC wrapper hook functions 274 */ 275static void inline 276cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type) 277{ 278 INP_WLOCK_ASSERT(tp->t_inpcb); 279 280 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th); 281 if (tp->snd_cwnd == min(tp->snd_cwnd, tp->snd_wnd)) 282 tp->ccv->flags |= CCF_CWND_LIMITED; 283 else 284 tp->ccv->flags &= ~CCF_CWND_LIMITED; 285 286 if (type == CC_ACK) { 287 if (tp->snd_cwnd > tp->snd_ssthresh) { 288 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 289 V_tcp_abc_l_var * tp->t_maxseg); 290 if (tp->t_bytes_acked >= tp->snd_cwnd) { 291 tp->t_bytes_acked -= tp->snd_cwnd; 292 tp->ccv->flags |= CCF_ABC_SENTAWND; 293 } 294 } else { 295 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 296 tp->t_bytes_acked = 0; 297 } 298 } 299 300 if (CC_ALGO(tp)->ack_received != NULL) { 301 /* XXXLAS: Find a way to live without this */ 302 tp->ccv->curack = th->th_ack; 303 CC_ALGO(tp)->ack_received(tp->ccv, type); 304 } 305} 306 307static void inline 308cc_conn_init(struct tcpcb *tp) 309{ 310 struct hc_metrics_lite metrics; 311 struct inpcb *inp = tp->t_inpcb; 312 int rtt; 313 314 INP_WLOCK_ASSERT(tp->t_inpcb); 315 316 tcp_hc_get(&inp->inp_inc, &metrics); 317 318 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) { 319 tp->t_srtt = rtt; 320 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE; 321 TCPSTAT_INC(tcps_usedrtt); 322 if (metrics.rmx_rttvar) { 323 tp->t_rttvar = metrics.rmx_rttvar; 324 TCPSTAT_INC(tcps_usedrttvar); 325 } else { 326 /* default variation is +- 1 rtt */ 327 tp->t_rttvar = 328 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; 329 } 330 TCPT_RANGESET(tp->t_rxtcur, 331 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, 332 tp->t_rttmin, TCPTV_REXMTMAX); 333 } 334 if (metrics.rmx_ssthresh) { 335 /* 336 * There's some sort of gateway or interface 337 * buffer limit on the path. Use this to set 338 * the slow start threshhold, but set the 339 * threshold to no less than 2*mss. 340 */ 341 tp->snd_ssthresh = max(2 * tp->t_maxseg, metrics.rmx_ssthresh); 342 TCPSTAT_INC(tcps_usedssthresh); 343 } 344 345 /* 346 * Set the initial slow-start flight size. 347 * 348 * RFC5681 Section 3.1 specifies the default conservative values. 349 * RFC3390 specifies slightly more aggressive values. 350 * 351 * If a SYN or SYN/ACK was lost and retransmitted, we have to 352 * reduce the initial CWND to one segment as congestion is likely 353 * requiring us to be cautious. 354 */ 355 if (tp->snd_cwnd == 1) 356 tp->snd_cwnd = tp->t_maxseg; /* SYN(-ACK) lost */ 357 else if (V_tcp_do_rfc3390) 358 tp->snd_cwnd = min(4 * tp->t_maxseg, 359 max(2 * tp->t_maxseg, 4380)); 360 else { 361 /* Per RFC5681 Section 3.1 */ 362 if (tp->t_maxseg > 2190) 363 tp->snd_cwnd = 2 * tp->t_maxseg; 364 else if (tp->t_maxseg > 1095) 365 tp->snd_cwnd = 3 * tp->t_maxseg; 366 else 367 tp->snd_cwnd = 4 * tp->t_maxseg; 368 } 369 370 if (CC_ALGO(tp)->conn_init != NULL) 371 CC_ALGO(tp)->conn_init(tp->ccv); 372} 373 374void inline 375cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type) 376{ 377 INP_WLOCK_ASSERT(tp->t_inpcb); 378 379 switch(type) { 380 case CC_NDUPACK: 381 if (!IN_FASTRECOVERY(tp->t_flags)) { 382 tp->snd_recover = tp->snd_max; 383 if (tp->t_flags & TF_ECN_PERMIT) 384 tp->t_flags |= TF_ECN_SND_CWR; 385 } 386 break; 387 case CC_ECN: 388 if (!IN_CONGRECOVERY(tp->t_flags)) { 389 TCPSTAT_INC(tcps_ecn_rcwnd); 390 tp->snd_recover = tp->snd_max; 391 if (tp->t_flags & TF_ECN_PERMIT) 392 tp->t_flags |= TF_ECN_SND_CWR; 393 } 394 break; 395 case CC_RTO: 396 tp->t_dupacks = 0; 397 tp->t_bytes_acked = 0; 398 EXIT_RECOVERY(tp->t_flags); 399 tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 / 400 tp->t_maxseg) * tp->t_maxseg; 401 tp->snd_cwnd = tp->t_maxseg; 402 break; 403 case CC_RTO_ERR: 404 TCPSTAT_INC(tcps_sndrexmitbad); 405 /* RTO was unnecessary, so reset everything. */ 406 tp->snd_cwnd = tp->snd_cwnd_prev; 407 tp->snd_ssthresh = tp->snd_ssthresh_prev; 408 tp->snd_recover = tp->snd_recover_prev; 409 if (tp->t_flags & TF_WASFRECOVERY) 410 ENTER_FASTRECOVERY(tp->t_flags); 411 if (tp->t_flags & TF_WASCRECOVERY) 412 ENTER_CONGRECOVERY(tp->t_flags); 413 tp->snd_nxt = tp->snd_max; 414 tp->t_flags &= ~TF_PREVVALID; 415 tp->t_badrxtwin = 0; 416 break; 417 } 418 419 if (CC_ALGO(tp)->cong_signal != NULL) { 420 if (th != NULL) 421 tp->ccv->curack = th->th_ack; 422 CC_ALGO(tp)->cong_signal(tp->ccv, type); 423 } 424} 425 426static void inline 427cc_post_recovery(struct tcpcb *tp, struct tcphdr *th) 428{ 429 INP_WLOCK_ASSERT(tp->t_inpcb); 430 431 /* XXXLAS: KASSERT that we're in recovery? */ 432 433 if (CC_ALGO(tp)->post_recovery != NULL) { 434 tp->ccv->curack = th->th_ack; 435 CC_ALGO(tp)->post_recovery(tp->ccv); 436 } 437 /* XXXLAS: EXIT_RECOVERY ? */ 438 tp->t_bytes_acked = 0; 439} 440 441static inline void 442tcp_fields_to_host(struct tcphdr *th) 443{ 444 445 th->th_seq = ntohl(th->th_seq); 446 th->th_ack = ntohl(th->th_ack); 447 th->th_win = ntohs(th->th_win); 448 th->th_urp = ntohs(th->th_urp); 449} 450 451#ifdef TCP_SIGNATURE 452static inline void 453tcp_fields_to_net(struct tcphdr *th) 454{ 455 456 th->th_seq = htonl(th->th_seq); 457 th->th_ack = htonl(th->th_ack); 458 th->th_win = htons(th->th_win); 459 th->th_urp = htons(th->th_urp); 460} 461 462static inline int 463tcp_signature_verify_input(struct mbuf *m, int off0, int tlen, int optlen, 464 struct tcpopt *to, struct tcphdr *th, u_int tcpbflag) 465{ 466 int ret; 467 468 tcp_fields_to_net(th); 469 ret = tcp_signature_verify(m, off0, tlen, optlen, to, th, tcpbflag); 470 tcp_fields_to_host(th); 471 return (ret); 472} 473#endif 474 475/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */ 476#ifdef INET6 477#define ND6_HINT(tp) \ 478do { \ 479 if ((tp) && (tp)->t_inpcb && \ 480 ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \ 481 nd6_nud_hint(NULL, NULL, 0); \ 482} while (0) 483#else 484#define ND6_HINT(tp) 485#endif 486 487/* 488 * Indicate whether this ack should be delayed. We can delay the ack if 489 * - there is no delayed ack timer in progress and 490 * - our last ack wasn't a 0-sized window. We never want to delay 491 * the ack that opens up a 0-sized window and 492 * - delayed acks are enabled or 493 * - this is a half-synchronized T/TCP connection. 494 */ 495#define DELAY_ACK(tp) \ 496 ((!tcp_timer_active(tp, TT_DELACK) && \ 497 (tp->t_flags & TF_RXWIN0SENT) == 0) && \ 498 (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN))) 499 500/* 501 * TCP input handling is split into multiple parts: 502 * tcp6_input is a thin wrapper around tcp_input for the extended 503 * ip6_protox[] call format in ip6_input 504 * tcp_input handles primary segment validation, inpcb lookup and 505 * SYN processing on listen sockets 506 * tcp_do_segment processes the ACK and text of the segment for 507 * establishing, established and closing connections 508 */ 509#ifdef INET6 510int 511tcp6_input(struct mbuf **mp, int *offp, int proto) 512{ 513 struct mbuf *m = *mp; 514 struct in6_ifaddr *ia6; 515 516 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE); 517 518 /* 519 * draft-itojun-ipv6-tcp-to-anycast 520 * better place to put this in? 521 */ 522 ia6 = ip6_getdstifaddr(m); 523 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) { 524 struct ip6_hdr *ip6; 525 526 ifa_free(&ia6->ia_ifa); 527 ip6 = mtod(m, struct ip6_hdr *); 528 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR, 529 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6); 530 return IPPROTO_DONE; 531 } 532 if (ia6) 533 ifa_free(&ia6->ia_ifa); 534 535 tcp_input(m, *offp); 536 return IPPROTO_DONE; 537} 538#endif /* INET6 */ 539 540void 541tcp_input(struct mbuf *m, int off0) 542{ 543 struct tcphdr *th = NULL; 544 struct ip *ip = NULL; 545 struct inpcb *inp = NULL; 546 struct tcpcb *tp = NULL; 547 struct socket *so = NULL; 548 u_char *optp = NULL; 549 int optlen = 0; 550#ifdef INET 551 int len; 552#endif 553 int tlen = 0, off; 554 int drop_hdrlen; 555 int thflags; 556 int rstreason = 0; /* For badport_bandlim accounting purposes */ 557#ifdef TCP_SIGNATURE 558 uint8_t sig_checked = 0; 559#endif 560 uint8_t iptos = 0; 561 struct m_tag *fwd_tag = NULL; 562#ifdef INET6 563 struct ip6_hdr *ip6 = NULL; 564 int isipv6; 565#else 566 const void *ip6 = NULL; 567#endif /* INET6 */ 568 struct tcpopt to; /* options in this segment */ 569 char *s = NULL; /* address and port logging */ 570 int ti_locked; 571#define TI_UNLOCKED 1 572#define TI_WLOCKED 2 573 574#ifdef TCPDEBUG 575 /* 576 * The size of tcp_saveipgen must be the size of the max ip header, 577 * now IPv6. 578 */ 579 u_char tcp_saveipgen[IP6_HDR_LEN]; 580 struct tcphdr tcp_savetcp; 581 short ostate = 0; 582#endif 583 584#ifdef INET6 585 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0; 586#endif 587 588 to.to_flags = 0; 589 TCPSTAT_INC(tcps_rcvtotal); 590 591#ifdef INET6 592 if (isipv6) { 593 /* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */ 594 595 if (m->m_len < (sizeof(*ip6) + sizeof(*th))) { 596 m = m_pullup(m, sizeof(*ip6) + sizeof(*th)); 597 if (m == NULL) { 598 TCPSTAT_INC(tcps_rcvshort); 599 return; 600 } 601 } 602 603 ip6 = mtod(m, struct ip6_hdr *); 604 th = (struct tcphdr *)((caddr_t)ip6 + off0); 605 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0; 606 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID_IPV6) { 607 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 608 th->th_sum = m->m_pkthdr.csum_data; 609 else 610 th->th_sum = in6_cksum_pseudo(ip6, tlen, 611 IPPROTO_TCP, m->m_pkthdr.csum_data); 612 th->th_sum ^= 0xffff; 613 } else 614 th->th_sum = in6_cksum(m, IPPROTO_TCP, off0, tlen); 615 if (th->th_sum) { 616 TCPSTAT_INC(tcps_rcvbadsum); 617 goto drop; 618 } 619 620 /* 621 * Be proactive about unspecified IPv6 address in source. 622 * As we use all-zero to indicate unbounded/unconnected pcb, 623 * unspecified IPv6 address can be used to confuse us. 624 * 625 * Note that packets with unspecified IPv6 destination is 626 * already dropped in ip6_input. 627 */ 628 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { 629 /* XXX stat */ 630 goto drop; 631 } 632 } 633#endif 634#if defined(INET) && defined(INET6) 635 else 636#endif 637#ifdef INET 638 { 639 /* 640 * Get IP and TCP header together in first mbuf. 641 * Note: IP leaves IP header in first mbuf. 642 */ 643 if (off0 > sizeof (struct ip)) { 644 ip_stripoptions(m); 645 off0 = sizeof(struct ip); 646 } 647 if (m->m_len < sizeof (struct tcpiphdr)) { 648 if ((m = m_pullup(m, sizeof (struct tcpiphdr))) 649 == NULL) { 650 TCPSTAT_INC(tcps_rcvshort); 651 return; 652 } 653 } 654 ip = mtod(m, struct ip *); 655 th = (struct tcphdr *)((caddr_t)ip + off0); 656 tlen = ntohs(ip->ip_len) - off0; 657 658 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 659 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 660 th->th_sum = m->m_pkthdr.csum_data; 661 else 662 th->th_sum = in_pseudo(ip->ip_src.s_addr, 663 ip->ip_dst.s_addr, 664 htonl(m->m_pkthdr.csum_data + tlen + 665 IPPROTO_TCP)); 666 th->th_sum ^= 0xffff; 667 } else { 668 struct ipovly *ipov = (struct ipovly *)ip; 669 670 /* 671 * Checksum extended TCP header and data. 672 */ 673 len = off0 + tlen; 674 bzero(ipov->ih_x1, sizeof(ipov->ih_x1)); 675 ipov->ih_len = htons(tlen); 676 th->th_sum = in_cksum(m, len); 677 } 678 if (th->th_sum) { 679 TCPSTAT_INC(tcps_rcvbadsum); 680 goto drop; 681 } 682 /* Re-initialization for later version check */ 683 ip->ip_v = IPVERSION; 684 } 685#endif /* INET */ 686 687#ifdef INET6 688 if (isipv6) 689 iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; 690#endif 691#if defined(INET) && defined(INET6) 692 else 693#endif 694#ifdef INET 695 iptos = ip->ip_tos; 696#endif 697 698 /* 699 * Check that TCP offset makes sense, 700 * pull out TCP options and adjust length. XXX 701 */ 702 off = th->th_off << 2; 703 if (off < sizeof (struct tcphdr) || off > tlen) { 704 TCPSTAT_INC(tcps_rcvbadoff); 705 goto drop; 706 } 707 tlen -= off; /* tlen is used instead of ti->ti_len */ 708 if (off > sizeof (struct tcphdr)) { 709#ifdef INET6 710 if (isipv6) { 711 IP6_EXTHDR_CHECK(m, off0, off, ); 712 ip6 = mtod(m, struct ip6_hdr *); 713 th = (struct tcphdr *)((caddr_t)ip6 + off0); 714 } 715#endif 716#if defined(INET) && defined(INET6) 717 else 718#endif 719#ifdef INET 720 { 721 if (m->m_len < sizeof(struct ip) + off) { 722 if ((m = m_pullup(m, sizeof (struct ip) + off)) 723 == NULL) { 724 TCPSTAT_INC(tcps_rcvshort); 725 return; 726 } 727 ip = mtod(m, struct ip *); 728 th = (struct tcphdr *)((caddr_t)ip + off0); 729 } 730 } 731#endif 732 optlen = off - sizeof (struct tcphdr); 733 optp = (u_char *)(th + 1); 734 } 735 thflags = th->th_flags; 736 737 /* 738 * Convert TCP protocol specific fields to host format. 739 */ 740 tcp_fields_to_host(th); 741 742 /* 743 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options. 744 */ 745 drop_hdrlen = off0 + off; 746 747 /* 748 * Locate pcb for segment; if we're likely to add or remove a 749 * connection then first acquire pcbinfo lock. There are two cases 750 * where we might discover later we need a write lock despite the 751 * flags: ACKs moving a connection out of the syncache, and ACKs for 752 * a connection in TIMEWAIT. 753 */ 754 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0) { 755 INP_INFO_WLOCK(&V_tcbinfo); 756 ti_locked = TI_WLOCKED; 757 } else 758 ti_locked = TI_UNLOCKED; 759 760findpcb: 761#ifdef INVARIANTS 762 if (ti_locked == TI_WLOCKED) { 763 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 764 } else { 765 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 766 } 767#endif 768 769 /* 770 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. 771 */ 772 if (V_pfilforward != 0) 773 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL); 774 775#ifdef INET6 776 if (isipv6 && fwd_tag != NULL) { 777 struct sockaddr_in6 *next_hop6; 778 779 next_hop6 = (struct sockaddr_in6 *)(fwd_tag + 1); 780 /* 781 * Transparently forwarded. Pretend to be the destination. 782 * Already got one like this? 783 */ 784 inp = in6_pcblookup_mbuf(&V_tcbinfo, 785 &ip6->ip6_src, th->th_sport, &ip6->ip6_dst, th->th_dport, 786 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif, m); 787 if (!inp) { 788 /* 789 * It's new. Try to find the ambushing socket. 790 * Because we've rewritten the destination address, 791 * any hardware-generated hash is ignored. 792 */ 793 inp = in6_pcblookup(&V_tcbinfo, &ip6->ip6_src, 794 th->th_sport, &next_hop6->sin6_addr, 795 next_hop6->sin6_port ? ntohs(next_hop6->sin6_port) : 796 th->th_dport, INPLOOKUP_WILDCARD | 797 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif); 798 } 799 /* Remove the tag from the packet. We don't need it anymore. */ 800 m_tag_delete(m, fwd_tag); 801 } else if (isipv6) { 802 inp = in6_pcblookup_mbuf(&V_tcbinfo, &ip6->ip6_src, 803 th->th_sport, &ip6->ip6_dst, th->th_dport, 804 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB, 805 m->m_pkthdr.rcvif, m); 806 } 807#endif /* INET6 */ 808#if defined(INET6) && defined(INET) 809 else 810#endif 811#ifdef INET 812 if (fwd_tag != NULL) { 813 struct sockaddr_in *next_hop; 814 815 next_hop = (struct sockaddr_in *)(fwd_tag+1); 816 /* 817 * Transparently forwarded. Pretend to be the destination. 818 * already got one like this? 819 */ 820 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, th->th_sport, 821 ip->ip_dst, th->th_dport, INPLOOKUP_WLOCKPCB, 822 m->m_pkthdr.rcvif, m); 823 if (!inp) { 824 /* 825 * It's new. Try to find the ambushing socket. 826 * Because we've rewritten the destination address, 827 * any hardware-generated hash is ignored. 828 */ 829 inp = in_pcblookup(&V_tcbinfo, ip->ip_src, 830 th->th_sport, next_hop->sin_addr, 831 next_hop->sin_port ? ntohs(next_hop->sin_port) : 832 th->th_dport, INPLOOKUP_WILDCARD | 833 INPLOOKUP_WLOCKPCB, m->m_pkthdr.rcvif); 834 } 835 /* Remove the tag from the packet. We don't need it anymore. */ 836 m_tag_delete(m, fwd_tag); 837 } else 838 inp = in_pcblookup_mbuf(&V_tcbinfo, ip->ip_src, 839 th->th_sport, ip->ip_dst, th->th_dport, 840 INPLOOKUP_WILDCARD | INPLOOKUP_WLOCKPCB, 841 m->m_pkthdr.rcvif, m); 842#endif /* INET */ 843 844 /* 845 * If the INPCB does not exist then all data in the incoming 846 * segment is discarded and an appropriate RST is sent back. 847 * XXX MRT Send RST using which routing table? 848 */ 849 if (inp == NULL) { 850 /* 851 * Log communication attempts to ports that are not 852 * in use. 853 */ 854 if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) || 855 tcp_log_in_vain == 2) { 856 if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6))) 857 log(LOG_INFO, "%s; %s: Connection attempt " 858 "to closed port\n", s, __func__); 859 } 860 /* 861 * When blackholing do not respond with a RST but 862 * completely ignore the segment and drop it. 863 */ 864 if ((V_blackhole == 1 && (thflags & TH_SYN)) || 865 V_blackhole == 2) 866 goto dropunlock; 867 868 rstreason = BANDLIM_RST_CLOSEDPORT; 869 goto dropwithreset; 870 } 871 INP_WLOCK_ASSERT(inp); 872 if (!(inp->inp_flags & INP_HW_FLOWID) 873 && (m->m_flags & M_FLOWID) 874 && ((inp->inp_socket == NULL) 875 || !(inp->inp_socket->so_options & SO_ACCEPTCONN))) { 876 inp->inp_flags |= INP_HW_FLOWID; 877 inp->inp_flags &= ~INP_SW_FLOWID; 878 inp->inp_flowid = m->m_pkthdr.flowid; 879 } 880#ifdef IPSEC 881#ifdef INET6 882 if (isipv6 && ipsec6_in_reject(m, inp)) { 883 V_ipsec6stat.in_polvio++; 884 goto dropunlock; 885 } else 886#endif /* INET6 */ 887 if (ipsec4_in_reject(m, inp) != 0) { 888 V_ipsec4stat.in_polvio++; 889 goto dropunlock; 890 } 891#endif /* IPSEC */ 892 893 /* 894 * Check the minimum TTL for socket. 895 */ 896 if (inp->inp_ip_minttl != 0) { 897#ifdef INET6 898 if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim) 899 goto dropunlock; 900 else 901#endif 902 if (inp->inp_ip_minttl > ip->ip_ttl) 903 goto dropunlock; 904 } 905 906 /* 907 * A previous connection in TIMEWAIT state is supposed to catch stray 908 * or duplicate segments arriving late. If this segment was a 909 * legitimate new connection attempt, the old INPCB gets removed and 910 * we can try again to find a listening socket. 911 * 912 * At this point, due to earlier optimism, we may hold only an inpcb 913 * lock, and not the inpcbinfo write lock. If so, we need to try to 914 * acquire it, or if that fails, acquire a reference on the inpcb, 915 * drop all locks, acquire a global write lock, and then re-acquire 916 * the inpcb lock. We may at that point discover that another thread 917 * has tried to free the inpcb, in which case we need to loop back 918 * and try to find a new inpcb to deliver to. 919 * 920 * XXXRW: It may be time to rethink timewait locking. 921 */ 922relocked: 923 if (inp->inp_flags & INP_TIMEWAIT) { 924 if (ti_locked == TI_UNLOCKED) { 925 if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) { 926 in_pcbref(inp); 927 INP_WUNLOCK(inp); 928 INP_INFO_WLOCK(&V_tcbinfo); 929 ti_locked = TI_WLOCKED; 930 INP_WLOCK(inp); 931 if (in_pcbrele_wlocked(inp)) { 932 inp = NULL; 933 goto findpcb; 934 } 935 } else 936 ti_locked = TI_WLOCKED; 937 } 938 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 939 940 if (thflags & TH_SYN) 941 tcp_dooptions(&to, optp, optlen, TO_SYN); 942 /* 943 * NB: tcp_twcheck unlocks the INP and frees the mbuf. 944 */ 945 if (tcp_twcheck(inp, &to, th, m, tlen)) 946 goto findpcb; 947 INP_INFO_WUNLOCK(&V_tcbinfo); 948 return; 949 } 950 /* 951 * The TCPCB may no longer exist if the connection is winding 952 * down or it is in the CLOSED state. Either way we drop the 953 * segment and send an appropriate response. 954 */ 955 tp = intotcpcb(inp); 956 if (tp == NULL || tp->t_state == TCPS_CLOSED) { 957 rstreason = BANDLIM_RST_CLOSEDPORT; 958 goto dropwithreset; 959 } 960 961#ifdef TCP_OFFLOAD 962 if (tp->t_flags & TF_TOE) { 963 tcp_offload_input(tp, m); 964 m = NULL; /* consumed by the TOE driver */ 965 goto dropunlock; 966 } 967#endif 968 969 /* 970 * We've identified a valid inpcb, but it could be that we need an 971 * inpcbinfo write lock but don't hold it. In this case, attempt to 972 * acquire using the same strategy as the TIMEWAIT case above. If we 973 * relock, we have to jump back to 'relocked' as the connection might 974 * now be in TIMEWAIT. 975 */ 976#ifdef INVARIANTS 977 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0) 978 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 979#endif 980 if (tp->t_state != TCPS_ESTABLISHED) { 981 if (ti_locked == TI_UNLOCKED) { 982 if (INP_INFO_TRY_WLOCK(&V_tcbinfo) == 0) { 983 in_pcbref(inp); 984 INP_WUNLOCK(inp); 985 INP_INFO_WLOCK(&V_tcbinfo); 986 ti_locked = TI_WLOCKED; 987 INP_WLOCK(inp); 988 if (in_pcbrele_wlocked(inp)) { 989 inp = NULL; 990 goto findpcb; 991 } 992 goto relocked; 993 } else 994 ti_locked = TI_WLOCKED; 995 } 996 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 997 } 998 999#ifdef MAC 1000 INP_WLOCK_ASSERT(inp); 1001 if (mac_inpcb_check_deliver(inp, m)) 1002 goto dropunlock; 1003#endif 1004 so = inp->inp_socket; 1005 KASSERT(so != NULL, ("%s: so == NULL", __func__)); 1006#ifdef TCPDEBUG 1007 if (so->so_options & SO_DEBUG) { 1008 ostate = tp->t_state; 1009#ifdef INET6 1010 if (isipv6) { 1011 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6)); 1012 } else 1013#endif 1014 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip)); 1015 tcp_savetcp = *th; 1016 } 1017#endif /* TCPDEBUG */ 1018 /* 1019 * When the socket is accepting connections (the INPCB is in LISTEN 1020 * state) we look into the SYN cache if this is a new connection 1021 * attempt or the completion of a previous one. Because listen 1022 * sockets are never in TCPS_ESTABLISHED, the V_tcbinfo lock will be 1023 * held in this case. 1024 */ 1025 if (so->so_options & SO_ACCEPTCONN) { 1026 struct in_conninfo inc; 1027 1028 KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but " 1029 "tp not listening", __func__)); 1030 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 1031 1032 bzero(&inc, sizeof(inc)); 1033#ifdef INET6 1034 if (isipv6) { 1035 inc.inc_flags |= INC_ISIPV6; 1036 inc.inc6_faddr = ip6->ip6_src; 1037 inc.inc6_laddr = ip6->ip6_dst; 1038 } else 1039#endif 1040 { 1041 inc.inc_faddr = ip->ip_src; 1042 inc.inc_laddr = ip->ip_dst; 1043 } 1044 inc.inc_fport = th->th_sport; 1045 inc.inc_lport = th->th_dport; 1046 inc.inc_fibnum = so->so_fibnum; 1047 1048 /* 1049 * Check for an existing connection attempt in syncache if 1050 * the flag is only ACK. A successful lookup creates a new 1051 * socket appended to the listen queue in SYN_RECEIVED state. 1052 */ 1053 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) { 1054 /* 1055 * Parse the TCP options here because 1056 * syncookies need access to the reflected 1057 * timestamp. 1058 */ 1059 tcp_dooptions(&to, optp, optlen, 0); 1060 /* 1061 * NB: syncache_expand() doesn't unlock 1062 * inp and tcpinfo locks. 1063 */ 1064 if (!syncache_expand(&inc, &to, th, &so, m)) { 1065 /* 1066 * No syncache entry or ACK was not 1067 * for our SYN/ACK. Send a RST. 1068 * NB: syncache did its own logging 1069 * of the failure cause. 1070 */ 1071 rstreason = BANDLIM_RST_OPENPORT; 1072 goto dropwithreset; 1073 } 1074 if (so == NULL) { 1075 /* 1076 * We completed the 3-way handshake 1077 * but could not allocate a socket 1078 * either due to memory shortage, 1079 * listen queue length limits or 1080 * global socket limits. Send RST 1081 * or wait and have the remote end 1082 * retransmit the ACK for another 1083 * try. 1084 */ 1085 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1086 log(LOG_DEBUG, "%s; %s: Listen socket: " 1087 "Socket allocation failed due to " 1088 "limits or memory shortage, %s\n", 1089 s, __func__, 1090 V_tcp_sc_rst_sock_fail ? 1091 "sending RST" : "try again"); 1092 if (V_tcp_sc_rst_sock_fail) { 1093 rstreason = BANDLIM_UNLIMITED; 1094 goto dropwithreset; 1095 } else 1096 goto dropunlock; 1097 } 1098 /* 1099 * Socket is created in state SYN_RECEIVED. 1100 * Unlock the listen socket, lock the newly 1101 * created socket and update the tp variable. 1102 */ 1103 INP_WUNLOCK(inp); /* listen socket */ 1104 inp = sotoinpcb(so); 1105 INP_WLOCK(inp); /* new connection */ 1106 tp = intotcpcb(inp); 1107 KASSERT(tp->t_state == TCPS_SYN_RECEIVED, 1108 ("%s: ", __func__)); 1109#ifdef TCP_SIGNATURE 1110 if (sig_checked == 0) { 1111 tcp_dooptions(&to, optp, optlen, 1112 (thflags & TH_SYN) ? TO_SYN : 0); 1113 if (!tcp_signature_verify_input(m, off0, tlen, 1114 optlen, &to, th, tp->t_flags)) { 1115 1116 /* 1117 * In SYN_SENT state if it receives an 1118 * RST, it is allowed for further 1119 * processing. 1120 */ 1121 if ((thflags & TH_RST) == 0 || 1122 (tp->t_state == TCPS_SYN_SENT) == 0) 1123 goto dropunlock; 1124 } 1125 sig_checked = 1; 1126 } 1127#endif 1128 1129 /* 1130 * Process the segment and the data it 1131 * contains. tcp_do_segment() consumes 1132 * the mbuf chain and unlocks the inpcb. 1133 */ 1134 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, 1135 iptos, ti_locked); 1136 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1137 return; 1138 } 1139 /* 1140 * Segment flag validation for new connection attempts: 1141 * 1142 * Our (SYN|ACK) response was rejected. 1143 * Check with syncache and remove entry to prevent 1144 * retransmits. 1145 * 1146 * NB: syncache_chkrst does its own logging of failure 1147 * causes. 1148 */ 1149 if (thflags & TH_RST) { 1150 syncache_chkrst(&inc, th); 1151 goto dropunlock; 1152 } 1153 /* 1154 * We can't do anything without SYN. 1155 */ 1156 if ((thflags & TH_SYN) == 0) { 1157 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1158 log(LOG_DEBUG, "%s; %s: Listen socket: " 1159 "SYN is missing, segment ignored\n", 1160 s, __func__); 1161 TCPSTAT_INC(tcps_badsyn); 1162 goto dropunlock; 1163 } 1164 /* 1165 * (SYN|ACK) is bogus on a listen socket. 1166 */ 1167 if (thflags & TH_ACK) { 1168 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1169 log(LOG_DEBUG, "%s; %s: Listen socket: " 1170 "SYN|ACK invalid, segment rejected\n", 1171 s, __func__); 1172 syncache_badack(&inc); /* XXX: Not needed! */ 1173 TCPSTAT_INC(tcps_badsyn); 1174 rstreason = BANDLIM_RST_OPENPORT; 1175 goto dropwithreset; 1176 } 1177 /* 1178 * If the drop_synfin option is enabled, drop all 1179 * segments with both the SYN and FIN bits set. 1180 * This prevents e.g. nmap from identifying the 1181 * TCP/IP stack. 1182 * XXX: Poor reasoning. nmap has other methods 1183 * and is constantly refining its stack detection 1184 * strategies. 1185 * XXX: This is a violation of the TCP specification 1186 * and was used by RFC1644. 1187 */ 1188 if ((thflags & TH_FIN) && V_drop_synfin) { 1189 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1190 log(LOG_DEBUG, "%s; %s: Listen socket: " 1191 "SYN|FIN segment ignored (based on " 1192 "sysctl setting)\n", s, __func__); 1193 TCPSTAT_INC(tcps_badsyn); 1194 goto dropunlock; 1195 } 1196 /* 1197 * Segment's flags are (SYN) or (SYN|FIN). 1198 * 1199 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored 1200 * as they do not affect the state of the TCP FSM. 1201 * The data pointed to by TH_URG and th_urp is ignored. 1202 */ 1203 KASSERT((thflags & (TH_RST|TH_ACK)) == 0, 1204 ("%s: Listen socket: TH_RST or TH_ACK set", __func__)); 1205 KASSERT(thflags & (TH_SYN), 1206 ("%s: Listen socket: TH_SYN not set", __func__)); 1207#ifdef INET6 1208 /* 1209 * If deprecated address is forbidden, 1210 * we do not accept SYN to deprecated interface 1211 * address to prevent any new inbound connection from 1212 * getting established. 1213 * When we do not accept SYN, we send a TCP RST, 1214 * with deprecated source address (instead of dropping 1215 * it). We compromise it as it is much better for peer 1216 * to send a RST, and RST will be the final packet 1217 * for the exchange. 1218 * 1219 * If we do not forbid deprecated addresses, we accept 1220 * the SYN packet. RFC2462 does not suggest dropping 1221 * SYN in this case. 1222 * If we decipher RFC2462 5.5.4, it says like this: 1223 * 1. use of deprecated addr with existing 1224 * communication is okay - "SHOULD continue to be 1225 * used" 1226 * 2. use of it with new communication: 1227 * (2a) "SHOULD NOT be used if alternate address 1228 * with sufficient scope is available" 1229 * (2b) nothing mentioned otherwise. 1230 * Here we fall into (2b) case as we have no choice in 1231 * our source address selection - we must obey the peer. 1232 * 1233 * The wording in RFC2462 is confusing, and there are 1234 * multiple description text for deprecated address 1235 * handling - worse, they are not exactly the same. 1236 * I believe 5.5.4 is the best one, so we follow 5.5.4. 1237 */ 1238 if (isipv6 && !V_ip6_use_deprecated) { 1239 struct in6_ifaddr *ia6; 1240 1241 ia6 = ip6_getdstifaddr(m); 1242 if (ia6 != NULL && 1243 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) { 1244 ifa_free(&ia6->ia_ifa); 1245 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1246 log(LOG_DEBUG, "%s; %s: Listen socket: " 1247 "Connection attempt to deprecated " 1248 "IPv6 address rejected\n", 1249 s, __func__); 1250 rstreason = BANDLIM_RST_OPENPORT; 1251 goto dropwithreset; 1252 } 1253 if (ia6) 1254 ifa_free(&ia6->ia_ifa); 1255 } 1256#endif /* INET6 */ 1257 /* 1258 * Basic sanity checks on incoming SYN requests: 1259 * Don't respond if the destination is a link layer 1260 * broadcast according to RFC1122 4.2.3.10, p. 104. 1261 * If it is from this socket it must be forged. 1262 * Don't respond if the source or destination is a 1263 * global or subnet broad- or multicast address. 1264 * Note that it is quite possible to receive unicast 1265 * link-layer packets with a broadcast IP address. Use 1266 * in_broadcast() to find them. 1267 */ 1268 if (m->m_flags & (M_BCAST|M_MCAST)) { 1269 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1270 log(LOG_DEBUG, "%s; %s: Listen socket: " 1271 "Connection attempt from broad- or multicast " 1272 "link layer address ignored\n", s, __func__); 1273 goto dropunlock; 1274 } 1275#ifdef INET6 1276 if (isipv6) { 1277 if (th->th_dport == th->th_sport && 1278 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) { 1279 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1280 log(LOG_DEBUG, "%s; %s: Listen socket: " 1281 "Connection attempt to/from self " 1282 "ignored\n", s, __func__); 1283 goto dropunlock; 1284 } 1285 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 1286 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) { 1287 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1288 log(LOG_DEBUG, "%s; %s: Listen socket: " 1289 "Connection attempt from/to multicast " 1290 "address ignored\n", s, __func__); 1291 goto dropunlock; 1292 } 1293 } 1294#endif 1295#if defined(INET) && defined(INET6) 1296 else 1297#endif 1298#ifdef INET 1299 { 1300 if (th->th_dport == th->th_sport && 1301 ip->ip_dst.s_addr == ip->ip_src.s_addr) { 1302 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1303 log(LOG_DEBUG, "%s; %s: Listen socket: " 1304 "Connection attempt from/to self " 1305 "ignored\n", s, __func__); 1306 goto dropunlock; 1307 } 1308 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 1309 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 1310 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 1311 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) { 1312 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1313 log(LOG_DEBUG, "%s; %s: Listen socket: " 1314 "Connection attempt from/to broad- " 1315 "or multicast address ignored\n", 1316 s, __func__); 1317 goto dropunlock; 1318 } 1319 } 1320#endif 1321 /* 1322 * SYN appears to be valid. Create compressed TCP state 1323 * for syncache. 1324 */ 1325#ifdef TCPDEBUG 1326 if (so->so_options & SO_DEBUG) 1327 tcp_trace(TA_INPUT, ostate, tp, 1328 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1329#endif 1330 tcp_dooptions(&to, optp, optlen, TO_SYN); 1331 syncache_add(&inc, &to, th, inp, &so, m, NULL, NULL); 1332 /* 1333 * Entry added to syncache and mbuf consumed. 1334 * Everything already unlocked by syncache_add(). 1335 */ 1336 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1337 return; 1338 } 1339 1340#ifdef TCP_SIGNATURE 1341 if (sig_checked == 0) { 1342 tcp_dooptions(&to, optp, optlen, 1343 (thflags & TH_SYN) ? TO_SYN : 0); 1344 if (!tcp_signature_verify_input(m, off0, tlen, optlen, &to, 1345 th, tp->t_flags)) { 1346 1347 /* 1348 * In SYN_SENT state if it receives an RST, it is 1349 * allowed for further processing. 1350 */ 1351 if ((thflags & TH_RST) == 0 || 1352 (tp->t_state == TCPS_SYN_SENT) == 0) 1353 goto dropunlock; 1354 } 1355 sig_checked = 1; 1356 } 1357#endif 1358 1359 /* 1360 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later 1361 * state. tcp_do_segment() always consumes the mbuf chain, unlocks 1362 * the inpcb, and unlocks pcbinfo. 1363 */ 1364 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked); 1365 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1366 return; 1367 1368dropwithreset: 1369 if (ti_locked == TI_WLOCKED) { 1370 INP_INFO_WUNLOCK(&V_tcbinfo); 1371 ti_locked = TI_UNLOCKED; 1372 } 1373#ifdef INVARIANTS 1374 else { 1375 KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropwithreset " 1376 "ti_locked: %d", __func__, ti_locked)); 1377 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1378 } 1379#endif 1380 1381 if (inp != NULL) { 1382 tcp_dropwithreset(m, th, tp, tlen, rstreason); 1383 INP_WUNLOCK(inp); 1384 } else 1385 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 1386 m = NULL; /* mbuf chain got consumed. */ 1387 goto drop; 1388 1389dropunlock: 1390 if (ti_locked == TI_WLOCKED) { 1391 INP_INFO_WUNLOCK(&V_tcbinfo); 1392 ti_locked = TI_UNLOCKED; 1393 } 1394#ifdef INVARIANTS 1395 else { 1396 KASSERT(ti_locked == TI_UNLOCKED, ("%s: dropunlock " 1397 "ti_locked: %d", __func__, ti_locked)); 1398 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1399 } 1400#endif 1401 1402 if (inp != NULL) 1403 INP_WUNLOCK(inp); 1404 1405drop: 1406 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1407 if (s != NULL) 1408 free(s, M_TCPLOG); 1409 if (m != NULL) 1410 m_freem(m); 1411} 1412 1413static void 1414tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 1415 struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos, 1416 int ti_locked) 1417{ 1418 int thflags, acked, ourfinisacked, needoutput = 0; 1419 int rstreason, todrop, win; 1420 u_long tiwin; 1421 struct tcpopt to; 1422 1423#ifdef TCPDEBUG 1424 /* 1425 * The size of tcp_saveipgen must be the size of the max ip header, 1426 * now IPv6. 1427 */ 1428 u_char tcp_saveipgen[IP6_HDR_LEN]; 1429 struct tcphdr tcp_savetcp; 1430 short ostate = 0; 1431#endif 1432 thflags = th->th_flags; 1433 tp->sackhint.last_sack_ack = 0; 1434 1435 /* 1436 * If this is either a state-changing packet or current state isn't 1437 * established, we require a write lock on tcbinfo. Otherwise, we 1438 * allow the tcbinfo to be in either alocked or unlocked, as the 1439 * caller may have unnecessarily acquired a write lock due to a race. 1440 */ 1441 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 || 1442 tp->t_state != TCPS_ESTABLISHED) { 1443 KASSERT(ti_locked == TI_WLOCKED, ("%s ti_locked %d for " 1444 "SYN/FIN/RST/!EST", __func__, ti_locked)); 1445 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 1446 } else { 1447#ifdef INVARIANTS 1448 if (ti_locked == TI_WLOCKED) 1449 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 1450 else { 1451 KASSERT(ti_locked == TI_UNLOCKED, ("%s: EST " 1452 "ti_locked: %d", __func__, ti_locked)); 1453 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1454 } 1455#endif 1456 } 1457 INP_WLOCK_ASSERT(tp->t_inpcb); 1458 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 1459 __func__)); 1460 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 1461 __func__)); 1462 1463 /* 1464 * Segment received on connection. 1465 * Reset idle time and keep-alive timer. 1466 * XXX: This should be done after segment 1467 * validation to ignore broken/spoofed segs. 1468 */ 1469 tp->t_rcvtime = ticks; 1470 if (TCPS_HAVEESTABLISHED(tp->t_state)) 1471 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp)); 1472 1473 /* 1474 * Unscale the window into a 32-bit value. 1475 * For the SYN_SENT state the scale is zero. 1476 */ 1477 tiwin = th->th_win << tp->snd_scale; 1478 1479 /* 1480 * TCP ECN processing. 1481 */ 1482 if (tp->t_flags & TF_ECN_PERMIT) { 1483 if (thflags & TH_CWR) 1484 tp->t_flags &= ~TF_ECN_SND_ECE; 1485 switch (iptos & IPTOS_ECN_MASK) { 1486 case IPTOS_ECN_CE: 1487 tp->t_flags |= TF_ECN_SND_ECE; 1488 TCPSTAT_INC(tcps_ecn_ce); 1489 break; 1490 case IPTOS_ECN_ECT0: 1491 TCPSTAT_INC(tcps_ecn_ect0); 1492 break; 1493 case IPTOS_ECN_ECT1: 1494 TCPSTAT_INC(tcps_ecn_ect1); 1495 break; 1496 } 1497 /* Congestion experienced. */ 1498 if (thflags & TH_ECE) { 1499 cc_cong_signal(tp, th, CC_ECN); 1500 } 1501 } 1502 1503 /* 1504 * Parse options on any incoming segment. 1505 */ 1506 tcp_dooptions(&to, (u_char *)(th + 1), 1507 (th->th_off << 2) - sizeof(struct tcphdr), 1508 (thflags & TH_SYN) ? TO_SYN : 0); 1509 1510 /* 1511 * If echoed timestamp is later than the current time, 1512 * fall back to non RFC1323 RTT calculation. Normalize 1513 * timestamp if syncookies were used when this connection 1514 * was established. 1515 */ 1516 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 1517 to.to_tsecr -= tp->ts_offset; 1518 if (TSTMP_GT(to.to_tsecr, tcp_ts_getticks())) 1519 to.to_tsecr = 0; 1520 } 1521 1522 /* 1523 * Process options only when we get SYN/ACK back. The SYN case 1524 * for incoming connections is handled in tcp_syncache. 1525 * According to RFC1323 the window field in a SYN (i.e., a <SYN> 1526 * or <SYN,ACK>) segment itself is never scaled. 1527 * XXX this is traditional behavior, may need to be cleaned up. 1528 */ 1529 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 1530 if ((to.to_flags & TOF_SCALE) && 1531 (tp->t_flags & TF_REQ_SCALE)) { 1532 tp->t_flags |= TF_RCVD_SCALE; 1533 tp->snd_scale = to.to_wscale; 1534 } 1535 /* 1536 * Initial send window. It will be updated with 1537 * the next incoming segment to the scaled value. 1538 */ 1539 tp->snd_wnd = th->th_win; 1540 if (to.to_flags & TOF_TS) { 1541 tp->t_flags |= TF_RCVD_TSTMP; 1542 tp->ts_recent = to.to_tsval; 1543 tp->ts_recent_age = tcp_ts_getticks(); 1544 } 1545 if (to.to_flags & TOF_MSS) 1546 tcp_mss(tp, to.to_mss); 1547 if ((tp->t_flags & TF_SACK_PERMIT) && 1548 (to.to_flags & TOF_SACKPERM) == 0) 1549 tp->t_flags &= ~TF_SACK_PERMIT; 1550 } 1551 1552 /* 1553 * Header prediction: check for the two common cases 1554 * of a uni-directional data xfer. If the packet has 1555 * no control flags, is in-sequence, the window didn't 1556 * change and we're not retransmitting, it's a 1557 * candidate. If the length is zero and the ack moved 1558 * forward, we're the sender side of the xfer. Just 1559 * free the data acked & wake any higher level process 1560 * that was blocked waiting for space. If the length 1561 * is non-zero and the ack didn't move, we're the 1562 * receiver side. If we're getting packets in-order 1563 * (the reassembly queue is empty), add the data to 1564 * the socket buffer and note that we need a delayed ack. 1565 * Make sure that the hidden state-flags are also off. 1566 * Since we check for TCPS_ESTABLISHED first, it can only 1567 * be TH_NEEDSYN. 1568 */ 1569 if (tp->t_state == TCPS_ESTABLISHED && 1570 th->th_seq == tp->rcv_nxt && 1571 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 1572 tp->snd_nxt == tp->snd_max && 1573 tiwin && tiwin == tp->snd_wnd && 1574 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) && 1575 LIST_EMPTY(&tp->t_segq) && 1576 ((to.to_flags & TOF_TS) == 0 || 1577 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) { 1578 1579 /* 1580 * If last ACK falls within this segment's sequence numbers, 1581 * record the timestamp. 1582 * NOTE that the test is modified according to the latest 1583 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1584 */ 1585 if ((to.to_flags & TOF_TS) != 0 && 1586 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 1587 tp->ts_recent_age = tcp_ts_getticks(); 1588 tp->ts_recent = to.to_tsval; 1589 } 1590 1591 if (tlen == 0) { 1592 if (SEQ_GT(th->th_ack, tp->snd_una) && 1593 SEQ_LEQ(th->th_ack, tp->snd_max) && 1594 !IN_RECOVERY(tp->t_flags) && 1595 (to.to_flags & TOF_SACK) == 0 && 1596 TAILQ_EMPTY(&tp->snd_holes)) { 1597 /* 1598 * This is a pure ack for outstanding data. 1599 */ 1600 if (ti_locked == TI_WLOCKED) 1601 INP_INFO_WUNLOCK(&V_tcbinfo); 1602 ti_locked = TI_UNLOCKED; 1603 1604 TCPSTAT_INC(tcps_predack); 1605 1606 /* 1607 * "bad retransmit" recovery. 1608 */ 1609 if (tp->t_rxtshift == 1 && 1610 tp->t_flags & TF_PREVVALID && 1611 (int)(ticks - tp->t_badrxtwin) < 0) { 1612 cc_cong_signal(tp, th, CC_RTO_ERR); 1613 } 1614 1615 /* 1616 * Recalculate the transmit timer / rtt. 1617 * 1618 * Some boxes send broken timestamp replies 1619 * during the SYN+ACK phase, ignore 1620 * timestamps of 0 or we could calculate a 1621 * huge RTT and blow up the retransmit timer. 1622 */ 1623 if ((to.to_flags & TOF_TS) != 0 && 1624 to.to_tsecr) { 1625 u_int t; 1626 1627 t = tcp_ts_getticks() - to.to_tsecr; 1628 if (!tp->t_rttlow || tp->t_rttlow > t) 1629 tp->t_rttlow = t; 1630 tcp_xmit_timer(tp, 1631 TCP_TS_TO_TICKS(t) + 1); 1632 } else if (tp->t_rtttime && 1633 SEQ_GT(th->th_ack, tp->t_rtseq)) { 1634 if (!tp->t_rttlow || 1635 tp->t_rttlow > ticks - tp->t_rtttime) 1636 tp->t_rttlow = ticks - tp->t_rtttime; 1637 tcp_xmit_timer(tp, 1638 ticks - tp->t_rtttime); 1639 } 1640 acked = BYTES_THIS_ACK(tp, th); 1641 1642 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 1643 hhook_run_tcp_est_in(tp, th, &to); 1644 1645 TCPSTAT_INC(tcps_rcvackpack); 1646 TCPSTAT_ADD(tcps_rcvackbyte, acked); 1647 sbdrop(&so->so_snd, acked); 1648 if (SEQ_GT(tp->snd_una, tp->snd_recover) && 1649 SEQ_LEQ(th->th_ack, tp->snd_recover)) 1650 tp->snd_recover = th->th_ack - 1; 1651 1652 /* 1653 * Let the congestion control algorithm update 1654 * congestion control related information. This 1655 * typically means increasing the congestion 1656 * window. 1657 */ 1658 cc_ack_received(tp, th, CC_ACK); 1659 1660 tp->snd_una = th->th_ack; 1661 /* 1662 * Pull snd_wl2 up to prevent seq wrap relative 1663 * to th_ack. 1664 */ 1665 tp->snd_wl2 = th->th_ack; 1666 tp->t_dupacks = 0; 1667 m_freem(m); 1668 ND6_HINT(tp); /* Some progress has been made. */ 1669 1670 /* 1671 * If all outstanding data are acked, stop 1672 * retransmit timer, otherwise restart timer 1673 * using current (possibly backed-off) value. 1674 * If process is waiting for space, 1675 * wakeup/selwakeup/signal. If data 1676 * are ready to send, let tcp_output 1677 * decide between more output or persist. 1678 */ 1679#ifdef TCPDEBUG 1680 if (so->so_options & SO_DEBUG) 1681 tcp_trace(TA_INPUT, ostate, tp, 1682 (void *)tcp_saveipgen, 1683 &tcp_savetcp, 0); 1684#endif 1685 if (tp->snd_una == tp->snd_max) 1686 tcp_timer_activate(tp, TT_REXMT, 0); 1687 else if (!tcp_timer_active(tp, TT_PERSIST)) 1688 tcp_timer_activate(tp, TT_REXMT, 1689 tp->t_rxtcur); 1690 sowwakeup(so); 1691 if (so->so_snd.sb_cc) 1692 (void) tcp_output(tp); 1693 goto check_delack; 1694 } 1695 } else if (th->th_ack == tp->snd_una && 1696 tlen <= sbspace(&so->so_rcv)) { 1697 int newsize = 0; /* automatic sockbuf scaling */ 1698 1699 /* 1700 * This is a pure, in-sequence data packet with 1701 * nothing on the reassembly queue and we have enough 1702 * buffer space to take it. 1703 */ 1704 if (ti_locked == TI_WLOCKED) 1705 INP_INFO_WUNLOCK(&V_tcbinfo); 1706 ti_locked = TI_UNLOCKED; 1707 1708 /* Clean receiver SACK report if present */ 1709 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks) 1710 tcp_clean_sackreport(tp); 1711 TCPSTAT_INC(tcps_preddat); 1712 tp->rcv_nxt += tlen; 1713 /* 1714 * Pull snd_wl1 up to prevent seq wrap relative to 1715 * th_seq. 1716 */ 1717 tp->snd_wl1 = th->th_seq; 1718 /* 1719 * Pull rcv_up up to prevent seq wrap relative to 1720 * rcv_nxt. 1721 */ 1722 tp->rcv_up = tp->rcv_nxt; 1723 TCPSTAT_INC(tcps_rcvpack); 1724 TCPSTAT_ADD(tcps_rcvbyte, tlen); 1725 ND6_HINT(tp); /* Some progress has been made */ 1726#ifdef TCPDEBUG 1727 if (so->so_options & SO_DEBUG) 1728 tcp_trace(TA_INPUT, ostate, tp, 1729 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1730#endif 1731 /* 1732 * Automatic sizing of receive socket buffer. Often the send 1733 * buffer size is not optimally adjusted to the actual network 1734 * conditions at hand (delay bandwidth product). Setting the 1735 * buffer size too small limits throughput on links with high 1736 * bandwidth and high delay (eg. trans-continental/oceanic links). 1737 * 1738 * On the receive side the socket buffer memory is only rarely 1739 * used to any significant extent. This allows us to be much 1740 * more aggressive in scaling the receive socket buffer. For 1741 * the case that the buffer space is actually used to a large 1742 * extent and we run out of kernel memory we can simply drop 1743 * the new segments; TCP on the sender will just retransmit it 1744 * later. Setting the buffer size too big may only consume too 1745 * much kernel memory if the application doesn't read() from 1746 * the socket or packet loss or reordering makes use of the 1747 * reassembly queue. 1748 * 1749 * The criteria to step up the receive buffer one notch are: 1750 * 1. the number of bytes received during the time it takes 1751 * one timestamp to be reflected back to us (the RTT); 1752 * 2. received bytes per RTT is within seven eighth of the 1753 * current socket buffer size; 1754 * 3. receive buffer size has not hit maximal automatic size; 1755 * 1756 * This algorithm does one step per RTT at most and only if 1757 * we receive a bulk stream w/o packet losses or reorderings. 1758 * Shrinking the buffer during idle times is not necessary as 1759 * it doesn't consume any memory when idle. 1760 * 1761 * TODO: Only step up if the application is actually serving 1762 * the buffer to better manage the socket buffer resources. 1763 */ 1764 if (V_tcp_do_autorcvbuf && 1765 to.to_tsecr && 1766 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 1767 if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) && 1768 to.to_tsecr - tp->rfbuf_ts < hz) { 1769 if (tp->rfbuf_cnt > 1770 (so->so_rcv.sb_hiwat / 8 * 7) && 1771 so->so_rcv.sb_hiwat < 1772 V_tcp_autorcvbuf_max) { 1773 newsize = 1774 min(so->so_rcv.sb_hiwat + 1775 V_tcp_autorcvbuf_inc, 1776 V_tcp_autorcvbuf_max); 1777 } 1778 /* Start over with next RTT. */ 1779 tp->rfbuf_ts = 0; 1780 tp->rfbuf_cnt = 0; 1781 } else 1782 tp->rfbuf_cnt += tlen; /* add up */ 1783 } 1784 1785 /* Add data to socket buffer. */ 1786 SOCKBUF_LOCK(&so->so_rcv); 1787 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1788 m_freem(m); 1789 } else { 1790 /* 1791 * Set new socket buffer size. 1792 * Give up when limit is reached. 1793 */ 1794 if (newsize) 1795 if (!sbreserve_locked(&so->so_rcv, 1796 newsize, so, NULL)) 1797 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1798 m_adj(m, drop_hdrlen); /* delayed header drop */ 1799 sbappendstream_locked(&so->so_rcv, m); 1800 } 1801 /* NB: sorwakeup_locked() does an implicit unlock. */ 1802 sorwakeup_locked(so); 1803 if (DELAY_ACK(tp)) { 1804 tp->t_flags |= TF_DELACK; 1805 } else { 1806 tp->t_flags |= TF_ACKNOW; 1807 tcp_output(tp); 1808 } 1809 goto check_delack; 1810 } 1811 } 1812 1813 /* 1814 * Calculate amount of space in receive window, 1815 * and then do TCP input processing. 1816 * Receive window is amount of space in rcv queue, 1817 * but not less than advertised window. 1818 */ 1819 win = sbspace(&so->so_rcv); 1820 if (win < 0) 1821 win = 0; 1822 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1823 1824 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 1825 tp->rfbuf_ts = 0; 1826 tp->rfbuf_cnt = 0; 1827 1828 switch (tp->t_state) { 1829 1830 /* 1831 * If the state is SYN_RECEIVED: 1832 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1833 */ 1834 case TCPS_SYN_RECEIVED: 1835 if ((thflags & TH_ACK) && 1836 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1837 SEQ_GT(th->th_ack, tp->snd_max))) { 1838 rstreason = BANDLIM_RST_OPENPORT; 1839 goto dropwithreset; 1840 } 1841 break; 1842 1843 /* 1844 * If the state is SYN_SENT: 1845 * if seg contains an ACK, but not for our SYN, drop the input. 1846 * if seg contains a RST, then drop the connection. 1847 * if seg does not contain SYN, then drop it. 1848 * Otherwise this is an acceptable SYN segment 1849 * initialize tp->rcv_nxt and tp->irs 1850 * if seg contains ack then advance tp->snd_una 1851 * if seg contains an ECE and ECN support is enabled, the stream 1852 * is ECN capable. 1853 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1854 * arrange for segment to be acked (eventually) 1855 * continue processing rest of data/controls, beginning with URG 1856 */ 1857 case TCPS_SYN_SENT: 1858 if ((thflags & TH_ACK) && 1859 (SEQ_LEQ(th->th_ack, tp->iss) || 1860 SEQ_GT(th->th_ack, tp->snd_max))) { 1861 rstreason = BANDLIM_UNLIMITED; 1862 goto dropwithreset; 1863 } 1864 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) 1865 tp = tcp_drop(tp, ECONNREFUSED); 1866 if (thflags & TH_RST) 1867 goto drop; 1868 if (!(thflags & TH_SYN)) 1869 goto drop; 1870 1871 tp->irs = th->th_seq; 1872 tcp_rcvseqinit(tp); 1873 if (thflags & TH_ACK) { 1874 TCPSTAT_INC(tcps_connects); 1875 soisconnected(so); 1876#ifdef MAC 1877 mac_socketpeer_set_from_mbuf(m, so); 1878#endif 1879 /* Do window scaling on this connection? */ 1880 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1881 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1882 tp->rcv_scale = tp->request_r_scale; 1883 } 1884 tp->rcv_adv += imin(tp->rcv_wnd, 1885 TCP_MAXWIN << tp->rcv_scale); 1886 tp->snd_una++; /* SYN is acked */ 1887 /* 1888 * If there's data, delay ACK; if there's also a FIN 1889 * ACKNOW will be turned on later. 1890 */ 1891 if (DELAY_ACK(tp) && tlen != 0) 1892 tcp_timer_activate(tp, TT_DELACK, 1893 tcp_delacktime); 1894 else 1895 tp->t_flags |= TF_ACKNOW; 1896 1897 if ((thflags & TH_ECE) && V_tcp_do_ecn) { 1898 tp->t_flags |= TF_ECN_PERMIT; 1899 TCPSTAT_INC(tcps_ecn_shs); 1900 } 1901 1902 /* 1903 * Received <SYN,ACK> in SYN_SENT[*] state. 1904 * Transitions: 1905 * SYN_SENT --> ESTABLISHED 1906 * SYN_SENT* --> FIN_WAIT_1 1907 */ 1908 tp->t_starttime = ticks; 1909 if (tp->t_flags & TF_NEEDFIN) { 1910 tp->t_state = TCPS_FIN_WAIT_1; 1911 tp->t_flags &= ~TF_NEEDFIN; 1912 thflags &= ~TH_SYN; 1913 } else { 1914 tp->t_state = TCPS_ESTABLISHED; 1915 cc_conn_init(tp); 1916 tcp_timer_activate(tp, TT_KEEP, 1917 TP_KEEPIDLE(tp)); 1918 } 1919 } else { 1920 /* 1921 * Received initial SYN in SYN-SENT[*] state => 1922 * simultaneous open. If segment contains CC option 1923 * and there is a cached CC, apply TAO test. 1924 * If it succeeds, connection is * half-synchronized. 1925 * Otherwise, do 3-way handshake: 1926 * SYN-SENT -> SYN-RECEIVED 1927 * SYN-SENT* -> SYN-RECEIVED* 1928 * If there was no CC option, clear cached CC value. 1929 */ 1930 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 1931 tcp_timer_activate(tp, TT_REXMT, 0); 1932 tp->t_state = TCPS_SYN_RECEIVED; 1933 } 1934 1935 KASSERT(ti_locked == TI_WLOCKED, ("%s: trimthenstep6: " 1936 "ti_locked %d", __func__, ti_locked)); 1937 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 1938 INP_WLOCK_ASSERT(tp->t_inpcb); 1939 1940 /* 1941 * Advance th->th_seq to correspond to first data byte. 1942 * If data, trim to stay within window, 1943 * dropping FIN if necessary. 1944 */ 1945 th->th_seq++; 1946 if (tlen > tp->rcv_wnd) { 1947 todrop = tlen - tp->rcv_wnd; 1948 m_adj(m, -todrop); 1949 tlen = tp->rcv_wnd; 1950 thflags &= ~TH_FIN; 1951 TCPSTAT_INC(tcps_rcvpackafterwin); 1952 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 1953 } 1954 tp->snd_wl1 = th->th_seq - 1; 1955 tp->rcv_up = th->th_seq; 1956 /* 1957 * Client side of transaction: already sent SYN and data. 1958 * If the remote host used T/TCP to validate the SYN, 1959 * our data will be ACK'd; if so, enter normal data segment 1960 * processing in the middle of step 5, ack processing. 1961 * Otherwise, goto step 6. 1962 */ 1963 if (thflags & TH_ACK) 1964 goto process_ACK; 1965 1966 goto step6; 1967 1968 /* 1969 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 1970 * do normal processing. 1971 * 1972 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later. 1973 */ 1974 case TCPS_LAST_ACK: 1975 case TCPS_CLOSING: 1976 break; /* continue normal processing */ 1977 } 1978 1979 /* 1980 * States other than LISTEN or SYN_SENT. 1981 * First check the RST flag and sequence number since reset segments 1982 * are exempt from the timestamp and connection count tests. This 1983 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 1984 * below which allowed reset segments in half the sequence space 1985 * to fall though and be processed (which gives forged reset 1986 * segments with a random sequence number a 50 percent chance of 1987 * killing a connection). 1988 * Then check timestamp, if present. 1989 * Then check the connection count, if present. 1990 * Then check that at least some bytes of segment are within 1991 * receive window. If segment begins before rcv_nxt, 1992 * drop leading data (and SYN); if nothing left, just ack. 1993 * 1994 * 1995 * If the RST bit is set, check the sequence number to see 1996 * if this is a valid reset segment. 1997 * RFC 793 page 37: 1998 * In all states except SYN-SENT, all reset (RST) segments 1999 * are validated by checking their SEQ-fields. A reset is 2000 * valid if its sequence number is in the window. 2001 * Note: this does not take into account delayed ACKs, so 2002 * we should test against last_ack_sent instead of rcv_nxt. 2003 * The sequence number in the reset segment is normally an 2004 * echo of our outgoing acknowlegement numbers, but some hosts 2005 * send a reset with the sequence number at the rightmost edge 2006 * of our receive window, and we have to handle this case. 2007 * Note 2: Paul Watson's paper "Slipping in the Window" has shown 2008 * that brute force RST attacks are possible. To combat this, 2009 * we use a much stricter check while in the ESTABLISHED state, 2010 * only accepting RSTs where the sequence number is equal to 2011 * last_ack_sent. In all other states (the states in which a 2012 * RST is more likely), the more permissive check is used. 2013 * If we have multiple segments in flight, the initial reset 2014 * segment sequence numbers will be to the left of last_ack_sent, 2015 * but they will eventually catch up. 2016 * In any case, it never made sense to trim reset segments to 2017 * fit the receive window since RFC 1122 says: 2018 * 4.2.2.12 RST Segment: RFC-793 Section 3.4 2019 * 2020 * A TCP SHOULD allow a received RST segment to include data. 2021 * 2022 * DISCUSSION 2023 * It has been suggested that a RST segment could contain 2024 * ASCII text that encoded and explained the cause of the 2025 * RST. No standard has yet been established for such 2026 * data. 2027 * 2028 * If the reset segment passes the sequence number test examine 2029 * the state: 2030 * SYN_RECEIVED STATE: 2031 * If passive open, return to LISTEN state. 2032 * If active open, inform user that connection was refused. 2033 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES: 2034 * Inform user that connection was reset, and close tcb. 2035 * CLOSING, LAST_ACK STATES: 2036 * Close the tcb. 2037 * TIME_WAIT STATE: 2038 * Drop the segment - see Stevens, vol. 2, p. 964 and 2039 * RFC 1337. 2040 */ 2041 if (thflags & TH_RST) { 2042 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) && 2043 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 2044 switch (tp->t_state) { 2045 2046 case TCPS_SYN_RECEIVED: 2047 so->so_error = ECONNREFUSED; 2048 goto close; 2049 2050 case TCPS_ESTABLISHED: 2051 if (V_tcp_insecure_rst == 0 && 2052 !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) && 2053 SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) && 2054 !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) && 2055 SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) { 2056 TCPSTAT_INC(tcps_badrst); 2057 goto drop; 2058 } 2059 /* FALLTHROUGH */ 2060 case TCPS_FIN_WAIT_1: 2061 case TCPS_FIN_WAIT_2: 2062 case TCPS_CLOSE_WAIT: 2063 so->so_error = ECONNRESET; 2064 close: 2065 KASSERT(ti_locked == TI_WLOCKED, 2066 ("tcp_do_segment: TH_RST 1 ti_locked %d", 2067 ti_locked)); 2068 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2069 2070 tp->t_state = TCPS_CLOSED; 2071 TCPSTAT_INC(tcps_drops); 2072 tp = tcp_close(tp); 2073 break; 2074 2075 case TCPS_CLOSING: 2076 case TCPS_LAST_ACK: 2077 KASSERT(ti_locked == TI_WLOCKED, 2078 ("tcp_do_segment: TH_RST 2 ti_locked %d", 2079 ti_locked)); 2080 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2081 2082 tp = tcp_close(tp); 2083 break; 2084 } 2085 } 2086 goto drop; 2087 } 2088 2089 /* 2090 * RFC 1323 PAWS: If we have a timestamp reply on this segment 2091 * and it's less than ts_recent, drop it. 2092 */ 2093 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 2094 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 2095 2096 /* Check to see if ts_recent is over 24 days old. */ 2097 if (tcp_ts_getticks() - tp->ts_recent_age > TCP_PAWS_IDLE) { 2098 /* 2099 * Invalidate ts_recent. If this segment updates 2100 * ts_recent, the age will be reset later and ts_recent 2101 * will get a valid value. If it does not, setting 2102 * ts_recent to zero will at least satisfy the 2103 * requirement that zero be placed in the timestamp 2104 * echo reply when ts_recent isn't valid. The 2105 * age isn't reset until we get a valid ts_recent 2106 * because we don't want out-of-order segments to be 2107 * dropped when ts_recent is old. 2108 */ 2109 tp->ts_recent = 0; 2110 } else { 2111 TCPSTAT_INC(tcps_rcvduppack); 2112 TCPSTAT_ADD(tcps_rcvdupbyte, tlen); 2113 TCPSTAT_INC(tcps_pawsdrop); 2114 if (tlen) 2115 goto dropafterack; 2116 goto drop; 2117 } 2118 } 2119 2120 /* 2121 * In the SYN-RECEIVED state, validate that the packet belongs to 2122 * this connection before trimming the data to fit the receive 2123 * window. Check the sequence number versus IRS since we know 2124 * the sequence numbers haven't wrapped. This is a partial fix 2125 * for the "LAND" DoS attack. 2126 */ 2127 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 2128 rstreason = BANDLIM_RST_OPENPORT; 2129 goto dropwithreset; 2130 } 2131 2132 todrop = tp->rcv_nxt - th->th_seq; 2133 if (todrop > 0) { 2134 /* 2135 * If this is a duplicate SYN for our current connection, 2136 * advance over it and pretend and it's not a SYN. 2137 */ 2138 if (thflags & TH_SYN && th->th_seq == tp->irs) { 2139 thflags &= ~TH_SYN; 2140 th->th_seq++; 2141 if (th->th_urp > 1) 2142 th->th_urp--; 2143 else 2144 thflags &= ~TH_URG; 2145 todrop--; 2146 } 2147 /* 2148 * Following if statement from Stevens, vol. 2, p. 960. 2149 */ 2150 if (todrop > tlen 2151 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 2152 /* 2153 * Any valid FIN must be to the left of the window. 2154 * At this point the FIN must be a duplicate or out 2155 * of sequence; drop it. 2156 */ 2157 thflags &= ~TH_FIN; 2158 2159 /* 2160 * Send an ACK to resynchronize and drop any data. 2161 * But keep on processing for RST or ACK. 2162 */ 2163 tp->t_flags |= TF_ACKNOW; 2164 todrop = tlen; 2165 TCPSTAT_INC(tcps_rcvduppack); 2166 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 2167 } else { 2168 TCPSTAT_INC(tcps_rcvpartduppack); 2169 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 2170 } 2171 drop_hdrlen += todrop; /* drop from the top afterwards */ 2172 th->th_seq += todrop; 2173 tlen -= todrop; 2174 if (th->th_urp > todrop) 2175 th->th_urp -= todrop; 2176 else { 2177 thflags &= ~TH_URG; 2178 th->th_urp = 0; 2179 } 2180 } 2181 2182 /* 2183 * If new data are received on a connection after the 2184 * user processes are gone, then RST the other end. 2185 */ 2186 if ((so->so_state & SS_NOFDREF) && 2187 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 2188 char *s; 2189 2190 KASSERT(ti_locked == TI_WLOCKED, ("%s: SS_NOFDEREF && " 2191 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked)); 2192 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2193 2194 if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) { 2195 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data after socket " 2196 "was closed, sending RST and removing tcpcb\n", 2197 s, __func__, tcpstates[tp->t_state], tlen); 2198 free(s, M_TCPLOG); 2199 } 2200 tp = tcp_close(tp); 2201 TCPSTAT_INC(tcps_rcvafterclose); 2202 rstreason = BANDLIM_UNLIMITED; 2203 goto dropwithreset; 2204 } 2205 2206 /* 2207 * If segment ends after window, drop trailing data 2208 * (and PUSH and FIN); if nothing left, just ACK. 2209 */ 2210 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 2211 if (todrop > 0) { 2212 TCPSTAT_INC(tcps_rcvpackafterwin); 2213 if (todrop >= tlen) { 2214 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 2215 /* 2216 * If window is closed can only take segments at 2217 * window edge, and have to drop data and PUSH from 2218 * incoming segments. Continue processing, but 2219 * remember to ack. Otherwise, drop segment 2220 * and ack. 2221 */ 2222 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 2223 tp->t_flags |= TF_ACKNOW; 2224 TCPSTAT_INC(tcps_rcvwinprobe); 2225 } else 2226 goto dropafterack; 2227 } else 2228 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2229 m_adj(m, -todrop); 2230 tlen -= todrop; 2231 thflags &= ~(TH_PUSH|TH_FIN); 2232 } 2233 2234 /* 2235 * If last ACK falls within this segment's sequence numbers, 2236 * record its timestamp. 2237 * NOTE: 2238 * 1) That the test incorporates suggestions from the latest 2239 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 2240 * 2) That updating only on newer timestamps interferes with 2241 * our earlier PAWS tests, so this check should be solely 2242 * predicated on the sequence space of this segment. 2243 * 3) That we modify the segment boundary check to be 2244 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 2245 * instead of RFC1323's 2246 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 2247 * This modified check allows us to overcome RFC1323's 2248 * limitations as described in Stevens TCP/IP Illustrated 2249 * Vol. 2 p.869. In such cases, we can still calculate the 2250 * RTT correctly when RCV.NXT == Last.ACK.Sent. 2251 */ 2252 if ((to.to_flags & TOF_TS) != 0 && 2253 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 2254 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 2255 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 2256 tp->ts_recent_age = tcp_ts_getticks(); 2257 tp->ts_recent = to.to_tsval; 2258 } 2259 2260 /* 2261 * If a SYN is in the window, then this is an 2262 * error and we send an RST and drop the connection. 2263 */ 2264 if (thflags & TH_SYN) { 2265 KASSERT(ti_locked == TI_WLOCKED, 2266 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked)); 2267 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2268 2269 tp = tcp_drop(tp, ECONNRESET); 2270 rstreason = BANDLIM_UNLIMITED; 2271 goto drop; 2272 } 2273 2274 /* 2275 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 2276 * flag is on (half-synchronized state), then queue data for 2277 * later processing; else drop segment and return. 2278 */ 2279 if ((thflags & TH_ACK) == 0) { 2280 if (tp->t_state == TCPS_SYN_RECEIVED || 2281 (tp->t_flags & TF_NEEDSYN)) 2282 goto step6; 2283 else if (tp->t_flags & TF_ACKNOW) 2284 goto dropafterack; 2285 else 2286 goto drop; 2287 } 2288 2289 /* 2290 * Ack processing. 2291 */ 2292 switch (tp->t_state) { 2293 2294 /* 2295 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 2296 * ESTABLISHED state and continue processing. 2297 * The ACK was checked above. 2298 */ 2299 case TCPS_SYN_RECEIVED: 2300 2301 TCPSTAT_INC(tcps_connects); 2302 soisconnected(so); 2303 /* Do window scaling? */ 2304 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2305 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2306 tp->rcv_scale = tp->request_r_scale; 2307 tp->snd_wnd = tiwin; 2308 } 2309 /* 2310 * Make transitions: 2311 * SYN-RECEIVED -> ESTABLISHED 2312 * SYN-RECEIVED* -> FIN-WAIT-1 2313 */ 2314 tp->t_starttime = ticks; 2315 if (tp->t_flags & TF_NEEDFIN) { 2316 tp->t_state = TCPS_FIN_WAIT_1; 2317 tp->t_flags &= ~TF_NEEDFIN; 2318 } else { 2319 tp->t_state = TCPS_ESTABLISHED; 2320 cc_conn_init(tp); 2321 tcp_timer_activate(tp, TT_KEEP, TP_KEEPIDLE(tp)); 2322 } 2323 /* 2324 * If segment contains data or ACK, will call tcp_reass() 2325 * later; if not, do so now to pass queued data to user. 2326 */ 2327 if (tlen == 0 && (thflags & TH_FIN) == 0) 2328 (void) tcp_reass(tp, (struct tcphdr *)0, 0, 2329 (struct mbuf *)0); 2330 tp->snd_wl1 = th->th_seq - 1; 2331 /* FALLTHROUGH */ 2332 2333 /* 2334 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 2335 * ACKs. If the ack is in the range 2336 * tp->snd_una < th->th_ack <= tp->snd_max 2337 * then advance tp->snd_una to th->th_ack and drop 2338 * data from the retransmission queue. If this ACK reflects 2339 * more up to date window information we update our window information. 2340 */ 2341 case TCPS_ESTABLISHED: 2342 case TCPS_FIN_WAIT_1: 2343 case TCPS_FIN_WAIT_2: 2344 case TCPS_CLOSE_WAIT: 2345 case TCPS_CLOSING: 2346 case TCPS_LAST_ACK: 2347 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2348 TCPSTAT_INC(tcps_rcvacktoomuch); 2349 goto dropafterack; 2350 } 2351 if ((tp->t_flags & TF_SACK_PERMIT) && 2352 ((to.to_flags & TOF_SACK) || 2353 !TAILQ_EMPTY(&tp->snd_holes))) 2354 tcp_sack_doack(tp, &to, th->th_ack); 2355 2356 /* Run HHOOK_TCP_ESTABLISHED_IN helper hooks. */ 2357 hhook_run_tcp_est_in(tp, th, &to); 2358 2359 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2360 if (tlen == 0 && tiwin == tp->snd_wnd) { 2361 TCPSTAT_INC(tcps_rcvdupack); 2362 /* 2363 * If we have outstanding data (other than 2364 * a window probe), this is a completely 2365 * duplicate ack (ie, window info didn't 2366 * change), the ack is the biggest we've 2367 * seen and we've seen exactly our rexmt 2368 * threshhold of them, assume a packet 2369 * has been dropped and retransmit it. 2370 * Kludge snd_nxt & the congestion 2371 * window so we send only this one 2372 * packet. 2373 * 2374 * We know we're losing at the current 2375 * window size so do congestion avoidance 2376 * (set ssthresh to half the current window 2377 * and pull our congestion window back to 2378 * the new ssthresh). 2379 * 2380 * Dup acks mean that packets have left the 2381 * network (they're now cached at the receiver) 2382 * so bump cwnd by the amount in the receiver 2383 * to keep a constant cwnd packets in the 2384 * network. 2385 * 2386 * When using TCP ECN, notify the peer that 2387 * we reduced the cwnd. 2388 */ 2389 if (!tcp_timer_active(tp, TT_REXMT) || 2390 th->th_ack != tp->snd_una) 2391 tp->t_dupacks = 0; 2392 else if (++tp->t_dupacks > tcprexmtthresh || 2393 IN_FASTRECOVERY(tp->t_flags)) { 2394 cc_ack_received(tp, th, CC_DUPACK); 2395 if ((tp->t_flags & TF_SACK_PERMIT) && 2396 IN_FASTRECOVERY(tp->t_flags)) { 2397 int awnd; 2398 2399 /* 2400 * Compute the amount of data in flight first. 2401 * We can inject new data into the pipe iff 2402 * we have less than 1/2 the original window's 2403 * worth of data in flight. 2404 */ 2405 awnd = (tp->snd_nxt - tp->snd_fack) + 2406 tp->sackhint.sack_bytes_rexmit; 2407 if (awnd < tp->snd_ssthresh) { 2408 tp->snd_cwnd += tp->t_maxseg; 2409 if (tp->snd_cwnd > tp->snd_ssthresh) 2410 tp->snd_cwnd = tp->snd_ssthresh; 2411 } 2412 } else 2413 tp->snd_cwnd += tp->t_maxseg; 2414 if ((thflags & TH_FIN) && 2415 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2416 /* 2417 * If its a fin we need to process 2418 * it to avoid a race where both 2419 * sides enter FIN-WAIT and send FIN|ACK 2420 * at the same time. 2421 */ 2422 break; 2423 } 2424 (void) tcp_output(tp); 2425 goto drop; 2426 } else if (tp->t_dupacks == tcprexmtthresh) { 2427 tcp_seq onxt = tp->snd_nxt; 2428 2429 /* 2430 * If we're doing sack, check to 2431 * see if we're already in sack 2432 * recovery. If we're not doing sack, 2433 * check to see if we're in newreno 2434 * recovery. 2435 */ 2436 if (tp->t_flags & TF_SACK_PERMIT) { 2437 if (IN_FASTRECOVERY(tp->t_flags)) { 2438 tp->t_dupacks = 0; 2439 break; 2440 } 2441 } else { 2442 if (SEQ_LEQ(th->th_ack, 2443 tp->snd_recover)) { 2444 tp->t_dupacks = 0; 2445 break; 2446 } 2447 } 2448 /* Congestion signal before ack. */ 2449 cc_cong_signal(tp, th, CC_NDUPACK); 2450 cc_ack_received(tp, th, CC_DUPACK); 2451 tcp_timer_activate(tp, TT_REXMT, 0); 2452 tp->t_rtttime = 0; 2453 if (tp->t_flags & TF_SACK_PERMIT) { 2454 TCPSTAT_INC( 2455 tcps_sack_recovery_episode); 2456 tp->sack_newdata = tp->snd_nxt; 2457 tp->snd_cwnd = tp->t_maxseg; 2458 (void) tcp_output(tp); 2459 goto drop; 2460 } 2461 tp->snd_nxt = th->th_ack; 2462 tp->snd_cwnd = tp->t_maxseg; 2463 if ((thflags & TH_FIN) && 2464 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2465 /* 2466 * If its a fin we need to process 2467 * it to avoid a race where both 2468 * sides enter FIN-WAIT and send FIN|ACK 2469 * at the same time. 2470 */ 2471 break; 2472 } 2473 (void) tcp_output(tp); 2474 KASSERT(tp->snd_limited <= 2, 2475 ("%s: tp->snd_limited too big", 2476 __func__)); 2477 tp->snd_cwnd = tp->snd_ssthresh + 2478 tp->t_maxseg * 2479 (tp->t_dupacks - tp->snd_limited); 2480 if (SEQ_GT(onxt, tp->snd_nxt)) 2481 tp->snd_nxt = onxt; 2482 goto drop; 2483 } else if (V_tcp_do_rfc3042) { 2484 cc_ack_received(tp, th, CC_DUPACK); 2485 u_long oldcwnd = tp->snd_cwnd; 2486 tcp_seq oldsndmax = tp->snd_max; 2487 u_int sent; 2488 2489 KASSERT(tp->t_dupacks == 1 || 2490 tp->t_dupacks == 2, 2491 ("%s: dupacks not 1 or 2", 2492 __func__)); 2493 if (tp->t_dupacks == 1) 2494 tp->snd_limited = 0; 2495 tp->snd_cwnd = 2496 (tp->snd_nxt - tp->snd_una) + 2497 (tp->t_dupacks - tp->snd_limited) * 2498 tp->t_maxseg; 2499 if ((thflags & TH_FIN) && 2500 (TCPS_HAVERCVDFIN(tp->t_state) == 0)) { 2501 /* 2502 * If its a fin we need to process 2503 * it to avoid a race where both 2504 * sides enter FIN-WAIT and send FIN|ACK 2505 * at the same time. 2506 */ 2507 break; 2508 } 2509 (void) tcp_output(tp); 2510 sent = tp->snd_max - oldsndmax; 2511 if (sent > tp->t_maxseg) { 2512 KASSERT((tp->t_dupacks == 2 && 2513 tp->snd_limited == 0) || 2514 (sent == tp->t_maxseg + 1 && 2515 tp->t_flags & TF_SENTFIN), 2516 ("%s: sent too much", 2517 __func__)); 2518 tp->snd_limited = 2; 2519 } else if (sent > 0) 2520 ++tp->snd_limited; 2521 tp->snd_cwnd = oldcwnd; 2522 goto drop; 2523 } 2524 } else 2525 tp->t_dupacks = 0; 2526 break; 2527 } 2528 2529 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2530 ("%s: th_ack <= snd_una", __func__)); 2531 2532 /* 2533 * If the congestion window was inflated to account 2534 * for the other side's cached packets, retract it. 2535 */ 2536 if (IN_FASTRECOVERY(tp->t_flags)) { 2537 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2538 if (tp->t_flags & TF_SACK_PERMIT) 2539 tcp_sack_partialack(tp, th); 2540 else 2541 tcp_newreno_partial_ack(tp, th); 2542 } else 2543 cc_post_recovery(tp, th); 2544 } 2545 tp->t_dupacks = 0; 2546 /* 2547 * If we reach this point, ACK is not a duplicate, 2548 * i.e., it ACKs something we sent. 2549 */ 2550 if (tp->t_flags & TF_NEEDSYN) { 2551 /* 2552 * T/TCP: Connection was half-synchronized, and our 2553 * SYN has been ACK'd (so connection is now fully 2554 * synchronized). Go to non-starred state, 2555 * increment snd_una for ACK of SYN, and check if 2556 * we can do window scaling. 2557 */ 2558 tp->t_flags &= ~TF_NEEDSYN; 2559 tp->snd_una++; 2560 /* Do window scaling? */ 2561 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2562 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2563 tp->rcv_scale = tp->request_r_scale; 2564 /* Send window already scaled. */ 2565 } 2566 } 2567 2568process_ACK: 2569 INP_WLOCK_ASSERT(tp->t_inpcb); 2570 2571 acked = BYTES_THIS_ACK(tp, th); 2572 TCPSTAT_INC(tcps_rcvackpack); 2573 TCPSTAT_ADD(tcps_rcvackbyte, acked); 2574 2575 /* 2576 * If we just performed our first retransmit, and the ACK 2577 * arrives within our recovery window, then it was a mistake 2578 * to do the retransmit in the first place. Recover our 2579 * original cwnd and ssthresh, and proceed to transmit where 2580 * we left off. 2581 */ 2582 if (tp->t_rxtshift == 1 && tp->t_flags & TF_PREVVALID && 2583 (int)(ticks - tp->t_badrxtwin) < 0) 2584 cc_cong_signal(tp, th, CC_RTO_ERR); 2585 2586 /* 2587 * If we have a timestamp reply, update smoothed 2588 * round trip time. If no timestamp is present but 2589 * transmit timer is running and timed sequence 2590 * number was acked, update smoothed round trip time. 2591 * Since we now have an rtt measurement, cancel the 2592 * timer backoff (cf., Phil Karn's retransmit alg.). 2593 * Recompute the initial retransmit timer. 2594 * 2595 * Some boxes send broken timestamp replies 2596 * during the SYN+ACK phase, ignore 2597 * timestamps of 0 or we could calculate a 2598 * huge RTT and blow up the retransmit timer. 2599 */ 2600 if ((to.to_flags & TOF_TS) != 0 && to.to_tsecr) { 2601 u_int t; 2602 2603 t = tcp_ts_getticks() - to.to_tsecr; 2604 if (!tp->t_rttlow || tp->t_rttlow > t) 2605 tp->t_rttlow = t; 2606 tcp_xmit_timer(tp, TCP_TS_TO_TICKS(t) + 1); 2607 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2608 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2609 tp->t_rttlow = ticks - tp->t_rtttime; 2610 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2611 } 2612 2613 /* 2614 * If all outstanding data is acked, stop retransmit 2615 * timer and remember to restart (more output or persist). 2616 * If there is more data to be acked, restart retransmit 2617 * timer, using current (possibly backed-off) value. 2618 */ 2619 if (th->th_ack == tp->snd_max) { 2620 tcp_timer_activate(tp, TT_REXMT, 0); 2621 needoutput = 1; 2622 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2623 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2624 2625 /* 2626 * If no data (only SYN) was ACK'd, 2627 * skip rest of ACK processing. 2628 */ 2629 if (acked == 0) 2630 goto step6; 2631 2632 /* 2633 * Let the congestion control algorithm update congestion 2634 * control related information. This typically means increasing 2635 * the congestion window. 2636 */ 2637 cc_ack_received(tp, th, CC_ACK); 2638 2639 SOCKBUF_LOCK(&so->so_snd); 2640 if (acked > so->so_snd.sb_cc) { 2641 tp->snd_wnd -= so->so_snd.sb_cc; 2642 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc); 2643 ourfinisacked = 1; 2644 } else { 2645 sbdrop_locked(&so->so_snd, acked); 2646 tp->snd_wnd -= acked; 2647 ourfinisacked = 0; 2648 } 2649 /* NB: sowwakeup_locked() does an implicit unlock. */ 2650 sowwakeup_locked(so); 2651 /* Detect una wraparound. */ 2652 if (!IN_RECOVERY(tp->t_flags) && 2653 SEQ_GT(tp->snd_una, tp->snd_recover) && 2654 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2655 tp->snd_recover = th->th_ack - 1; 2656 /* XXXLAS: Can this be moved up into cc_post_recovery? */ 2657 if (IN_RECOVERY(tp->t_flags) && 2658 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2659 EXIT_RECOVERY(tp->t_flags); 2660 } 2661 tp->snd_una = th->th_ack; 2662 if (tp->t_flags & TF_SACK_PERMIT) { 2663 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2664 tp->snd_recover = tp->snd_una; 2665 } 2666 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2667 tp->snd_nxt = tp->snd_una; 2668 2669 switch (tp->t_state) { 2670 2671 /* 2672 * In FIN_WAIT_1 STATE in addition to the processing 2673 * for the ESTABLISHED state if our FIN is now acknowledged 2674 * then enter FIN_WAIT_2. 2675 */ 2676 case TCPS_FIN_WAIT_1: 2677 if (ourfinisacked) { 2678 /* 2679 * If we can't receive any more 2680 * data, then closing user can proceed. 2681 * Starting the timer is contrary to the 2682 * specification, but if we don't get a FIN 2683 * we'll hang forever. 2684 * 2685 * XXXjl: 2686 * we should release the tp also, and use a 2687 * compressed state. 2688 */ 2689 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2690 soisdisconnected(so); 2691 tcp_timer_activate(tp, TT_2MSL, 2692 (tcp_fast_finwait2_recycle ? 2693 tcp_finwait2_timeout : 2694 TP_MAXIDLE(tp))); 2695 } 2696 tp->t_state = TCPS_FIN_WAIT_2; 2697 } 2698 break; 2699 2700 /* 2701 * In CLOSING STATE in addition to the processing for 2702 * the ESTABLISHED state if the ACK acknowledges our FIN 2703 * then enter the TIME-WAIT state, otherwise ignore 2704 * the segment. 2705 */ 2706 case TCPS_CLOSING: 2707 if (ourfinisacked) { 2708 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2709 tcp_twstart(tp); 2710 INP_INFO_WUNLOCK(&V_tcbinfo); 2711 m_freem(m); 2712 return; 2713 } 2714 break; 2715 2716 /* 2717 * In LAST_ACK, we may still be waiting for data to drain 2718 * and/or to be acked, as well as for the ack of our FIN. 2719 * If our FIN is now acknowledged, delete the TCB, 2720 * enter the closed state and return. 2721 */ 2722 case TCPS_LAST_ACK: 2723 if (ourfinisacked) { 2724 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2725 tp = tcp_close(tp); 2726 goto drop; 2727 } 2728 break; 2729 } 2730 } 2731 2732step6: 2733 INP_WLOCK_ASSERT(tp->t_inpcb); 2734 2735 /* 2736 * Update window information. 2737 * Don't look at window if no ACK: TAC's send garbage on first SYN. 2738 */ 2739 if ((thflags & TH_ACK) && 2740 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2741 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2742 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2743 /* keep track of pure window updates */ 2744 if (tlen == 0 && 2745 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2746 TCPSTAT_INC(tcps_rcvwinupd); 2747 tp->snd_wnd = tiwin; 2748 tp->snd_wl1 = th->th_seq; 2749 tp->snd_wl2 = th->th_ack; 2750 if (tp->snd_wnd > tp->max_sndwnd) 2751 tp->max_sndwnd = tp->snd_wnd; 2752 needoutput = 1; 2753 } 2754 2755 /* 2756 * Process segments with URG. 2757 */ 2758 if ((thflags & TH_URG) && th->th_urp && 2759 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2760 /* 2761 * This is a kludge, but if we receive and accept 2762 * random urgent pointers, we'll crash in 2763 * soreceive. It's hard to imagine someone 2764 * actually wanting to send this much urgent data. 2765 */ 2766 SOCKBUF_LOCK(&so->so_rcv); 2767 if (th->th_urp + so->so_rcv.sb_cc > sb_max) { 2768 th->th_urp = 0; /* XXX */ 2769 thflags &= ~TH_URG; /* XXX */ 2770 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2771 goto dodata; /* XXX */ 2772 } 2773 /* 2774 * If this segment advances the known urgent pointer, 2775 * then mark the data stream. This should not happen 2776 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2777 * a FIN has been received from the remote side. 2778 * In these states we ignore the URG. 2779 * 2780 * According to RFC961 (Assigned Protocols), 2781 * the urgent pointer points to the last octet 2782 * of urgent data. We continue, however, 2783 * to consider it to indicate the first octet 2784 * of data past the urgent section as the original 2785 * spec states (in one of two places). 2786 */ 2787 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2788 tp->rcv_up = th->th_seq + th->th_urp; 2789 so->so_oobmark = so->so_rcv.sb_cc + 2790 (tp->rcv_up - tp->rcv_nxt) - 1; 2791 if (so->so_oobmark == 0) 2792 so->so_rcv.sb_state |= SBS_RCVATMARK; 2793 sohasoutofband(so); 2794 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2795 } 2796 SOCKBUF_UNLOCK(&so->so_rcv); 2797 /* 2798 * Remove out of band data so doesn't get presented to user. 2799 * This can happen independent of advancing the URG pointer, 2800 * but if two URG's are pending at once, some out-of-band 2801 * data may creep in... ick. 2802 */ 2803 if (th->th_urp <= (u_long)tlen && 2804 !(so->so_options & SO_OOBINLINE)) { 2805 /* hdr drop is delayed */ 2806 tcp_pulloutofband(so, th, m, drop_hdrlen); 2807 } 2808 } else { 2809 /* 2810 * If no out of band data is expected, 2811 * pull receive urgent pointer along 2812 * with the receive window. 2813 */ 2814 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2815 tp->rcv_up = tp->rcv_nxt; 2816 } 2817dodata: /* XXX */ 2818 INP_WLOCK_ASSERT(tp->t_inpcb); 2819 2820 /* 2821 * Process the segment text, merging it into the TCP sequencing queue, 2822 * and arranging for acknowledgment of receipt if necessary. 2823 * This process logically involves adjusting tp->rcv_wnd as data 2824 * is presented to the user (this happens in tcp_usrreq.c, 2825 * case PRU_RCVD). If a FIN has already been received on this 2826 * connection then we just ignore the text. 2827 */ 2828 if ((tlen || (thflags & TH_FIN)) && 2829 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2830 tcp_seq save_start = th->th_seq; 2831 m_adj(m, drop_hdrlen); /* delayed header drop */ 2832 /* 2833 * Insert segment which includes th into TCP reassembly queue 2834 * with control block tp. Set thflags to whether reassembly now 2835 * includes a segment with FIN. This handles the common case 2836 * inline (segment is the next to be received on an established 2837 * connection, and the queue is empty), avoiding linkage into 2838 * and removal from the queue and repetition of various 2839 * conversions. 2840 * Set DELACK for segments received in order, but ack 2841 * immediately when segments are out of order (so 2842 * fast retransmit can work). 2843 */ 2844 if (th->th_seq == tp->rcv_nxt && 2845 LIST_EMPTY(&tp->t_segq) && 2846 TCPS_HAVEESTABLISHED(tp->t_state)) { 2847 if (DELAY_ACK(tp)) 2848 tp->t_flags |= TF_DELACK; 2849 else 2850 tp->t_flags |= TF_ACKNOW; 2851 tp->rcv_nxt += tlen; 2852 thflags = th->th_flags & TH_FIN; 2853 TCPSTAT_INC(tcps_rcvpack); 2854 TCPSTAT_ADD(tcps_rcvbyte, tlen); 2855 ND6_HINT(tp); 2856 SOCKBUF_LOCK(&so->so_rcv); 2857 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 2858 m_freem(m); 2859 else 2860 sbappendstream_locked(&so->so_rcv, m); 2861 /* NB: sorwakeup_locked() does an implicit unlock. */ 2862 sorwakeup_locked(so); 2863 } else { 2864 /* 2865 * XXX: Due to the header drop above "th" is 2866 * theoretically invalid by now. Fortunately 2867 * m_adj() doesn't actually frees any mbufs 2868 * when trimming from the head. 2869 */ 2870 thflags = tcp_reass(tp, th, &tlen, m); 2871 tp->t_flags |= TF_ACKNOW; 2872 } 2873 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT)) 2874 tcp_update_sack_list(tp, save_start, save_start + tlen); 2875#if 0 2876 /* 2877 * Note the amount of data that peer has sent into 2878 * our window, in order to estimate the sender's 2879 * buffer size. 2880 * XXX: Unused. 2881 */ 2882 if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt)) 2883 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 2884 else 2885 len = so->so_rcv.sb_hiwat; 2886#endif 2887 } else { 2888 m_freem(m); 2889 thflags &= ~TH_FIN; 2890 } 2891 2892 /* 2893 * If FIN is received ACK the FIN and let the user know 2894 * that the connection is closing. 2895 */ 2896 if (thflags & TH_FIN) { 2897 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2898 socantrcvmore(so); 2899 /* 2900 * If connection is half-synchronized 2901 * (ie NEEDSYN flag on) then delay ACK, 2902 * so it may be piggybacked when SYN is sent. 2903 * Otherwise, since we received a FIN then no 2904 * more input can be expected, send ACK now. 2905 */ 2906 if (tp->t_flags & TF_NEEDSYN) 2907 tp->t_flags |= TF_DELACK; 2908 else 2909 tp->t_flags |= TF_ACKNOW; 2910 tp->rcv_nxt++; 2911 } 2912 switch (tp->t_state) { 2913 2914 /* 2915 * In SYN_RECEIVED and ESTABLISHED STATES 2916 * enter the CLOSE_WAIT state. 2917 */ 2918 case TCPS_SYN_RECEIVED: 2919 tp->t_starttime = ticks; 2920 /* FALLTHROUGH */ 2921 case TCPS_ESTABLISHED: 2922 tp->t_state = TCPS_CLOSE_WAIT; 2923 break; 2924 2925 /* 2926 * If still in FIN_WAIT_1 STATE FIN has not been acked so 2927 * enter the CLOSING state. 2928 */ 2929 case TCPS_FIN_WAIT_1: 2930 tp->t_state = TCPS_CLOSING; 2931 break; 2932 2933 /* 2934 * In FIN_WAIT_2 state enter the TIME_WAIT state, 2935 * starting the time-wait timer, turning off the other 2936 * standard timers. 2937 */ 2938 case TCPS_FIN_WAIT_2: 2939 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2940 KASSERT(ti_locked == TI_WLOCKED, ("%s: dodata " 2941 "TCP_FIN_WAIT_2 ti_locked: %d", __func__, 2942 ti_locked)); 2943 2944 tcp_twstart(tp); 2945 INP_INFO_WUNLOCK(&V_tcbinfo); 2946 return; 2947 } 2948 } 2949 if (ti_locked == TI_WLOCKED) 2950 INP_INFO_WUNLOCK(&V_tcbinfo); 2951 ti_locked = TI_UNLOCKED; 2952 2953#ifdef TCPDEBUG 2954 if (so->so_options & SO_DEBUG) 2955 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 2956 &tcp_savetcp, 0); 2957#endif 2958 2959 /* 2960 * Return any desired output. 2961 */ 2962 if (needoutput || (tp->t_flags & TF_ACKNOW)) 2963 (void) tcp_output(tp); 2964 2965check_delack: 2966 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d", 2967 __func__, ti_locked)); 2968 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 2969 INP_WLOCK_ASSERT(tp->t_inpcb); 2970 2971 if (tp->t_flags & TF_DELACK) { 2972 tp->t_flags &= ~TF_DELACK; 2973 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 2974 } 2975 INP_WUNLOCK(tp->t_inpcb); 2976 return; 2977 2978dropafterack: 2979 /* 2980 * Generate an ACK dropping incoming segment if it occupies 2981 * sequence space, where the ACK reflects our state. 2982 * 2983 * We can now skip the test for the RST flag since all 2984 * paths to this code happen after packets containing 2985 * RST have been dropped. 2986 * 2987 * In the SYN-RECEIVED state, don't send an ACK unless the 2988 * segment we received passes the SYN-RECEIVED ACK test. 2989 * If it fails send a RST. This breaks the loop in the 2990 * "LAND" DoS attack, and also prevents an ACK storm 2991 * between two listening ports that have been sent forged 2992 * SYN segments, each with the source address of the other. 2993 */ 2994 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 2995 (SEQ_GT(tp->snd_una, th->th_ack) || 2996 SEQ_GT(th->th_ack, tp->snd_max)) ) { 2997 rstreason = BANDLIM_RST_OPENPORT; 2998 goto dropwithreset; 2999 } 3000#ifdef TCPDEBUG 3001 if (so->so_options & SO_DEBUG) 3002 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3003 &tcp_savetcp, 0); 3004#endif 3005 if (ti_locked == TI_WLOCKED) 3006 INP_INFO_WUNLOCK(&V_tcbinfo); 3007 ti_locked = TI_UNLOCKED; 3008 3009 tp->t_flags |= TF_ACKNOW; 3010 (void) tcp_output(tp); 3011 INP_WUNLOCK(tp->t_inpcb); 3012 m_freem(m); 3013 return; 3014 3015dropwithreset: 3016 if (ti_locked == TI_WLOCKED) 3017 INP_INFO_WUNLOCK(&V_tcbinfo); 3018 ti_locked = TI_UNLOCKED; 3019 3020 if (tp != NULL) { 3021 tcp_dropwithreset(m, th, tp, tlen, rstreason); 3022 INP_WUNLOCK(tp->t_inpcb); 3023 } else 3024 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 3025 return; 3026 3027drop: 3028 if (ti_locked == TI_WLOCKED) { 3029 INP_INFO_WUNLOCK(&V_tcbinfo); 3030 ti_locked = TI_UNLOCKED; 3031 } 3032#ifdef INVARIANTS 3033 else 3034 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 3035#endif 3036 3037 /* 3038 * Drop space held by incoming segment and return. 3039 */ 3040#ifdef TCPDEBUG 3041 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 3042 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 3043 &tcp_savetcp, 0); 3044#endif 3045 if (tp != NULL) 3046 INP_WUNLOCK(tp->t_inpcb); 3047 m_freem(m); 3048} 3049 3050/* 3051 * Issue RST and make ACK acceptable to originator of segment. 3052 * The mbuf must still include the original packet header. 3053 * tp may be NULL. 3054 */ 3055static void 3056tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 3057 int tlen, int rstreason) 3058{ 3059#ifdef INET 3060 struct ip *ip; 3061#endif 3062#ifdef INET6 3063 struct ip6_hdr *ip6; 3064#endif 3065 3066 if (tp != NULL) { 3067 INP_WLOCK_ASSERT(tp->t_inpcb); 3068 } 3069 3070 /* Don't bother if destination was broadcast/multicast. */ 3071 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 3072 goto drop; 3073#ifdef INET6 3074 if (mtod(m, struct ip *)->ip_v == 6) { 3075 ip6 = mtod(m, struct ip6_hdr *); 3076 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 3077 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 3078 goto drop; 3079 /* IPv6 anycast check is done at tcp6_input() */ 3080 } 3081#endif 3082#if defined(INET) && defined(INET6) 3083 else 3084#endif 3085#ifdef INET 3086 { 3087 ip = mtod(m, struct ip *); 3088 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 3089 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 3090 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 3091 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 3092 goto drop; 3093 } 3094#endif 3095 3096 /* Perform bandwidth limiting. */ 3097 if (badport_bandlim(rstreason) < 0) 3098 goto drop; 3099 3100 /* tcp_respond consumes the mbuf chain. */ 3101 if (th->th_flags & TH_ACK) { 3102 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 3103 th->th_ack, TH_RST); 3104 } else { 3105 if (th->th_flags & TH_SYN) 3106 tlen++; 3107 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 3108 (tcp_seq)0, TH_RST|TH_ACK); 3109 } 3110 return; 3111drop: 3112 m_freem(m); 3113} 3114 3115/* 3116 * Parse TCP options and place in tcpopt. 3117 */ 3118static void 3119tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 3120{ 3121 int opt, optlen; 3122 3123 to->to_flags = 0; 3124 for (; cnt > 0; cnt -= optlen, cp += optlen) { 3125 opt = cp[0]; 3126 if (opt == TCPOPT_EOL) 3127 break; 3128 if (opt == TCPOPT_NOP) 3129 optlen = 1; 3130 else { 3131 if (cnt < 2) 3132 break; 3133 optlen = cp[1]; 3134 if (optlen < 2 || optlen > cnt) 3135 break; 3136 } 3137 switch (opt) { 3138 case TCPOPT_MAXSEG: 3139 if (optlen != TCPOLEN_MAXSEG) 3140 continue; 3141 if (!(flags & TO_SYN)) 3142 continue; 3143 to->to_flags |= TOF_MSS; 3144 bcopy((char *)cp + 2, 3145 (char *)&to->to_mss, sizeof(to->to_mss)); 3146 to->to_mss = ntohs(to->to_mss); 3147 break; 3148 case TCPOPT_WINDOW: 3149 if (optlen != TCPOLEN_WINDOW) 3150 continue; 3151 if (!(flags & TO_SYN)) 3152 continue; 3153 to->to_flags |= TOF_SCALE; 3154 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 3155 break; 3156 case TCPOPT_TIMESTAMP: 3157 if (optlen != TCPOLEN_TIMESTAMP) 3158 continue; 3159 to->to_flags |= TOF_TS; 3160 bcopy((char *)cp + 2, 3161 (char *)&to->to_tsval, sizeof(to->to_tsval)); 3162 to->to_tsval = ntohl(to->to_tsval); 3163 bcopy((char *)cp + 6, 3164 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 3165 to->to_tsecr = ntohl(to->to_tsecr); 3166 break; 3167#ifdef TCP_SIGNATURE 3168 /* 3169 * XXX In order to reply to a host which has set the 3170 * TCP_SIGNATURE option in its initial SYN, we have to 3171 * record the fact that the option was observed here 3172 * for the syncache code to perform the correct response. 3173 */ 3174 case TCPOPT_SIGNATURE: 3175 if (optlen != TCPOLEN_SIGNATURE) 3176 continue; 3177 to->to_flags |= TOF_SIGNATURE; 3178 to->to_signature = cp + 2; 3179 break; 3180#endif 3181 case TCPOPT_SACK_PERMITTED: 3182 if (optlen != TCPOLEN_SACK_PERMITTED) 3183 continue; 3184 if (!(flags & TO_SYN)) 3185 continue; 3186 if (!V_tcp_do_sack) 3187 continue; 3188 to->to_flags |= TOF_SACKPERM; 3189 break; 3190 case TCPOPT_SACK: 3191 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 3192 continue; 3193 if (flags & TO_SYN) 3194 continue; 3195 to->to_flags |= TOF_SACK; 3196 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 3197 to->to_sacks = cp + 2; 3198 TCPSTAT_INC(tcps_sack_rcv_blocks); 3199 break; 3200 default: 3201 continue; 3202 } 3203 } 3204} 3205 3206/* 3207 * Pull out of band byte out of a segment so 3208 * it doesn't appear in the user's data queue. 3209 * It is still reflected in the segment length for 3210 * sequencing purposes. 3211 */ 3212static void 3213tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 3214 int off) 3215{ 3216 int cnt = off + th->th_urp - 1; 3217 3218 while (cnt >= 0) { 3219 if (m->m_len > cnt) { 3220 char *cp = mtod(m, caddr_t) + cnt; 3221 struct tcpcb *tp = sototcpcb(so); 3222 3223 INP_WLOCK_ASSERT(tp->t_inpcb); 3224 3225 tp->t_iobc = *cp; 3226 tp->t_oobflags |= TCPOOB_HAVEDATA; 3227 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 3228 m->m_len--; 3229 if (m->m_flags & M_PKTHDR) 3230 m->m_pkthdr.len--; 3231 return; 3232 } 3233 cnt -= m->m_len; 3234 m = m->m_next; 3235 if (m == NULL) 3236 break; 3237 } 3238 panic("tcp_pulloutofband"); 3239} 3240 3241/* 3242 * Collect new round-trip time estimate 3243 * and update averages and current timeout. 3244 */ 3245static void 3246tcp_xmit_timer(struct tcpcb *tp, int rtt) 3247{ 3248 int delta; 3249 3250 INP_WLOCK_ASSERT(tp->t_inpcb); 3251 3252 TCPSTAT_INC(tcps_rttupdated); 3253 tp->t_rttupdated++; 3254 if (tp->t_srtt != 0) { 3255 /* 3256 * srtt is stored as fixed point with 5 bits after the 3257 * binary point (i.e., scaled by 8). The following magic 3258 * is equivalent to the smoothing algorithm in rfc793 with 3259 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 3260 * point). Adjust rtt to origin 0. 3261 */ 3262 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3263 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3264 3265 if ((tp->t_srtt += delta) <= 0) 3266 tp->t_srtt = 1; 3267 3268 /* 3269 * We accumulate a smoothed rtt variance (actually, a 3270 * smoothed mean difference), then set the retransmit 3271 * timer to smoothed rtt + 4 times the smoothed variance. 3272 * rttvar is stored as fixed point with 4 bits after the 3273 * binary point (scaled by 16). The following is 3274 * equivalent to rfc793 smoothing with an alpha of .75 3275 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 3276 * rfc793's wired-in beta. 3277 */ 3278 if (delta < 0) 3279 delta = -delta; 3280 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3281 if ((tp->t_rttvar += delta) <= 0) 3282 tp->t_rttvar = 1; 3283 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 3284 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3285 } else { 3286 /* 3287 * No rtt measurement yet - use the unsmoothed rtt. 3288 * Set the variance to half the rtt (so our first 3289 * retransmit happens at 3*rtt). 3290 */ 3291 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3292 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3293 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3294 } 3295 tp->t_rtttime = 0; 3296 tp->t_rxtshift = 0; 3297 3298 /* 3299 * the retransmit should happen at rtt + 4 * rttvar. 3300 * Because of the way we do the smoothing, srtt and rttvar 3301 * will each average +1/2 tick of bias. When we compute 3302 * the retransmit timer, we want 1/2 tick of rounding and 3303 * 1 extra tick because of +-1/2 tick uncertainty in the 3304 * firing of the timer. The bias will give us exactly the 3305 * 1.5 tick we need. But, because the bias is 3306 * statistical, we have to test that we don't drop below 3307 * the minimum feasible timer (which is 2 ticks). 3308 */ 3309 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3310 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 3311 3312 /* 3313 * We received an ack for a packet that wasn't retransmitted; 3314 * it is probably safe to discard any error indications we've 3315 * received recently. This isn't quite right, but close enough 3316 * for now (a route might have failed after we sent a segment, 3317 * and the return path might not be symmetrical). 3318 */ 3319 tp->t_softerror = 0; 3320} 3321 3322/* 3323 * Determine a reasonable value for maxseg size. 3324 * If the route is known, check route for mtu. 3325 * If none, use an mss that can be handled on the outgoing interface 3326 * without forcing IP to fragment. If no route is found, route has no mtu, 3327 * or the destination isn't local, use a default, hopefully conservative 3328 * size (usually 512 or the default IP max size, but no more than the mtu 3329 * of the interface), as we can't discover anything about intervening 3330 * gateways or networks. We also initialize the congestion/slow start 3331 * window to be a single segment if the destination isn't local. 3332 * While looking at the routing entry, we also initialize other path-dependent 3333 * parameters from pre-set or cached values in the routing entry. 3334 * 3335 * Also take into account the space needed for options that we 3336 * send regularly. Make maxseg shorter by that amount to assure 3337 * that we can send maxseg amount of data even when the options 3338 * are present. Store the upper limit of the length of options plus 3339 * data in maxopd. 3340 * 3341 * NOTE that this routine is only called when we process an incoming 3342 * segment, or an ICMP need fragmentation datagram. Outgoing SYN/ACK MSS 3343 * settings are handled in tcp_mssopt(). 3344 */ 3345void 3346tcp_mss_update(struct tcpcb *tp, int offer, int mtuoffer, 3347 struct hc_metrics_lite *metricptr, int *mtuflags) 3348{ 3349 int mss = 0; 3350 u_long maxmtu = 0; 3351 struct inpcb *inp = tp->t_inpcb; 3352 struct hc_metrics_lite metrics; 3353 int origoffer; 3354#ifdef INET6 3355 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3356 size_t min_protoh = isipv6 ? 3357 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 3358 sizeof (struct tcpiphdr); 3359#else 3360 const size_t min_protoh = sizeof(struct tcpiphdr); 3361#endif 3362 3363 INP_WLOCK_ASSERT(tp->t_inpcb); 3364 3365 if (mtuoffer != -1) { 3366 KASSERT(offer == -1, ("%s: conflict", __func__)); 3367 offer = mtuoffer - min_protoh; 3368 } 3369 origoffer = offer; 3370 3371 /* Initialize. */ 3372#ifdef INET6 3373 if (isipv6) { 3374 maxmtu = tcp_maxmtu6(&inp->inp_inc, mtuflags); 3375 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt; 3376 } 3377#endif 3378#if defined(INET) && defined(INET6) 3379 else 3380#endif 3381#ifdef INET 3382 { 3383 maxmtu = tcp_maxmtu(&inp->inp_inc, mtuflags); 3384 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt; 3385 } 3386#endif 3387 3388 /* 3389 * No route to sender, stay with default mss and return. 3390 */ 3391 if (maxmtu == 0) { 3392 /* 3393 * In case we return early we need to initialize metrics 3394 * to a defined state as tcp_hc_get() would do for us 3395 * if there was no cache hit. 3396 */ 3397 if (metricptr != NULL) 3398 bzero(metricptr, sizeof(struct hc_metrics_lite)); 3399 return; 3400 } 3401 3402 /* What have we got? */ 3403 switch (offer) { 3404 case 0: 3405 /* 3406 * Offer == 0 means that there was no MSS on the SYN 3407 * segment, in this case we use tcp_mssdflt as 3408 * already assigned to t_maxopd above. 3409 */ 3410 offer = tp->t_maxopd; 3411 break; 3412 3413 case -1: 3414 /* 3415 * Offer == -1 means that we didn't receive SYN yet. 3416 */ 3417 /* FALLTHROUGH */ 3418 3419 default: 3420 /* 3421 * Prevent DoS attack with too small MSS. Round up 3422 * to at least minmss. 3423 */ 3424 offer = max(offer, V_tcp_minmss); 3425 } 3426 3427 /* 3428 * rmx information is now retrieved from tcp_hostcache. 3429 */ 3430 tcp_hc_get(&inp->inp_inc, &metrics); 3431 if (metricptr != NULL) 3432 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite)); 3433 3434 /* 3435 * If there's a discovered mtu int tcp hostcache, use it 3436 * else, use the link mtu. 3437 */ 3438 if (metrics.rmx_mtu) 3439 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 3440 else { 3441#ifdef INET6 3442 if (isipv6) { 3443 mss = maxmtu - min_protoh; 3444 if (!V_path_mtu_discovery && 3445 !in6_localaddr(&inp->in6p_faddr)) 3446 mss = min(mss, V_tcp_v6mssdflt); 3447 } 3448#endif 3449#if defined(INET) && defined(INET6) 3450 else 3451#endif 3452#ifdef INET 3453 { 3454 mss = maxmtu - min_protoh; 3455 if (!V_path_mtu_discovery && 3456 !in_localaddr(inp->inp_faddr)) 3457 mss = min(mss, V_tcp_mssdflt); 3458 } 3459#endif 3460 /* 3461 * XXX - The above conditional (mss = maxmtu - min_protoh) 3462 * probably violates the TCP spec. 3463 * The problem is that, since we don't know the 3464 * other end's MSS, we are supposed to use a conservative 3465 * default. But, if we do that, then MTU discovery will 3466 * never actually take place, because the conservative 3467 * default is much less than the MTUs typically seen 3468 * on the Internet today. For the moment, we'll sweep 3469 * this under the carpet. 3470 * 3471 * The conservative default might not actually be a problem 3472 * if the only case this occurs is when sending an initial 3473 * SYN with options and data to a host we've never talked 3474 * to before. Then, they will reply with an MSS value which 3475 * will get recorded and the new parameters should get 3476 * recomputed. For Further Study. 3477 */ 3478 } 3479 mss = min(mss, offer); 3480 3481 /* 3482 * Sanity check: make sure that maxopd will be large 3483 * enough to allow some data on segments even if the 3484 * all the option space is used (40bytes). Otherwise 3485 * funny things may happen in tcp_output. 3486 */ 3487 mss = max(mss, 64); 3488 3489 /* 3490 * maxopd stores the maximum length of data AND options 3491 * in a segment; maxseg is the amount of data in a normal 3492 * segment. We need to store this value (maxopd) apart 3493 * from maxseg, because now every segment carries options 3494 * and thus we normally have somewhat less data in segments. 3495 */ 3496 tp->t_maxopd = mss; 3497 3498 /* 3499 * origoffer==-1 indicates that no segments were received yet. 3500 * In this case we just guess. 3501 */ 3502 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && 3503 (origoffer == -1 || 3504 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) 3505 mss -= TCPOLEN_TSTAMP_APPA; 3506 3507 tp->t_maxseg = mss; 3508} 3509 3510void 3511tcp_mss(struct tcpcb *tp, int offer) 3512{ 3513 int mss; 3514 u_long bufsize; 3515 struct inpcb *inp; 3516 struct socket *so; 3517 struct hc_metrics_lite metrics; 3518 int mtuflags = 0; 3519 3520 KASSERT(tp != NULL, ("%s: tp == NULL", __func__)); 3521 3522 tcp_mss_update(tp, offer, -1, &metrics, &mtuflags); 3523 3524 mss = tp->t_maxseg; 3525 inp = tp->t_inpcb; 3526 3527 /* 3528 * If there's a pipesize, change the socket buffer to that size, 3529 * don't change if sb_hiwat is different than default (then it 3530 * has been changed on purpose with setsockopt). 3531 * Make the socket buffers an integral number of mss units; 3532 * if the mss is larger than the socket buffer, decrease the mss. 3533 */ 3534 so = inp->inp_socket; 3535 SOCKBUF_LOCK(&so->so_snd); 3536 if ((so->so_snd.sb_hiwat == V_tcp_sendspace) && metrics.rmx_sendpipe) 3537 bufsize = metrics.rmx_sendpipe; 3538 else 3539 bufsize = so->so_snd.sb_hiwat; 3540 if (bufsize < mss) 3541 mss = bufsize; 3542 else { 3543 bufsize = roundup(bufsize, mss); 3544 if (bufsize > sb_max) 3545 bufsize = sb_max; 3546 if (bufsize > so->so_snd.sb_hiwat) 3547 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL); 3548 } 3549 SOCKBUF_UNLOCK(&so->so_snd); 3550 tp->t_maxseg = mss; 3551 3552 SOCKBUF_LOCK(&so->so_rcv); 3553 if ((so->so_rcv.sb_hiwat == V_tcp_recvspace) && metrics.rmx_recvpipe) 3554 bufsize = metrics.rmx_recvpipe; 3555 else 3556 bufsize = so->so_rcv.sb_hiwat; 3557 if (bufsize > mss) { 3558 bufsize = roundup(bufsize, mss); 3559 if (bufsize > sb_max) 3560 bufsize = sb_max; 3561 if (bufsize > so->so_rcv.sb_hiwat) 3562 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL); 3563 } 3564 SOCKBUF_UNLOCK(&so->so_rcv); 3565 3566 /* Check the interface for TSO capabilities. */ 3567 if (mtuflags & CSUM_TSO) 3568 tp->t_flags |= TF_TSO; 3569} 3570 3571/* 3572 * Determine the MSS option to send on an outgoing SYN. 3573 */ 3574int 3575tcp_mssopt(struct in_conninfo *inc) 3576{ 3577 int mss = 0; 3578 u_long maxmtu = 0; 3579 u_long thcmtu = 0; 3580 size_t min_protoh; 3581 3582 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer")); 3583 3584#ifdef INET6 3585 if (inc->inc_flags & INC_ISIPV6) { 3586 mss = V_tcp_v6mssdflt; 3587 maxmtu = tcp_maxmtu6(inc, NULL); 3588 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 3589 } 3590#endif 3591#if defined(INET) && defined(INET6) 3592 else 3593#endif 3594#ifdef INET 3595 { 3596 mss = V_tcp_mssdflt; 3597 maxmtu = tcp_maxmtu(inc, NULL); 3598 min_protoh = sizeof(struct tcpiphdr); 3599 } 3600#endif 3601#if defined(INET6) || defined(INET) 3602 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3603#endif 3604 3605 if (maxmtu && thcmtu) 3606 mss = min(maxmtu, thcmtu) - min_protoh; 3607 else if (maxmtu || thcmtu) 3608 mss = max(maxmtu, thcmtu) - min_protoh; 3609 3610 return (mss); 3611} 3612 3613 3614/* 3615 * On a partial ack arrives, force the retransmission of the 3616 * next unacknowledged segment. Do not clear tp->t_dupacks. 3617 * By setting snd_nxt to ti_ack, this forces retransmission timer to 3618 * be started again. 3619 */ 3620static void 3621tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) 3622{ 3623 tcp_seq onxt = tp->snd_nxt; 3624 u_long ocwnd = tp->snd_cwnd; 3625 3626 INP_WLOCK_ASSERT(tp->t_inpcb); 3627 3628 tcp_timer_activate(tp, TT_REXMT, 0); 3629 tp->t_rtttime = 0; 3630 tp->snd_nxt = th->th_ack; 3631 /* 3632 * Set snd_cwnd to one segment beyond acknowledged offset. 3633 * (tp->snd_una has not yet been updated when this function is called.) 3634 */ 3635 tp->snd_cwnd = tp->t_maxseg + BYTES_THIS_ACK(tp, th); 3636 tp->t_flags |= TF_ACKNOW; 3637 (void) tcp_output(tp); 3638 tp->snd_cwnd = ocwnd; 3639 if (SEQ_GT(onxt, tp->snd_nxt)) 3640 tp->snd_nxt = onxt; 3641 /* 3642 * Partial window deflation. Relies on fact that tp->snd_una 3643 * not updated yet. 3644 */ 3645 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th)) 3646 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th); 3647 else 3648 tp->snd_cwnd = 0; 3649 tp->snd_cwnd += tp->t_maxseg; 3650} 3651