tcp_input.c revision 216103
1/*- 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995 3 * The Regents of the University of California. All rights reserved. 4 * Copyright (c) 2007-2008,2010 5 * Swinburne University of Technology, Melbourne, Australia. 6 * Copyright (c) 2009-2010 Lawrence Stewart <lstewart@freebsd.org> 7 * Copyright (c) 2010 The FreeBSD Foundation 8 * All rights reserved. 9 * 10 * Portions of this software were developed at the Centre for Advanced Internet 11 * Architectures, Swinburne University, by Lawrence Stewart, James Healy and 12 * David Hayes, made possible in part by a grant from the Cisco University 13 * Research Program Fund at Community Foundation Silicon Valley. 14 * 15 * Portions of this software were developed at the Centre for Advanced 16 * Internet Architectures, Swinburne University of Technology, Melbourne, 17 * Australia by David Hayes under sponsorship from the FreeBSD Foundation. 18 * 19 * Redistribution and use in source and binary forms, with or without 20 * modification, are permitted provided that the following conditions 21 * are met: 22 * 1. Redistributions of source code must retain the above copyright 23 * notice, this list of conditions and the following disclaimer. 24 * 2. Redistributions in binary form must reproduce the above copyright 25 * notice, this list of conditions and the following disclaimer in the 26 * documentation and/or other materials provided with the distribution. 27 * 4. Neither the name of the University nor the names of its contributors 28 * may be used to endorse or promote products derived from this software 29 * without specific prior written permission. 30 * 31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 34 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 41 * SUCH DAMAGE. 42 * 43 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95 44 */ 45 46#include <sys/cdefs.h> 47__FBSDID("$FreeBSD: head/sys/netinet/tcp_input.c 216103 2010-12-02 01:01:37Z lstewart $"); 48 49#include "opt_ipfw.h" /* for ipfw_fwd */ 50#include "opt_inet.h" 51#include "opt_inet6.h" 52#include "opt_ipsec.h" 53#include "opt_tcpdebug.h" 54 55#include <sys/param.h> 56#include <sys/kernel.h> 57#include <sys/malloc.h> 58#include <sys/mbuf.h> 59#include <sys/proc.h> /* for proc0 declaration */ 60#include <sys/protosw.h> 61#include <sys/signalvar.h> 62#include <sys/socket.h> 63#include <sys/socketvar.h> 64#include <sys/sysctl.h> 65#include <sys/syslog.h> 66#include <sys/systm.h> 67 68#include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */ 69 70#include <vm/uma.h> 71 72#include <net/if.h> 73#include <net/route.h> 74#include <net/vnet.h> 75 76#define TCPSTATES /* for logging */ 77 78#include <netinet/cc.h> 79#include <netinet/in.h> 80#include <netinet/in_pcb.h> 81#include <netinet/in_systm.h> 82#include <netinet/in_var.h> 83#include <netinet/ip.h> 84#include <netinet/ip_icmp.h> /* required for icmp_var.h */ 85#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 86#include <netinet/ip_var.h> 87#include <netinet/ip_options.h> 88#include <netinet/ip6.h> 89#include <netinet/icmp6.h> 90#include <netinet6/in6_pcb.h> 91#include <netinet6/ip6_var.h> 92#include <netinet6/nd6.h> 93#include <netinet/tcp_fsm.h> 94#include <netinet/tcp_seq.h> 95#include <netinet/tcp_timer.h> 96#include <netinet/tcp_var.h> 97#include <netinet6/tcp6_var.h> 98#include <netinet/tcpip.h> 99#include <netinet/tcp_syncache.h> 100#ifdef TCPDEBUG 101#include <netinet/tcp_debug.h> 102#endif /* TCPDEBUG */ 103 104#ifdef IPSEC 105#include <netipsec/ipsec.h> 106#include <netipsec/ipsec6.h> 107#endif /*IPSEC*/ 108 109#include <machine/in_cksum.h> 110 111#include <security/mac/mac_framework.h> 112 113const int tcprexmtthresh = 3; 114 115VNET_DEFINE(struct tcpstat, tcpstat); 116SYSCTL_VNET_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW, 117 &VNET_NAME(tcpstat), tcpstat, 118 "TCP statistics (struct tcpstat, netinet/tcp_var.h)"); 119 120int tcp_log_in_vain = 0; 121SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW, 122 &tcp_log_in_vain, 0, 123 "Log all incoming TCP segments to closed ports"); 124 125VNET_DEFINE(int, blackhole) = 0; 126#define V_blackhole VNET(blackhole) 127SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW, 128 &VNET_NAME(blackhole), 0, 129 "Do not send RST on segments to closed ports"); 130 131VNET_DEFINE(int, tcp_delack_enabled) = 1; 132SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW, 133 &VNET_NAME(tcp_delack_enabled), 0, 134 "Delay ACK to try and piggyback it onto a data packet"); 135 136VNET_DEFINE(int, drop_synfin) = 0; 137#define V_drop_synfin VNET(drop_synfin) 138SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW, 139 &VNET_NAME(drop_synfin), 0, 140 "Drop TCP packets with SYN+FIN set"); 141 142VNET_DEFINE(int, tcp_do_rfc3042) = 1; 143#define V_tcp_do_rfc3042 VNET(tcp_do_rfc3042) 144SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW, 145 &VNET_NAME(tcp_do_rfc3042), 0, 146 "Enable RFC 3042 (Limited Transmit)"); 147 148VNET_DEFINE(int, tcp_do_rfc3390) = 1; 149SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW, 150 &VNET_NAME(tcp_do_rfc3390), 0, 151 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)"); 152 153VNET_DEFINE(int, tcp_do_rfc3465) = 1; 154SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_RW, 155 &VNET_NAME(tcp_do_rfc3465), 0, 156 "Enable RFC 3465 (Appropriate Byte Counting)"); 157 158VNET_DEFINE(int, tcp_abc_l_var) = 2; 159SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_RW, 160 &VNET_NAME(tcp_abc_l_var), 2, 161 "Cap the max cwnd increment during slow-start to this number of segments"); 162 163SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN"); 164 165VNET_DEFINE(int, tcp_do_ecn) = 0; 166SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_RW, 167 &VNET_NAME(tcp_do_ecn), 0, 168 "TCP ECN support"); 169 170VNET_DEFINE(int, tcp_ecn_maxretries) = 1; 171SYSCTL_VNET_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_RW, 172 &VNET_NAME(tcp_ecn_maxretries), 0, 173 "Max retries before giving up on ECN"); 174 175VNET_DEFINE(int, tcp_insecure_rst) = 0; 176#define V_tcp_insecure_rst VNET(tcp_insecure_rst) 177SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW, 178 &VNET_NAME(tcp_insecure_rst), 0, 179 "Follow the old (insecure) criteria for accepting RST packets"); 180 181VNET_DEFINE(int, tcp_do_autorcvbuf) = 1; 182#define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf) 183SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW, 184 &VNET_NAME(tcp_do_autorcvbuf), 0, 185 "Enable automatic receive buffer sizing"); 186 187VNET_DEFINE(int, tcp_autorcvbuf_inc) = 16*1024; 188#define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc) 189SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW, 190 &VNET_NAME(tcp_autorcvbuf_inc), 0, 191 "Incrementor step size of automatic receive buffer"); 192 193VNET_DEFINE(int, tcp_autorcvbuf_max) = 256*1024; 194#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max) 195SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW, 196 &VNET_NAME(tcp_autorcvbuf_max), 0, 197 "Max size of automatic receive buffer"); 198 199int tcp_read_locking = 1; 200SYSCTL_INT(_net_inet_tcp, OID_AUTO, read_locking, CTLFLAG_RW, 201 &tcp_read_locking, 0, "Enable read locking strategy"); 202 203VNET_DEFINE(struct inpcbhead, tcb); 204#define tcb6 tcb /* for KAME src sync over BSD*'s */ 205VNET_DEFINE(struct inpcbinfo, tcbinfo); 206 207static void tcp_dooptions(struct tcpopt *, u_char *, int, int); 208static void tcp_do_segment(struct mbuf *, struct tcphdr *, 209 struct socket *, struct tcpcb *, int, int, uint8_t, 210 int); 211static void tcp_dropwithreset(struct mbuf *, struct tcphdr *, 212 struct tcpcb *, int, int); 213static void tcp_pulloutofband(struct socket *, 214 struct tcphdr *, struct mbuf *, int); 215static void tcp_xmit_timer(struct tcpcb *, int); 216static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *); 217static void inline cc_ack_received(struct tcpcb *tp, struct tcphdr *th, 218 uint16_t type); 219static void inline cc_conn_init(struct tcpcb *tp); 220static void inline cc_post_recovery(struct tcpcb *tp, struct tcphdr *th); 221 222/* 223 * Kernel module interface for updating tcpstat. The argument is an index 224 * into tcpstat treated as an array of u_long. While this encodes the 225 * general layout of tcpstat into the caller, it doesn't encode its location, 226 * so that future changes to add, for example, per-CPU stats support won't 227 * cause binary compatibility problems for kernel modules. 228 */ 229void 230kmod_tcpstat_inc(int statnum) 231{ 232 233 (*((u_long *)&V_tcpstat + statnum))++; 234} 235 236/* 237 * CC wrapper hook functions 238 */ 239static void inline 240cc_ack_received(struct tcpcb *tp, struct tcphdr *th, uint16_t type) 241{ 242 INP_WLOCK_ASSERT(tp->t_inpcb); 243 244 tp->ccv->bytes_this_ack = BYTES_THIS_ACK(tp, th); 245 if (tp->snd_cwnd == min(tp->snd_cwnd, tp->snd_wnd)) 246 tp->ccv->flags |= CCF_CWND_LIMITED; 247 else 248 tp->ccv->flags &= ~CCF_CWND_LIMITED; 249 250 if (type == CC_ACK) { 251 if (tp->snd_cwnd > tp->snd_ssthresh) { 252 tp->t_bytes_acked += min(tp->ccv->bytes_this_ack, 253 V_tcp_abc_l_var * tp->t_maxseg); 254 if (tp->t_bytes_acked >= tp->snd_cwnd) { 255 tp->t_bytes_acked -= tp->snd_cwnd; 256 tp->ccv->flags |= CCF_ABC_SENTAWND; 257 } 258 } else { 259 tp->ccv->flags &= ~CCF_ABC_SENTAWND; 260 tp->t_bytes_acked = 0; 261 } 262 } 263 264 if (CC_ALGO(tp)->ack_received != NULL) { 265 /* XXXLAS: Find a way to live without this */ 266 tp->ccv->curack = th->th_ack; 267 CC_ALGO(tp)->ack_received(tp->ccv, type); 268 } 269} 270 271static void inline 272cc_conn_init(struct tcpcb *tp) 273{ 274 struct hc_metrics_lite metrics; 275 struct inpcb *inp = tp->t_inpcb; 276 int rtt; 277#ifdef INET6 278 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 279#endif 280 281 INP_WLOCK_ASSERT(tp->t_inpcb); 282 283 tcp_hc_get(&inp->inp_inc, &metrics); 284 285 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) { 286 tp->t_srtt = rtt; 287 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE; 288 TCPSTAT_INC(tcps_usedrtt); 289 if (metrics.rmx_rttvar) { 290 tp->t_rttvar = metrics.rmx_rttvar; 291 TCPSTAT_INC(tcps_usedrttvar); 292 } else { 293 /* default variation is +- 1 rtt */ 294 tp->t_rttvar = 295 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; 296 } 297 TCPT_RANGESET(tp->t_rxtcur, 298 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, 299 tp->t_rttmin, TCPTV_REXMTMAX); 300 } 301 if (metrics.rmx_ssthresh) { 302 /* 303 * There's some sort of gateway or interface 304 * buffer limit on the path. Use this to set 305 * the slow start threshhold, but set the 306 * threshold to no less than 2*mss. 307 */ 308 tp->snd_ssthresh = max(2 * tp->t_maxseg, metrics.rmx_ssthresh); 309 TCPSTAT_INC(tcps_usedssthresh); 310 } 311 312 /* 313 * Set the slow-start flight size depending on whether this 314 * is a local network or not. 315 * 316 * Extend this so we cache the cwnd too and retrieve it here. 317 * Make cwnd even bigger than RFC3390 suggests but only if we 318 * have previous experience with the remote host. Be careful 319 * not make cwnd bigger than remote receive window or our own 320 * send socket buffer. Maybe put some additional upper bound 321 * on the retrieved cwnd. Should do incremental updates to 322 * hostcache when cwnd collapses so next connection doesn't 323 * overloads the path again. 324 * 325 * XXXAO: Initializing the CWND from the hostcache is broken 326 * and in its current form not RFC conformant. It is disabled 327 * until fixed or removed entirely. 328 * 329 * RFC3390 says only do this if SYN or SYN/ACK didn't got lost. 330 * We currently check only in syncache_socket for that. 331 */ 332/* #define TCP_METRICS_CWND */ 333#ifdef TCP_METRICS_CWND 334 if (metrics.rmx_cwnd) 335 tp->snd_cwnd = max(tp->t_maxseg, min(metrics.rmx_cwnd / 2, 336 min(tp->snd_wnd, so->so_snd.sb_hiwat))); 337 else 338#endif 339 if (V_tcp_do_rfc3390) 340 tp->snd_cwnd = min(4 * tp->t_maxseg, 341 max(2 * tp->t_maxseg, 4380)); 342#ifdef INET6 343 else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) || 344 (!isipv6 && in_localaddr(inp->inp_faddr))) 345#else 346 else if (in_localaddr(inp->inp_faddr)) 347#endif 348 tp->snd_cwnd = tp->t_maxseg * V_ss_fltsz_local; 349 else 350 tp->snd_cwnd = tp->t_maxseg * V_ss_fltsz; 351 352 if (CC_ALGO(tp)->conn_init != NULL) 353 CC_ALGO(tp)->conn_init(tp->ccv); 354} 355 356void inline 357cc_cong_signal(struct tcpcb *tp, struct tcphdr *th, uint32_t type) 358{ 359 INP_WLOCK_ASSERT(tp->t_inpcb); 360 361 switch(type) { 362 case CC_NDUPACK: 363 if (!IN_FASTRECOVERY(tp->t_flags)) { 364 tp->snd_recover = tp->snd_max; 365 if (tp->t_flags & TF_ECN_PERMIT) 366 tp->t_flags |= TF_ECN_SND_CWR; 367 } 368 break; 369 case CC_ECN: 370 if (!IN_CONGRECOVERY(tp->t_flags)) { 371 TCPSTAT_INC(tcps_ecn_rcwnd); 372 tp->snd_recover = tp->snd_max; 373 if (tp->t_flags & TF_ECN_PERMIT) 374 tp->t_flags |= TF_ECN_SND_CWR; 375 } 376 break; 377 case CC_RTO: 378 tp->t_dupacks = 0; 379 tp->t_bytes_acked = 0; 380 EXIT_RECOVERY(tp->t_flags); 381 tp->snd_ssthresh = max(2, min(tp->snd_wnd, tp->snd_cwnd) / 2 / 382 tp->t_maxseg) * tp->t_maxseg; 383 tp->snd_cwnd = tp->t_maxseg; 384 break; 385 case CC_RTO_ERR: 386 TCPSTAT_INC(tcps_sndrexmitbad); 387 /* RTO was unnecessary, so reset everything. */ 388 tp->snd_cwnd = tp->snd_cwnd_prev; 389 tp->snd_ssthresh = tp->snd_ssthresh_prev; 390 tp->snd_recover = tp->snd_recover_prev; 391 if (tp->t_flags & TF_WASFRECOVERY) 392 ENTER_FASTRECOVERY(tp->t_flags); 393 if (tp->t_flags & TF_WASCRECOVERY) 394 ENTER_CONGRECOVERY(tp->t_flags); 395 tp->snd_nxt = tp->snd_max; 396 tp->t_badrxtwin = 0; 397 break; 398 } 399 400 if (CC_ALGO(tp)->cong_signal != NULL) { 401 if (th != NULL) 402 tp->ccv->curack = th->th_ack; 403 CC_ALGO(tp)->cong_signal(tp->ccv, type); 404 } 405} 406 407static void inline 408cc_post_recovery(struct tcpcb *tp, struct tcphdr *th) 409{ 410 INP_WLOCK_ASSERT(tp->t_inpcb); 411 412 /* XXXLAS: KASSERT that we're in recovery? */ 413 414 if (CC_ALGO(tp)->post_recovery != NULL) { 415 tp->ccv->curack = th->th_ack; 416 CC_ALGO(tp)->post_recovery(tp->ccv); 417 } 418 /* XXXLAS: EXIT_RECOVERY ? */ 419 tp->t_bytes_acked = 0; 420} 421 422/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */ 423#ifdef INET6 424#define ND6_HINT(tp) \ 425do { \ 426 if ((tp) && (tp)->t_inpcb && \ 427 ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \ 428 nd6_nud_hint(NULL, NULL, 0); \ 429} while (0) 430#else 431#define ND6_HINT(tp) 432#endif 433 434/* 435 * Indicate whether this ack should be delayed. We can delay the ack if 436 * - there is no delayed ack timer in progress and 437 * - our last ack wasn't a 0-sized window. We never want to delay 438 * the ack that opens up a 0-sized window and 439 * - delayed acks are enabled or 440 * - this is a half-synchronized T/TCP connection. 441 */ 442#define DELAY_ACK(tp) \ 443 ((!tcp_timer_active(tp, TT_DELACK) && \ 444 (tp->t_flags & TF_RXWIN0SENT) == 0) && \ 445 (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN))) 446 447/* 448 * TCP input handling is split into multiple parts: 449 * tcp6_input is a thin wrapper around tcp_input for the extended 450 * ip6_protox[] call format in ip6_input 451 * tcp_input handles primary segment validation, inpcb lookup and 452 * SYN processing on listen sockets 453 * tcp_do_segment processes the ACK and text of the segment for 454 * establishing, established and closing connections 455 */ 456#ifdef INET6 457int 458tcp6_input(struct mbuf **mp, int *offp, int proto) 459{ 460 struct mbuf *m = *mp; 461 struct in6_ifaddr *ia6; 462 463 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE); 464 465 /* 466 * draft-itojun-ipv6-tcp-to-anycast 467 * better place to put this in? 468 */ 469 ia6 = ip6_getdstifaddr(m); 470 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) { 471 struct ip6_hdr *ip6; 472 473 ifa_free(&ia6->ia_ifa); 474 ip6 = mtod(m, struct ip6_hdr *); 475 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR, 476 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6); 477 return IPPROTO_DONE; 478 } 479 480 tcp_input(m, *offp); 481 return IPPROTO_DONE; 482} 483#endif 484 485void 486tcp_input(struct mbuf *m, int off0) 487{ 488 struct tcphdr *th; 489 struct ip *ip = NULL; 490 struct ipovly *ipov; 491 struct inpcb *inp = NULL; 492 struct tcpcb *tp = NULL; 493 struct socket *so = NULL; 494 u_char *optp = NULL; 495 int optlen = 0; 496 int len, tlen, off; 497 int drop_hdrlen; 498 int thflags; 499 int rstreason = 0; /* For badport_bandlim accounting purposes */ 500 uint8_t iptos; 501#ifdef IPFIREWALL_FORWARD 502 struct m_tag *fwd_tag; 503#endif 504#ifdef INET6 505 struct ip6_hdr *ip6 = NULL; 506 int isipv6; 507#else 508 const void *ip6 = NULL; 509 const int isipv6 = 0; 510#endif 511 struct tcpopt to; /* options in this segment */ 512 char *s = NULL; /* address and port logging */ 513 int ti_locked; 514#define TI_UNLOCKED 1 515#define TI_RLOCKED 2 516#define TI_WLOCKED 3 517 518#ifdef TCPDEBUG 519 /* 520 * The size of tcp_saveipgen must be the size of the max ip header, 521 * now IPv6. 522 */ 523 u_char tcp_saveipgen[IP6_HDR_LEN]; 524 struct tcphdr tcp_savetcp; 525 short ostate = 0; 526#endif 527 528#ifdef INET6 529 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0; 530#endif 531 532 to.to_flags = 0; 533 TCPSTAT_INC(tcps_rcvtotal); 534 535 if (isipv6) { 536#ifdef INET6 537 /* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */ 538 ip6 = mtod(m, struct ip6_hdr *); 539 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0; 540 if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) { 541 TCPSTAT_INC(tcps_rcvbadsum); 542 goto drop; 543 } 544 th = (struct tcphdr *)((caddr_t)ip6 + off0); 545 546 /* 547 * Be proactive about unspecified IPv6 address in source. 548 * As we use all-zero to indicate unbounded/unconnected pcb, 549 * unspecified IPv6 address can be used to confuse us. 550 * 551 * Note that packets with unspecified IPv6 destination is 552 * already dropped in ip6_input. 553 */ 554 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { 555 /* XXX stat */ 556 goto drop; 557 } 558#else 559 th = NULL; /* XXX: Avoid compiler warning. */ 560#endif 561 } else { 562 /* 563 * Get IP and TCP header together in first mbuf. 564 * Note: IP leaves IP header in first mbuf. 565 */ 566 if (off0 > sizeof (struct ip)) { 567 ip_stripoptions(m, (struct mbuf *)0); 568 off0 = sizeof(struct ip); 569 } 570 if (m->m_len < sizeof (struct tcpiphdr)) { 571 if ((m = m_pullup(m, sizeof (struct tcpiphdr))) 572 == NULL) { 573 TCPSTAT_INC(tcps_rcvshort); 574 return; 575 } 576 } 577 ip = mtod(m, struct ip *); 578 ipov = (struct ipovly *)ip; 579 th = (struct tcphdr *)((caddr_t)ip + off0); 580 tlen = ip->ip_len; 581 582 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 583 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 584 th->th_sum = m->m_pkthdr.csum_data; 585 else 586 th->th_sum = in_pseudo(ip->ip_src.s_addr, 587 ip->ip_dst.s_addr, 588 htonl(m->m_pkthdr.csum_data + 589 ip->ip_len + 590 IPPROTO_TCP)); 591 th->th_sum ^= 0xffff; 592#ifdef TCPDEBUG 593 ipov->ih_len = (u_short)tlen; 594 ipov->ih_len = htons(ipov->ih_len); 595#endif 596 } else { 597 /* 598 * Checksum extended TCP header and data. 599 */ 600 len = sizeof (struct ip) + tlen; 601 bzero(ipov->ih_x1, sizeof(ipov->ih_x1)); 602 ipov->ih_len = (u_short)tlen; 603 ipov->ih_len = htons(ipov->ih_len); 604 th->th_sum = in_cksum(m, len); 605 } 606 if (th->th_sum) { 607 TCPSTAT_INC(tcps_rcvbadsum); 608 goto drop; 609 } 610 /* Re-initialization for later version check */ 611 ip->ip_v = IPVERSION; 612 } 613 614#ifdef INET6 615 if (isipv6) 616 iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; 617 else 618#endif 619 iptos = ip->ip_tos; 620 621 /* 622 * Check that TCP offset makes sense, 623 * pull out TCP options and adjust length. XXX 624 */ 625 off = th->th_off << 2; 626 if (off < sizeof (struct tcphdr) || off > tlen) { 627 TCPSTAT_INC(tcps_rcvbadoff); 628 goto drop; 629 } 630 tlen -= off; /* tlen is used instead of ti->ti_len */ 631 if (off > sizeof (struct tcphdr)) { 632 if (isipv6) { 633#ifdef INET6 634 IP6_EXTHDR_CHECK(m, off0, off, ); 635 ip6 = mtod(m, struct ip6_hdr *); 636 th = (struct tcphdr *)((caddr_t)ip6 + off0); 637#endif 638 } else { 639 if (m->m_len < sizeof(struct ip) + off) { 640 if ((m = m_pullup(m, sizeof (struct ip) + off)) 641 == NULL) { 642 TCPSTAT_INC(tcps_rcvshort); 643 return; 644 } 645 ip = mtod(m, struct ip *); 646 ipov = (struct ipovly *)ip; 647 th = (struct tcphdr *)((caddr_t)ip + off0); 648 } 649 } 650 optlen = off - sizeof (struct tcphdr); 651 optp = (u_char *)(th + 1); 652 } 653 thflags = th->th_flags; 654 655 /* 656 * Convert TCP protocol specific fields to host format. 657 */ 658 th->th_seq = ntohl(th->th_seq); 659 th->th_ack = ntohl(th->th_ack); 660 th->th_win = ntohs(th->th_win); 661 th->th_urp = ntohs(th->th_urp); 662 663 /* 664 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options. 665 */ 666 drop_hdrlen = off0 + off; 667 668 /* 669 * Locate pcb for segment, which requires a lock on tcbinfo. 670 * Optimisticaly acquire a global read lock rather than a write lock 671 * unless header flags necessarily imply a state change. There are 672 * two cases where we might discover later we need a write lock 673 * despite the flags: ACKs moving a connection out of the syncache, 674 * and ACKs for a connection in TIMEWAIT. 675 */ 676 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 || 677 tcp_read_locking == 0) { 678 INP_INFO_WLOCK(&V_tcbinfo); 679 ti_locked = TI_WLOCKED; 680 } else { 681 INP_INFO_RLOCK(&V_tcbinfo); 682 ti_locked = TI_RLOCKED; 683 } 684 685findpcb: 686#ifdef INVARIANTS 687 if (ti_locked == TI_RLOCKED) 688 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 689 else if (ti_locked == TI_WLOCKED) 690 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 691 else 692 panic("%s: findpcb ti_locked %d\n", __func__, ti_locked); 693#endif 694 695#ifdef IPFIREWALL_FORWARD 696 /* 697 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. 698 */ 699 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL); 700 701 if (fwd_tag != NULL && isipv6 == 0) { /* IPv6 support is not yet */ 702 struct sockaddr_in *next_hop; 703 704 next_hop = (struct sockaddr_in *)(fwd_tag+1); 705 /* 706 * Transparently forwarded. Pretend to be the destination. 707 * already got one like this? 708 */ 709 inp = in_pcblookup_hash(&V_tcbinfo, 710 ip->ip_src, th->th_sport, 711 ip->ip_dst, th->th_dport, 712 0, m->m_pkthdr.rcvif); 713 if (!inp) { 714 /* It's new. Try to find the ambushing socket. */ 715 inp = in_pcblookup_hash(&V_tcbinfo, 716 ip->ip_src, th->th_sport, 717 next_hop->sin_addr, 718 next_hop->sin_port ? 719 ntohs(next_hop->sin_port) : 720 th->th_dport, 721 INPLOOKUP_WILDCARD, 722 m->m_pkthdr.rcvif); 723 } 724 /* Remove the tag from the packet. We don't need it anymore. */ 725 m_tag_delete(m, fwd_tag); 726 } else 727#endif /* IPFIREWALL_FORWARD */ 728 { 729 if (isipv6) { 730#ifdef INET6 731 inp = in6_pcblookup_hash(&V_tcbinfo, 732 &ip6->ip6_src, th->th_sport, 733 &ip6->ip6_dst, th->th_dport, 734 INPLOOKUP_WILDCARD, 735 m->m_pkthdr.rcvif); 736#endif 737 } else 738 inp = in_pcblookup_hash(&V_tcbinfo, 739 ip->ip_src, th->th_sport, 740 ip->ip_dst, th->th_dport, 741 INPLOOKUP_WILDCARD, 742 m->m_pkthdr.rcvif); 743 } 744 745 /* 746 * If the INPCB does not exist then all data in the incoming 747 * segment is discarded and an appropriate RST is sent back. 748 * XXX MRT Send RST using which routing table? 749 */ 750 if (inp == NULL) { 751 /* 752 * Log communication attempts to ports that are not 753 * in use. 754 */ 755 if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) || 756 tcp_log_in_vain == 2) { 757 if ((s = tcp_log_vain(NULL, th, (void *)ip, ip6))) 758 log(LOG_INFO, "%s; %s: Connection attempt " 759 "to closed port\n", s, __func__); 760 } 761 /* 762 * When blackholing do not respond with a RST but 763 * completely ignore the segment and drop it. 764 */ 765 if ((V_blackhole == 1 && (thflags & TH_SYN)) || 766 V_blackhole == 2) 767 goto dropunlock; 768 769 rstreason = BANDLIM_RST_CLOSEDPORT; 770 goto dropwithreset; 771 } 772 INP_WLOCK(inp); 773 if (!(inp->inp_flags & INP_HW_FLOWID) 774 && (m->m_flags & M_FLOWID) 775 && ((inp->inp_socket == NULL) 776 || !(inp->inp_socket->so_options & SO_ACCEPTCONN))) { 777 inp->inp_flags |= INP_HW_FLOWID; 778 inp->inp_flags &= ~INP_SW_FLOWID; 779 inp->inp_flowid = m->m_pkthdr.flowid; 780 } 781#ifdef IPSEC 782#ifdef INET6 783 if (isipv6 && ipsec6_in_reject(m, inp)) { 784 V_ipsec6stat.in_polvio++; 785 goto dropunlock; 786 } else 787#endif /* INET6 */ 788 if (ipsec4_in_reject(m, inp) != 0) { 789 V_ipsec4stat.in_polvio++; 790 goto dropunlock; 791 } 792#endif /* IPSEC */ 793 794 /* 795 * Check the minimum TTL for socket. 796 */ 797 if (inp->inp_ip_minttl != 0) { 798#ifdef INET6 799 if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim) 800 goto dropunlock; 801 else 802#endif 803 if (inp->inp_ip_minttl > ip->ip_ttl) 804 goto dropunlock; 805 } 806 807 /* 808 * A previous connection in TIMEWAIT state is supposed to catch stray 809 * or duplicate segments arriving late. If this segment was a 810 * legitimate new connection attempt the old INPCB gets removed and 811 * we can try again to find a listening socket. 812 * 813 * At this point, due to earlier optimism, we may hold a read lock on 814 * the inpcbinfo, rather than a write lock. If so, we need to 815 * upgrade, or if that fails, acquire a reference on the inpcb, drop 816 * all locks, acquire a global write lock, and then re-acquire the 817 * inpcb lock. We may at that point discover that another thread has 818 * tried to free the inpcb, in which case we need to loop back and 819 * try to find a new inpcb to deliver to. 820 */ 821relocked: 822 if (inp->inp_flags & INP_TIMEWAIT) { 823 KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED, 824 ("%s: INP_TIMEWAIT ti_locked %d", __func__, ti_locked)); 825 826 if (ti_locked == TI_RLOCKED) { 827 if (INP_INFO_TRY_UPGRADE(&V_tcbinfo) == 0) { 828 in_pcbref(inp); 829 INP_WUNLOCK(inp); 830 INP_INFO_RUNLOCK(&V_tcbinfo); 831 INP_INFO_WLOCK(&V_tcbinfo); 832 ti_locked = TI_WLOCKED; 833 INP_WLOCK(inp); 834 if (in_pcbrele(inp)) { 835 inp = NULL; 836 goto findpcb; 837 } 838 } else 839 ti_locked = TI_WLOCKED; 840 } 841 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 842 843 if (thflags & TH_SYN) 844 tcp_dooptions(&to, optp, optlen, TO_SYN); 845 /* 846 * NB: tcp_twcheck unlocks the INP and frees the mbuf. 847 */ 848 if (tcp_twcheck(inp, &to, th, m, tlen)) 849 goto findpcb; 850 INP_INFO_WUNLOCK(&V_tcbinfo); 851 return; 852 } 853 /* 854 * The TCPCB may no longer exist if the connection is winding 855 * down or it is in the CLOSED state. Either way we drop the 856 * segment and send an appropriate response. 857 */ 858 tp = intotcpcb(inp); 859 if (tp == NULL || tp->t_state == TCPS_CLOSED) { 860 rstreason = BANDLIM_RST_CLOSEDPORT; 861 goto dropwithreset; 862 } 863 864 /* 865 * We've identified a valid inpcb, but it could be that we need an 866 * inpcbinfo write lock and have only a read lock. In this case, 867 * attempt to upgrade/relock using the same strategy as the TIMEWAIT 868 * case above. If we relock, we have to jump back to 'relocked' as 869 * the connection might now be in TIMEWAIT. 870 */ 871 if (tp->t_state != TCPS_ESTABLISHED || 872 (thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 || 873 tcp_read_locking == 0) { 874 KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED, 875 ("%s: upgrade check ti_locked %d", __func__, ti_locked)); 876 877 if (ti_locked == TI_RLOCKED) { 878 if (INP_INFO_TRY_UPGRADE(&V_tcbinfo) == 0) { 879 in_pcbref(inp); 880 INP_WUNLOCK(inp); 881 INP_INFO_RUNLOCK(&V_tcbinfo); 882 INP_INFO_WLOCK(&V_tcbinfo); 883 ti_locked = TI_WLOCKED; 884 INP_WLOCK(inp); 885 if (in_pcbrele(inp)) { 886 inp = NULL; 887 goto findpcb; 888 } 889 goto relocked; 890 } else 891 ti_locked = TI_WLOCKED; 892 } 893 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 894 } 895 896#ifdef MAC 897 INP_WLOCK_ASSERT(inp); 898 if (mac_inpcb_check_deliver(inp, m)) 899 goto dropunlock; 900#endif 901 so = inp->inp_socket; 902 KASSERT(so != NULL, ("%s: so == NULL", __func__)); 903#ifdef TCPDEBUG 904 if (so->so_options & SO_DEBUG) { 905 ostate = tp->t_state; 906 if (isipv6) { 907#ifdef INET6 908 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6)); 909#endif 910 } else 911 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip)); 912 tcp_savetcp = *th; 913 } 914#endif 915 /* 916 * When the socket is accepting connections (the INPCB is in LISTEN 917 * state) we look into the SYN cache if this is a new connection 918 * attempt or the completion of a previous one. 919 */ 920 if (so->so_options & SO_ACCEPTCONN) { 921 struct in_conninfo inc; 922 923 KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but " 924 "tp not listening", __func__)); 925 926 bzero(&inc, sizeof(inc)); 927#ifdef INET6 928 if (isipv6) { 929 inc.inc_flags |= INC_ISIPV6; 930 inc.inc6_faddr = ip6->ip6_src; 931 inc.inc6_laddr = ip6->ip6_dst; 932 } else 933#endif 934 { 935 inc.inc_faddr = ip->ip_src; 936 inc.inc_laddr = ip->ip_dst; 937 } 938 inc.inc_fport = th->th_sport; 939 inc.inc_lport = th->th_dport; 940 inc.inc_fibnum = so->so_fibnum; 941 942 /* 943 * Check for an existing connection attempt in syncache if 944 * the flag is only ACK. A successful lookup creates a new 945 * socket appended to the listen queue in SYN_RECEIVED state. 946 */ 947 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) { 948 /* 949 * Parse the TCP options here because 950 * syncookies need access to the reflected 951 * timestamp. 952 */ 953 tcp_dooptions(&to, optp, optlen, 0); 954 /* 955 * NB: syncache_expand() doesn't unlock 956 * inp and tcpinfo locks. 957 */ 958 if (!syncache_expand(&inc, &to, th, &so, m)) { 959 /* 960 * No syncache entry or ACK was not 961 * for our SYN/ACK. Send a RST. 962 * NB: syncache did its own logging 963 * of the failure cause. 964 */ 965 rstreason = BANDLIM_RST_OPENPORT; 966 goto dropwithreset; 967 } 968 if (so == NULL) { 969 /* 970 * We completed the 3-way handshake 971 * but could not allocate a socket 972 * either due to memory shortage, 973 * listen queue length limits or 974 * global socket limits. Send RST 975 * or wait and have the remote end 976 * retransmit the ACK for another 977 * try. 978 */ 979 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 980 log(LOG_DEBUG, "%s; %s: Listen socket: " 981 "Socket allocation failed due to " 982 "limits or memory shortage, %s\n", 983 s, __func__, 984 V_tcp_sc_rst_sock_fail ? 985 "sending RST" : "try again"); 986 if (V_tcp_sc_rst_sock_fail) { 987 rstreason = BANDLIM_UNLIMITED; 988 goto dropwithreset; 989 } else 990 goto dropunlock; 991 } 992 /* 993 * Socket is created in state SYN_RECEIVED. 994 * Unlock the listen socket, lock the newly 995 * created socket and update the tp variable. 996 */ 997 INP_WUNLOCK(inp); /* listen socket */ 998 inp = sotoinpcb(so); 999 INP_WLOCK(inp); /* new connection */ 1000 tp = intotcpcb(inp); 1001 KASSERT(tp->t_state == TCPS_SYN_RECEIVED, 1002 ("%s: ", __func__)); 1003 /* 1004 * Process the segment and the data it 1005 * contains. tcp_do_segment() consumes 1006 * the mbuf chain and unlocks the inpcb. 1007 */ 1008 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, 1009 iptos, ti_locked); 1010 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1011 return; 1012 } 1013 /* 1014 * Segment flag validation for new connection attempts: 1015 * 1016 * Our (SYN|ACK) response was rejected. 1017 * Check with syncache and remove entry to prevent 1018 * retransmits. 1019 * 1020 * NB: syncache_chkrst does its own logging of failure 1021 * causes. 1022 */ 1023 if (thflags & TH_RST) { 1024 syncache_chkrst(&inc, th); 1025 goto dropunlock; 1026 } 1027 /* 1028 * We can't do anything without SYN. 1029 */ 1030 if ((thflags & TH_SYN) == 0) { 1031 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1032 log(LOG_DEBUG, "%s; %s: Listen socket: " 1033 "SYN is missing, segment ignored\n", 1034 s, __func__); 1035 TCPSTAT_INC(tcps_badsyn); 1036 goto dropunlock; 1037 } 1038 /* 1039 * (SYN|ACK) is bogus on a listen socket. 1040 */ 1041 if (thflags & TH_ACK) { 1042 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1043 log(LOG_DEBUG, "%s; %s: Listen socket: " 1044 "SYN|ACK invalid, segment rejected\n", 1045 s, __func__); 1046 syncache_badack(&inc); /* XXX: Not needed! */ 1047 TCPSTAT_INC(tcps_badsyn); 1048 rstreason = BANDLIM_RST_OPENPORT; 1049 goto dropwithreset; 1050 } 1051 /* 1052 * If the drop_synfin option is enabled, drop all 1053 * segments with both the SYN and FIN bits set. 1054 * This prevents e.g. nmap from identifying the 1055 * TCP/IP stack. 1056 * XXX: Poor reasoning. nmap has other methods 1057 * and is constantly refining its stack detection 1058 * strategies. 1059 * XXX: This is a violation of the TCP specification 1060 * and was used by RFC1644. 1061 */ 1062 if ((thflags & TH_FIN) && V_drop_synfin) { 1063 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1064 log(LOG_DEBUG, "%s; %s: Listen socket: " 1065 "SYN|FIN segment ignored (based on " 1066 "sysctl setting)\n", s, __func__); 1067 TCPSTAT_INC(tcps_badsyn); 1068 goto dropunlock; 1069 } 1070 /* 1071 * Segment's flags are (SYN) or (SYN|FIN). 1072 * 1073 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored 1074 * as they do not affect the state of the TCP FSM. 1075 * The data pointed to by TH_URG and th_urp is ignored. 1076 */ 1077 KASSERT((thflags & (TH_RST|TH_ACK)) == 0, 1078 ("%s: Listen socket: TH_RST or TH_ACK set", __func__)); 1079 KASSERT(thflags & (TH_SYN), 1080 ("%s: Listen socket: TH_SYN not set", __func__)); 1081#ifdef INET6 1082 /* 1083 * If deprecated address is forbidden, 1084 * we do not accept SYN to deprecated interface 1085 * address to prevent any new inbound connection from 1086 * getting established. 1087 * When we do not accept SYN, we send a TCP RST, 1088 * with deprecated source address (instead of dropping 1089 * it). We compromise it as it is much better for peer 1090 * to send a RST, and RST will be the final packet 1091 * for the exchange. 1092 * 1093 * If we do not forbid deprecated addresses, we accept 1094 * the SYN packet. RFC2462 does not suggest dropping 1095 * SYN in this case. 1096 * If we decipher RFC2462 5.5.4, it says like this: 1097 * 1. use of deprecated addr with existing 1098 * communication is okay - "SHOULD continue to be 1099 * used" 1100 * 2. use of it with new communication: 1101 * (2a) "SHOULD NOT be used if alternate address 1102 * with sufficient scope is available" 1103 * (2b) nothing mentioned otherwise. 1104 * Here we fall into (2b) case as we have no choice in 1105 * our source address selection - we must obey the peer. 1106 * 1107 * The wording in RFC2462 is confusing, and there are 1108 * multiple description text for deprecated address 1109 * handling - worse, they are not exactly the same. 1110 * I believe 5.5.4 is the best one, so we follow 5.5.4. 1111 */ 1112 if (isipv6 && !V_ip6_use_deprecated) { 1113 struct in6_ifaddr *ia6; 1114 1115 ia6 = ip6_getdstifaddr(m); 1116 if (ia6 != NULL && 1117 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) { 1118 ifa_free(&ia6->ia_ifa); 1119 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1120 log(LOG_DEBUG, "%s; %s: Listen socket: " 1121 "Connection attempt to deprecated " 1122 "IPv6 address rejected\n", 1123 s, __func__); 1124 rstreason = BANDLIM_RST_OPENPORT; 1125 goto dropwithreset; 1126 } 1127 ifa_free(&ia6->ia_ifa); 1128 } 1129#endif 1130 /* 1131 * Basic sanity checks on incoming SYN requests: 1132 * Don't respond if the destination is a link layer 1133 * broadcast according to RFC1122 4.2.3.10, p. 104. 1134 * If it is from this socket it must be forged. 1135 * Don't respond if the source or destination is a 1136 * global or subnet broad- or multicast address. 1137 * Note that it is quite possible to receive unicast 1138 * link-layer packets with a broadcast IP address. Use 1139 * in_broadcast() to find them. 1140 */ 1141 if (m->m_flags & (M_BCAST|M_MCAST)) { 1142 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1143 log(LOG_DEBUG, "%s; %s: Listen socket: " 1144 "Connection attempt from broad- or multicast " 1145 "link layer address ignored\n", s, __func__); 1146 goto dropunlock; 1147 } 1148 if (isipv6) { 1149#ifdef INET6 1150 if (th->th_dport == th->th_sport && 1151 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) { 1152 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1153 log(LOG_DEBUG, "%s; %s: Listen socket: " 1154 "Connection attempt to/from self " 1155 "ignored\n", s, __func__); 1156 goto dropunlock; 1157 } 1158 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 1159 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) { 1160 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1161 log(LOG_DEBUG, "%s; %s: Listen socket: " 1162 "Connection attempt from/to multicast " 1163 "address ignored\n", s, __func__); 1164 goto dropunlock; 1165 } 1166#endif 1167 } else { 1168 if (th->th_dport == th->th_sport && 1169 ip->ip_dst.s_addr == ip->ip_src.s_addr) { 1170 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1171 log(LOG_DEBUG, "%s; %s: Listen socket: " 1172 "Connection attempt from/to self " 1173 "ignored\n", s, __func__); 1174 goto dropunlock; 1175 } 1176 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 1177 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 1178 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 1179 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) { 1180 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 1181 log(LOG_DEBUG, "%s; %s: Listen socket: " 1182 "Connection attempt from/to broad- " 1183 "or multicast address ignored\n", 1184 s, __func__); 1185 goto dropunlock; 1186 } 1187 } 1188 /* 1189 * SYN appears to be valid. Create compressed TCP state 1190 * for syncache. 1191 */ 1192#ifdef TCPDEBUG 1193 if (so->so_options & SO_DEBUG) 1194 tcp_trace(TA_INPUT, ostate, tp, 1195 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1196#endif 1197 tcp_dooptions(&to, optp, optlen, TO_SYN); 1198 syncache_add(&inc, &to, th, inp, &so, m); 1199 /* 1200 * Entry added to syncache and mbuf consumed. 1201 * Everything already unlocked by syncache_add(). 1202 */ 1203 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1204 return; 1205 } 1206 1207 /* 1208 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later 1209 * state. tcp_do_segment() always consumes the mbuf chain, unlocks 1210 * the inpcb, and unlocks pcbinfo. 1211 */ 1212 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked); 1213 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1214 return; 1215 1216dropwithreset: 1217 if (ti_locked == TI_RLOCKED) 1218 INP_INFO_RUNLOCK(&V_tcbinfo); 1219 else if (ti_locked == TI_WLOCKED) 1220 INP_INFO_WUNLOCK(&V_tcbinfo); 1221 else 1222 panic("%s: dropwithreset ti_locked %d", __func__, ti_locked); 1223 ti_locked = TI_UNLOCKED; 1224 1225 if (inp != NULL) { 1226 tcp_dropwithreset(m, th, tp, tlen, rstreason); 1227 INP_WUNLOCK(inp); 1228 } else 1229 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 1230 m = NULL; /* mbuf chain got consumed. */ 1231 goto drop; 1232 1233dropunlock: 1234 if (ti_locked == TI_RLOCKED) 1235 INP_INFO_RUNLOCK(&V_tcbinfo); 1236 else if (ti_locked == TI_WLOCKED) 1237 INP_INFO_WUNLOCK(&V_tcbinfo); 1238 else 1239 panic("%s: dropunlock ti_locked %d", __func__, ti_locked); 1240 ti_locked = TI_UNLOCKED; 1241 1242 if (inp != NULL) 1243 INP_WUNLOCK(inp); 1244 1245drop: 1246 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 1247 if (s != NULL) 1248 free(s, M_TCPLOG); 1249 if (m != NULL) 1250 m_freem(m); 1251} 1252 1253static void 1254tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 1255 struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos, 1256 int ti_locked) 1257{ 1258 int thflags, acked, ourfinisacked, needoutput = 0; 1259 int rstreason, todrop, win; 1260 u_long tiwin; 1261 struct tcpopt to; 1262 1263#ifdef TCPDEBUG 1264 /* 1265 * The size of tcp_saveipgen must be the size of the max ip header, 1266 * now IPv6. 1267 */ 1268 u_char tcp_saveipgen[IP6_HDR_LEN]; 1269 struct tcphdr tcp_savetcp; 1270 short ostate = 0; 1271#endif 1272 thflags = th->th_flags; 1273 1274 /* 1275 * If this is either a state-changing packet or current state isn't 1276 * established, we require a write lock on tcbinfo. Otherwise, we 1277 * allow either a read lock or a write lock, as we may have acquired 1278 * a write lock due to a race. 1279 * 1280 * Require a global write lock for SYN/FIN/RST segments or 1281 * non-established connections; otherwise accept either a read or 1282 * write lock, as we may have conservatively acquired a write lock in 1283 * certain cases in tcp_input() (is this still true?). Currently we 1284 * will never enter with no lock, so we try to drop it quickly in the 1285 * common pure ack/pure data cases. 1286 */ 1287 if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 || 1288 tp->t_state != TCPS_ESTABLISHED) { 1289 KASSERT(ti_locked == TI_WLOCKED, ("%s ti_locked %d for " 1290 "SYN/FIN/RST/!EST", __func__, ti_locked)); 1291 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 1292 } else { 1293#ifdef INVARIANTS 1294 if (ti_locked == TI_RLOCKED) 1295 INP_INFO_RLOCK_ASSERT(&V_tcbinfo); 1296 else if (ti_locked == TI_WLOCKED) 1297 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 1298 else 1299 panic("%s: ti_locked %d for EST", __func__, 1300 ti_locked); 1301#endif 1302 } 1303 INP_WLOCK_ASSERT(tp->t_inpcb); 1304 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 1305 __func__)); 1306 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 1307 __func__)); 1308 1309 /* 1310 * Segment received on connection. 1311 * Reset idle time and keep-alive timer. 1312 * XXX: This should be done after segment 1313 * validation to ignore broken/spoofed segs. 1314 */ 1315 tp->t_rcvtime = ticks; 1316 if (TCPS_HAVEESTABLISHED(tp->t_state)) 1317 tcp_timer_activate(tp, TT_KEEP, tcp_keepidle); 1318 1319 /* 1320 * Unscale the window into a 32-bit value. 1321 * For the SYN_SENT state the scale is zero. 1322 */ 1323 tiwin = th->th_win << tp->snd_scale; 1324 1325 /* 1326 * TCP ECN processing. 1327 */ 1328 if (tp->t_flags & TF_ECN_PERMIT) { 1329 if (thflags & TH_CWR) 1330 tp->t_flags &= ~TF_ECN_SND_ECE; 1331 switch (iptos & IPTOS_ECN_MASK) { 1332 case IPTOS_ECN_CE: 1333 tp->t_flags |= TF_ECN_SND_ECE; 1334 TCPSTAT_INC(tcps_ecn_ce); 1335 break; 1336 case IPTOS_ECN_ECT0: 1337 TCPSTAT_INC(tcps_ecn_ect0); 1338 break; 1339 case IPTOS_ECN_ECT1: 1340 TCPSTAT_INC(tcps_ecn_ect1); 1341 break; 1342 } 1343 /* Congestion experienced. */ 1344 if (thflags & TH_ECE) { 1345 cc_cong_signal(tp, th, CC_ECN); 1346 } 1347 } 1348 1349 /* 1350 * Parse options on any incoming segment. 1351 */ 1352 tcp_dooptions(&to, (u_char *)(th + 1), 1353 (th->th_off << 2) - sizeof(struct tcphdr), 1354 (thflags & TH_SYN) ? TO_SYN : 0); 1355 1356 /* 1357 * If echoed timestamp is later than the current time, 1358 * fall back to non RFC1323 RTT calculation. Normalize 1359 * timestamp if syncookies were used when this connection 1360 * was established. 1361 */ 1362 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 1363 to.to_tsecr -= tp->ts_offset; 1364 if (TSTMP_GT(to.to_tsecr, ticks)) 1365 to.to_tsecr = 0; 1366 } 1367 1368 /* 1369 * Process options only when we get SYN/ACK back. The SYN case 1370 * for incoming connections is handled in tcp_syncache. 1371 * According to RFC1323 the window field in a SYN (i.e., a <SYN> 1372 * or <SYN,ACK>) segment itself is never scaled. 1373 * XXX this is traditional behavior, may need to be cleaned up. 1374 */ 1375 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 1376 if ((to.to_flags & TOF_SCALE) && 1377 (tp->t_flags & TF_REQ_SCALE)) { 1378 tp->t_flags |= TF_RCVD_SCALE; 1379 tp->snd_scale = to.to_wscale; 1380 } 1381 /* 1382 * Initial send window. It will be updated with 1383 * the next incoming segment to the scaled value. 1384 */ 1385 tp->snd_wnd = th->th_win; 1386 if (to.to_flags & TOF_TS) { 1387 tp->t_flags |= TF_RCVD_TSTMP; 1388 tp->ts_recent = to.to_tsval; 1389 tp->ts_recent_age = ticks; 1390 } 1391 if (to.to_flags & TOF_MSS) 1392 tcp_mss(tp, to.to_mss); 1393 if ((tp->t_flags & TF_SACK_PERMIT) && 1394 (to.to_flags & TOF_SACKPERM) == 0) 1395 tp->t_flags &= ~TF_SACK_PERMIT; 1396 } 1397 1398 /* 1399 * Header prediction: check for the two common cases 1400 * of a uni-directional data xfer. If the packet has 1401 * no control flags, is in-sequence, the window didn't 1402 * change and we're not retransmitting, it's a 1403 * candidate. If the length is zero and the ack moved 1404 * forward, we're the sender side of the xfer. Just 1405 * free the data acked & wake any higher level process 1406 * that was blocked waiting for space. If the length 1407 * is non-zero and the ack didn't move, we're the 1408 * receiver side. If we're getting packets in-order 1409 * (the reassembly queue is empty), add the data to 1410 * the socket buffer and note that we need a delayed ack. 1411 * Make sure that the hidden state-flags are also off. 1412 * Since we check for TCPS_ESTABLISHED first, it can only 1413 * be TH_NEEDSYN. 1414 */ 1415 if (tp->t_state == TCPS_ESTABLISHED && 1416 th->th_seq == tp->rcv_nxt && 1417 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 1418 tp->snd_nxt == tp->snd_max && 1419 tiwin && tiwin == tp->snd_wnd && 1420 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) && 1421 LIST_EMPTY(&tp->t_segq) && 1422 ((to.to_flags & TOF_TS) == 0 || 1423 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) { 1424 1425 /* 1426 * If last ACK falls within this segment's sequence numbers, 1427 * record the timestamp. 1428 * NOTE that the test is modified according to the latest 1429 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1430 */ 1431 if ((to.to_flags & TOF_TS) != 0 && 1432 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 1433 tp->ts_recent_age = ticks; 1434 tp->ts_recent = to.to_tsval; 1435 } 1436 1437 if (tlen == 0) { 1438 if (SEQ_GT(th->th_ack, tp->snd_una) && 1439 SEQ_LEQ(th->th_ack, tp->snd_max) && 1440 !IN_RECOVERY(tp->t_flags) && 1441 (to.to_flags & TOF_SACK) == 0 && 1442 TAILQ_EMPTY(&tp->snd_holes)) { 1443 /* 1444 * This is a pure ack for outstanding data. 1445 */ 1446 if (ti_locked == TI_RLOCKED) 1447 INP_INFO_RUNLOCK(&V_tcbinfo); 1448 else if (ti_locked == TI_WLOCKED) 1449 INP_INFO_WUNLOCK(&V_tcbinfo); 1450 else 1451 panic("%s: ti_locked %d on pure ACK", 1452 __func__, ti_locked); 1453 ti_locked = TI_UNLOCKED; 1454 1455 TCPSTAT_INC(tcps_predack); 1456 1457 /* 1458 * "bad retransmit" recovery. 1459 */ 1460 if (tp->t_rxtshift == 1 && 1461 (int)(ticks - tp->t_badrxtwin) < 0) { 1462 cc_cong_signal(tp, th, CC_RTO_ERR); 1463 } 1464 1465 /* 1466 * Recalculate the transmit timer / rtt. 1467 * 1468 * Some boxes send broken timestamp replies 1469 * during the SYN+ACK phase, ignore 1470 * timestamps of 0 or we could calculate a 1471 * huge RTT and blow up the retransmit timer. 1472 */ 1473 if ((to.to_flags & TOF_TS) != 0 && 1474 to.to_tsecr) { 1475 if (!tp->t_rttlow || 1476 tp->t_rttlow > ticks - to.to_tsecr) 1477 tp->t_rttlow = ticks - to.to_tsecr; 1478 tcp_xmit_timer(tp, 1479 ticks - to.to_tsecr + 1); 1480 } else if (tp->t_rtttime && 1481 SEQ_GT(th->th_ack, tp->t_rtseq)) { 1482 if (!tp->t_rttlow || 1483 tp->t_rttlow > ticks - tp->t_rtttime) 1484 tp->t_rttlow = ticks - tp->t_rtttime; 1485 tcp_xmit_timer(tp, 1486 ticks - tp->t_rtttime); 1487 } 1488 acked = BYTES_THIS_ACK(tp, th); 1489 TCPSTAT_INC(tcps_rcvackpack); 1490 TCPSTAT_ADD(tcps_rcvackbyte, acked); 1491 sbdrop(&so->so_snd, acked); 1492 if (SEQ_GT(tp->snd_una, tp->snd_recover) && 1493 SEQ_LEQ(th->th_ack, tp->snd_recover)) 1494 tp->snd_recover = th->th_ack - 1; 1495 1496 /* 1497 * Let the congestion control algorithm update 1498 * congestion control related information. This 1499 * typically means increasing the congestion 1500 * window. 1501 */ 1502 cc_ack_received(tp, th, CC_ACK); 1503 1504 tp->snd_una = th->th_ack; 1505 /* 1506 * Pull snd_wl2 up to prevent seq wrap relative 1507 * to th_ack. 1508 */ 1509 tp->snd_wl2 = th->th_ack; 1510 tp->t_dupacks = 0; 1511 m_freem(m); 1512 ND6_HINT(tp); /* Some progress has been made. */ 1513 1514 /* 1515 * If all outstanding data are acked, stop 1516 * retransmit timer, otherwise restart timer 1517 * using current (possibly backed-off) value. 1518 * If process is waiting for space, 1519 * wakeup/selwakeup/signal. If data 1520 * are ready to send, let tcp_output 1521 * decide between more output or persist. 1522 */ 1523#ifdef TCPDEBUG 1524 if (so->so_options & SO_DEBUG) 1525 tcp_trace(TA_INPUT, ostate, tp, 1526 (void *)tcp_saveipgen, 1527 &tcp_savetcp, 0); 1528#endif 1529 if (tp->snd_una == tp->snd_max) 1530 tcp_timer_activate(tp, TT_REXMT, 0); 1531 else if (!tcp_timer_active(tp, TT_PERSIST)) 1532 tcp_timer_activate(tp, TT_REXMT, 1533 tp->t_rxtcur); 1534 sowwakeup(so); 1535 if (so->so_snd.sb_cc) 1536 (void) tcp_output(tp); 1537 goto check_delack; 1538 } 1539 } else if (th->th_ack == tp->snd_una && 1540 tlen <= sbspace(&so->so_rcv)) { 1541 int newsize = 0; /* automatic sockbuf scaling */ 1542 1543 /* 1544 * This is a pure, in-sequence data packet with 1545 * nothing on the reassembly queue and we have enough 1546 * buffer space to take it. 1547 */ 1548 if (ti_locked == TI_RLOCKED) 1549 INP_INFO_RUNLOCK(&V_tcbinfo); 1550 else if (ti_locked == TI_WLOCKED) 1551 INP_INFO_WUNLOCK(&V_tcbinfo); 1552 else 1553 panic("%s: ti_locked %d on pure data " 1554 "segment", __func__, ti_locked); 1555 ti_locked = TI_UNLOCKED; 1556 1557 /* Clean receiver SACK report if present */ 1558 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks) 1559 tcp_clean_sackreport(tp); 1560 TCPSTAT_INC(tcps_preddat); 1561 tp->rcv_nxt += tlen; 1562 /* 1563 * Pull snd_wl1 up to prevent seq wrap relative to 1564 * th_seq. 1565 */ 1566 tp->snd_wl1 = th->th_seq; 1567 /* 1568 * Pull rcv_up up to prevent seq wrap relative to 1569 * rcv_nxt. 1570 */ 1571 tp->rcv_up = tp->rcv_nxt; 1572 TCPSTAT_INC(tcps_rcvpack); 1573 TCPSTAT_ADD(tcps_rcvbyte, tlen); 1574 ND6_HINT(tp); /* Some progress has been made */ 1575#ifdef TCPDEBUG 1576 if (so->so_options & SO_DEBUG) 1577 tcp_trace(TA_INPUT, ostate, tp, 1578 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1579#endif 1580 /* 1581 * Automatic sizing of receive socket buffer. Often the send 1582 * buffer size is not optimally adjusted to the actual network 1583 * conditions at hand (delay bandwidth product). Setting the 1584 * buffer size too small limits throughput on links with high 1585 * bandwidth and high delay (eg. trans-continental/oceanic links). 1586 * 1587 * On the receive side the socket buffer memory is only rarely 1588 * used to any significant extent. This allows us to be much 1589 * more aggressive in scaling the receive socket buffer. For 1590 * the case that the buffer space is actually used to a large 1591 * extent and we run out of kernel memory we can simply drop 1592 * the new segments; TCP on the sender will just retransmit it 1593 * later. Setting the buffer size too big may only consume too 1594 * much kernel memory if the application doesn't read() from 1595 * the socket or packet loss or reordering makes use of the 1596 * reassembly queue. 1597 * 1598 * The criteria to step up the receive buffer one notch are: 1599 * 1. the number of bytes received during the time it takes 1600 * one timestamp to be reflected back to us (the RTT); 1601 * 2. received bytes per RTT is within seven eighth of the 1602 * current socket buffer size; 1603 * 3. receive buffer size has not hit maximal automatic size; 1604 * 1605 * This algorithm does one step per RTT at most and only if 1606 * we receive a bulk stream w/o packet losses or reorderings. 1607 * Shrinking the buffer during idle times is not necessary as 1608 * it doesn't consume any memory when idle. 1609 * 1610 * TODO: Only step up if the application is actually serving 1611 * the buffer to better manage the socket buffer resources. 1612 */ 1613 if (V_tcp_do_autorcvbuf && 1614 to.to_tsecr && 1615 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 1616 if (TSTMP_GT(to.to_tsecr, tp->rfbuf_ts) && 1617 to.to_tsecr - tp->rfbuf_ts < hz) { 1618 if (tp->rfbuf_cnt > 1619 (so->so_rcv.sb_hiwat / 8 * 7) && 1620 so->so_rcv.sb_hiwat < 1621 V_tcp_autorcvbuf_max) { 1622 newsize = 1623 min(so->so_rcv.sb_hiwat + 1624 V_tcp_autorcvbuf_inc, 1625 V_tcp_autorcvbuf_max); 1626 } 1627 /* Start over with next RTT. */ 1628 tp->rfbuf_ts = 0; 1629 tp->rfbuf_cnt = 0; 1630 } else 1631 tp->rfbuf_cnt += tlen; /* add up */ 1632 } 1633 1634 /* Add data to socket buffer. */ 1635 SOCKBUF_LOCK(&so->so_rcv); 1636 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1637 m_freem(m); 1638 } else { 1639 /* 1640 * Set new socket buffer size. 1641 * Give up when limit is reached. 1642 */ 1643 if (newsize) 1644 if (!sbreserve_locked(&so->so_rcv, 1645 newsize, so, NULL)) 1646 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1647 m_adj(m, drop_hdrlen); /* delayed header drop */ 1648 sbappendstream_locked(&so->so_rcv, m); 1649 } 1650 /* NB: sorwakeup_locked() does an implicit unlock. */ 1651 sorwakeup_locked(so); 1652 if (DELAY_ACK(tp)) { 1653 tp->t_flags |= TF_DELACK; 1654 } else { 1655 tp->t_flags |= TF_ACKNOW; 1656 tcp_output(tp); 1657 } 1658 goto check_delack; 1659 } 1660 } 1661 1662 /* 1663 * Calculate amount of space in receive window, 1664 * and then do TCP input processing. 1665 * Receive window is amount of space in rcv queue, 1666 * but not less than advertised window. 1667 */ 1668 win = sbspace(&so->so_rcv); 1669 if (win < 0) 1670 win = 0; 1671 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1672 1673 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 1674 tp->rfbuf_ts = 0; 1675 tp->rfbuf_cnt = 0; 1676 1677 switch (tp->t_state) { 1678 1679 /* 1680 * If the state is SYN_RECEIVED: 1681 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1682 */ 1683 case TCPS_SYN_RECEIVED: 1684 if ((thflags & TH_ACK) && 1685 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1686 SEQ_GT(th->th_ack, tp->snd_max))) { 1687 rstreason = BANDLIM_RST_OPENPORT; 1688 goto dropwithreset; 1689 } 1690 break; 1691 1692 /* 1693 * If the state is SYN_SENT: 1694 * if seg contains an ACK, but not for our SYN, drop the input. 1695 * if seg contains a RST, then drop the connection. 1696 * if seg does not contain SYN, then drop it. 1697 * Otherwise this is an acceptable SYN segment 1698 * initialize tp->rcv_nxt and tp->irs 1699 * if seg contains ack then advance tp->snd_una 1700 * if seg contains an ECE and ECN support is enabled, the stream 1701 * is ECN capable. 1702 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1703 * arrange for segment to be acked (eventually) 1704 * continue processing rest of data/controls, beginning with URG 1705 */ 1706 case TCPS_SYN_SENT: 1707 if ((thflags & TH_ACK) && 1708 (SEQ_LEQ(th->th_ack, tp->iss) || 1709 SEQ_GT(th->th_ack, tp->snd_max))) { 1710 rstreason = BANDLIM_UNLIMITED; 1711 goto dropwithreset; 1712 } 1713 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) 1714 tp = tcp_drop(tp, ECONNREFUSED); 1715 if (thflags & TH_RST) 1716 goto drop; 1717 if (!(thflags & TH_SYN)) 1718 goto drop; 1719 1720 tp->irs = th->th_seq; 1721 tcp_rcvseqinit(tp); 1722 if (thflags & TH_ACK) { 1723 TCPSTAT_INC(tcps_connects); 1724 soisconnected(so); 1725#ifdef MAC 1726 mac_socketpeer_set_from_mbuf(m, so); 1727#endif 1728 /* Do window scaling on this connection? */ 1729 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1730 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1731 tp->rcv_scale = tp->request_r_scale; 1732 } 1733 tp->rcv_adv += tp->rcv_wnd; 1734 tp->snd_una++; /* SYN is acked */ 1735 /* 1736 * If there's data, delay ACK; if there's also a FIN 1737 * ACKNOW will be turned on later. 1738 */ 1739 if (DELAY_ACK(tp) && tlen != 0) 1740 tcp_timer_activate(tp, TT_DELACK, 1741 tcp_delacktime); 1742 else 1743 tp->t_flags |= TF_ACKNOW; 1744 1745 if ((thflags & TH_ECE) && V_tcp_do_ecn) { 1746 tp->t_flags |= TF_ECN_PERMIT; 1747 TCPSTAT_INC(tcps_ecn_shs); 1748 } 1749 1750 /* 1751 * Received <SYN,ACK> in SYN_SENT[*] state. 1752 * Transitions: 1753 * SYN_SENT --> ESTABLISHED 1754 * SYN_SENT* --> FIN_WAIT_1 1755 */ 1756 tp->t_starttime = ticks; 1757 if (tp->t_flags & TF_NEEDFIN) { 1758 tp->t_state = TCPS_FIN_WAIT_1; 1759 tp->t_flags &= ~TF_NEEDFIN; 1760 thflags &= ~TH_SYN; 1761 } else { 1762 tp->t_state = TCPS_ESTABLISHED; 1763 cc_conn_init(tp); 1764 tcp_timer_activate(tp, TT_KEEP, tcp_keepidle); 1765 } 1766 } else { 1767 /* 1768 * Received initial SYN in SYN-SENT[*] state => 1769 * simultaneous open. If segment contains CC option 1770 * and there is a cached CC, apply TAO test. 1771 * If it succeeds, connection is * half-synchronized. 1772 * Otherwise, do 3-way handshake: 1773 * SYN-SENT -> SYN-RECEIVED 1774 * SYN-SENT* -> SYN-RECEIVED* 1775 * If there was no CC option, clear cached CC value. 1776 */ 1777 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 1778 tcp_timer_activate(tp, TT_REXMT, 0); 1779 tp->t_state = TCPS_SYN_RECEIVED; 1780 } 1781 1782 KASSERT(ti_locked == TI_WLOCKED, ("%s: trimthenstep6: " 1783 "ti_locked %d", __func__, ti_locked)); 1784 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 1785 INP_WLOCK_ASSERT(tp->t_inpcb); 1786 1787 /* 1788 * Advance th->th_seq to correspond to first data byte. 1789 * If data, trim to stay within window, 1790 * dropping FIN if necessary. 1791 */ 1792 th->th_seq++; 1793 if (tlen > tp->rcv_wnd) { 1794 todrop = tlen - tp->rcv_wnd; 1795 m_adj(m, -todrop); 1796 tlen = tp->rcv_wnd; 1797 thflags &= ~TH_FIN; 1798 TCPSTAT_INC(tcps_rcvpackafterwin); 1799 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 1800 } 1801 tp->snd_wl1 = th->th_seq - 1; 1802 tp->rcv_up = th->th_seq; 1803 /* 1804 * Client side of transaction: already sent SYN and data. 1805 * If the remote host used T/TCP to validate the SYN, 1806 * our data will be ACK'd; if so, enter normal data segment 1807 * processing in the middle of step 5, ack processing. 1808 * Otherwise, goto step 6. 1809 */ 1810 if (thflags & TH_ACK) 1811 goto process_ACK; 1812 1813 goto step6; 1814 1815 /* 1816 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 1817 * do normal processing. 1818 * 1819 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later. 1820 */ 1821 case TCPS_LAST_ACK: 1822 case TCPS_CLOSING: 1823 break; /* continue normal processing */ 1824 } 1825 1826 /* 1827 * States other than LISTEN or SYN_SENT. 1828 * First check the RST flag and sequence number since reset segments 1829 * are exempt from the timestamp and connection count tests. This 1830 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 1831 * below which allowed reset segments in half the sequence space 1832 * to fall though and be processed (which gives forged reset 1833 * segments with a random sequence number a 50 percent chance of 1834 * killing a connection). 1835 * Then check timestamp, if present. 1836 * Then check the connection count, if present. 1837 * Then check that at least some bytes of segment are within 1838 * receive window. If segment begins before rcv_nxt, 1839 * drop leading data (and SYN); if nothing left, just ack. 1840 * 1841 * 1842 * If the RST bit is set, check the sequence number to see 1843 * if this is a valid reset segment. 1844 * RFC 793 page 37: 1845 * In all states except SYN-SENT, all reset (RST) segments 1846 * are validated by checking their SEQ-fields. A reset is 1847 * valid if its sequence number is in the window. 1848 * Note: this does not take into account delayed ACKs, so 1849 * we should test against last_ack_sent instead of rcv_nxt. 1850 * The sequence number in the reset segment is normally an 1851 * echo of our outgoing acknowlegement numbers, but some hosts 1852 * send a reset with the sequence number at the rightmost edge 1853 * of our receive window, and we have to handle this case. 1854 * Note 2: Paul Watson's paper "Slipping in the Window" has shown 1855 * that brute force RST attacks are possible. To combat this, 1856 * we use a much stricter check while in the ESTABLISHED state, 1857 * only accepting RSTs where the sequence number is equal to 1858 * last_ack_sent. In all other states (the states in which a 1859 * RST is more likely), the more permissive check is used. 1860 * If we have multiple segments in flight, the initial reset 1861 * segment sequence numbers will be to the left of last_ack_sent, 1862 * but they will eventually catch up. 1863 * In any case, it never made sense to trim reset segments to 1864 * fit the receive window since RFC 1122 says: 1865 * 4.2.2.12 RST Segment: RFC-793 Section 3.4 1866 * 1867 * A TCP SHOULD allow a received RST segment to include data. 1868 * 1869 * DISCUSSION 1870 * It has been suggested that a RST segment could contain 1871 * ASCII text that encoded and explained the cause of the 1872 * RST. No standard has yet been established for such 1873 * data. 1874 * 1875 * If the reset segment passes the sequence number test examine 1876 * the state: 1877 * SYN_RECEIVED STATE: 1878 * If passive open, return to LISTEN state. 1879 * If active open, inform user that connection was refused. 1880 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES: 1881 * Inform user that connection was reset, and close tcb. 1882 * CLOSING, LAST_ACK STATES: 1883 * Close the tcb. 1884 * TIME_WAIT STATE: 1885 * Drop the segment - see Stevens, vol. 2, p. 964 and 1886 * RFC 1337. 1887 */ 1888 if (thflags & TH_RST) { 1889 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) && 1890 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 1891 switch (tp->t_state) { 1892 1893 case TCPS_SYN_RECEIVED: 1894 so->so_error = ECONNREFUSED; 1895 goto close; 1896 1897 case TCPS_ESTABLISHED: 1898 if (V_tcp_insecure_rst == 0 && 1899 !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) && 1900 SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) && 1901 !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) && 1902 SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) { 1903 TCPSTAT_INC(tcps_badrst); 1904 goto drop; 1905 } 1906 /* FALLTHROUGH */ 1907 case TCPS_FIN_WAIT_1: 1908 case TCPS_FIN_WAIT_2: 1909 case TCPS_CLOSE_WAIT: 1910 so->so_error = ECONNRESET; 1911 close: 1912 KASSERT(ti_locked == TI_WLOCKED, 1913 ("tcp_do_segment: TH_RST 1 ti_locked %d", 1914 ti_locked)); 1915 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 1916 1917 tp->t_state = TCPS_CLOSED; 1918 TCPSTAT_INC(tcps_drops); 1919 tp = tcp_close(tp); 1920 break; 1921 1922 case TCPS_CLOSING: 1923 case TCPS_LAST_ACK: 1924 KASSERT(ti_locked == TI_WLOCKED, 1925 ("tcp_do_segment: TH_RST 2 ti_locked %d", 1926 ti_locked)); 1927 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 1928 1929 tp = tcp_close(tp); 1930 break; 1931 } 1932 } 1933 goto drop; 1934 } 1935 1936 /* 1937 * RFC 1323 PAWS: If we have a timestamp reply on this segment 1938 * and it's less than ts_recent, drop it. 1939 */ 1940 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 1941 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 1942 1943 /* Check to see if ts_recent is over 24 days old. */ 1944 if (ticks - tp->ts_recent_age > TCP_PAWS_IDLE) { 1945 /* 1946 * Invalidate ts_recent. If this segment updates 1947 * ts_recent, the age will be reset later and ts_recent 1948 * will get a valid value. If it does not, setting 1949 * ts_recent to zero will at least satisfy the 1950 * requirement that zero be placed in the timestamp 1951 * echo reply when ts_recent isn't valid. The 1952 * age isn't reset until we get a valid ts_recent 1953 * because we don't want out-of-order segments to be 1954 * dropped when ts_recent is old. 1955 */ 1956 tp->ts_recent = 0; 1957 } else { 1958 TCPSTAT_INC(tcps_rcvduppack); 1959 TCPSTAT_ADD(tcps_rcvdupbyte, tlen); 1960 TCPSTAT_INC(tcps_pawsdrop); 1961 if (tlen) 1962 goto dropafterack; 1963 goto drop; 1964 } 1965 } 1966 1967 /* 1968 * In the SYN-RECEIVED state, validate that the packet belongs to 1969 * this connection before trimming the data to fit the receive 1970 * window. Check the sequence number versus IRS since we know 1971 * the sequence numbers haven't wrapped. This is a partial fix 1972 * for the "LAND" DoS attack. 1973 */ 1974 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 1975 rstreason = BANDLIM_RST_OPENPORT; 1976 goto dropwithreset; 1977 } 1978 1979 todrop = tp->rcv_nxt - th->th_seq; 1980 if (todrop > 0) { 1981 /* 1982 * If this is a duplicate SYN for our current connection, 1983 * advance over it and pretend and it's not a SYN. 1984 */ 1985 if (thflags & TH_SYN && th->th_seq == tp->irs) { 1986 thflags &= ~TH_SYN; 1987 th->th_seq++; 1988 if (th->th_urp > 1) 1989 th->th_urp--; 1990 else 1991 thflags &= ~TH_URG; 1992 todrop--; 1993 } 1994 /* 1995 * Following if statement from Stevens, vol. 2, p. 960. 1996 */ 1997 if (todrop > tlen 1998 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 1999 /* 2000 * Any valid FIN must be to the left of the window. 2001 * At this point the FIN must be a duplicate or out 2002 * of sequence; drop it. 2003 */ 2004 thflags &= ~TH_FIN; 2005 2006 /* 2007 * Send an ACK to resynchronize and drop any data. 2008 * But keep on processing for RST or ACK. 2009 */ 2010 tp->t_flags |= TF_ACKNOW; 2011 todrop = tlen; 2012 TCPSTAT_INC(tcps_rcvduppack); 2013 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 2014 } else { 2015 TCPSTAT_INC(tcps_rcvpartduppack); 2016 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 2017 } 2018 drop_hdrlen += todrop; /* drop from the top afterwards */ 2019 th->th_seq += todrop; 2020 tlen -= todrop; 2021 if (th->th_urp > todrop) 2022 th->th_urp -= todrop; 2023 else { 2024 thflags &= ~TH_URG; 2025 th->th_urp = 0; 2026 } 2027 } 2028 2029 /* 2030 * If new data are received on a connection after the 2031 * user processes are gone, then RST the other end. 2032 */ 2033 if ((so->so_state & SS_NOFDREF) && 2034 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 2035 char *s; 2036 2037 KASSERT(ti_locked == TI_WLOCKED, ("%s: SS_NOFDEREF && " 2038 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked)); 2039 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2040 2041 if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) { 2042 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data after socket " 2043 "was closed, sending RST and removing tcpcb\n", 2044 s, __func__, tcpstates[tp->t_state], tlen); 2045 free(s, M_TCPLOG); 2046 } 2047 tp = tcp_close(tp); 2048 TCPSTAT_INC(tcps_rcvafterclose); 2049 rstreason = BANDLIM_UNLIMITED; 2050 goto dropwithreset; 2051 } 2052 2053 /* 2054 * If segment ends after window, drop trailing data 2055 * (and PUSH and FIN); if nothing left, just ACK. 2056 */ 2057 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 2058 if (todrop > 0) { 2059 TCPSTAT_INC(tcps_rcvpackafterwin); 2060 if (todrop >= tlen) { 2061 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 2062 /* 2063 * If window is closed can only take segments at 2064 * window edge, and have to drop data and PUSH from 2065 * incoming segments. Continue processing, but 2066 * remember to ack. Otherwise, drop segment 2067 * and ack. 2068 */ 2069 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 2070 tp->t_flags |= TF_ACKNOW; 2071 TCPSTAT_INC(tcps_rcvwinprobe); 2072 } else 2073 goto dropafterack; 2074 } else 2075 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 2076 m_adj(m, -todrop); 2077 tlen -= todrop; 2078 thflags &= ~(TH_PUSH|TH_FIN); 2079 } 2080 2081 /* 2082 * If last ACK falls within this segment's sequence numbers, 2083 * record its timestamp. 2084 * NOTE: 2085 * 1) That the test incorporates suggestions from the latest 2086 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 2087 * 2) That updating only on newer timestamps interferes with 2088 * our earlier PAWS tests, so this check should be solely 2089 * predicated on the sequence space of this segment. 2090 * 3) That we modify the segment boundary check to be 2091 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 2092 * instead of RFC1323's 2093 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 2094 * This modified check allows us to overcome RFC1323's 2095 * limitations as described in Stevens TCP/IP Illustrated 2096 * Vol. 2 p.869. In such cases, we can still calculate the 2097 * RTT correctly when RCV.NXT == Last.ACK.Sent. 2098 */ 2099 if ((to.to_flags & TOF_TS) != 0 && 2100 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 2101 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 2102 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 2103 tp->ts_recent_age = ticks; 2104 tp->ts_recent = to.to_tsval; 2105 } 2106 2107 /* 2108 * If a SYN is in the window, then this is an 2109 * error and we send an RST and drop the connection. 2110 */ 2111 if (thflags & TH_SYN) { 2112 KASSERT(ti_locked == TI_WLOCKED, 2113 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked)); 2114 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2115 2116 tp = tcp_drop(tp, ECONNRESET); 2117 rstreason = BANDLIM_UNLIMITED; 2118 goto drop; 2119 } 2120 2121 /* 2122 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 2123 * flag is on (half-synchronized state), then queue data for 2124 * later processing; else drop segment and return. 2125 */ 2126 if ((thflags & TH_ACK) == 0) { 2127 if (tp->t_state == TCPS_SYN_RECEIVED || 2128 (tp->t_flags & TF_NEEDSYN)) 2129 goto step6; 2130 else if (tp->t_flags & TF_ACKNOW) 2131 goto dropafterack; 2132 else 2133 goto drop; 2134 } 2135 2136 /* 2137 * Ack processing. 2138 */ 2139 switch (tp->t_state) { 2140 2141 /* 2142 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 2143 * ESTABLISHED state and continue processing. 2144 * The ACK was checked above. 2145 */ 2146 case TCPS_SYN_RECEIVED: 2147 2148 TCPSTAT_INC(tcps_connects); 2149 soisconnected(so); 2150 /* Do window scaling? */ 2151 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2152 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2153 tp->rcv_scale = tp->request_r_scale; 2154 tp->snd_wnd = tiwin; 2155 } 2156 /* 2157 * Make transitions: 2158 * SYN-RECEIVED -> ESTABLISHED 2159 * SYN-RECEIVED* -> FIN-WAIT-1 2160 */ 2161 tp->t_starttime = ticks; 2162 if (tp->t_flags & TF_NEEDFIN) { 2163 tp->t_state = TCPS_FIN_WAIT_1; 2164 tp->t_flags &= ~TF_NEEDFIN; 2165 } else { 2166 tp->t_state = TCPS_ESTABLISHED; 2167 cc_conn_init(tp); 2168 tcp_timer_activate(tp, TT_KEEP, tcp_keepidle); 2169 } 2170 /* 2171 * If segment contains data or ACK, will call tcp_reass() 2172 * later; if not, do so now to pass queued data to user. 2173 */ 2174 if (tlen == 0 && (thflags & TH_FIN) == 0) 2175 (void) tcp_reass(tp, (struct tcphdr *)0, 0, 2176 (struct mbuf *)0); 2177 tp->snd_wl1 = th->th_seq - 1; 2178 /* FALLTHROUGH */ 2179 2180 /* 2181 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 2182 * ACKs. If the ack is in the range 2183 * tp->snd_una < th->th_ack <= tp->snd_max 2184 * then advance tp->snd_una to th->th_ack and drop 2185 * data from the retransmission queue. If this ACK reflects 2186 * more up to date window information we update our window information. 2187 */ 2188 case TCPS_ESTABLISHED: 2189 case TCPS_FIN_WAIT_1: 2190 case TCPS_FIN_WAIT_2: 2191 case TCPS_CLOSE_WAIT: 2192 case TCPS_CLOSING: 2193 case TCPS_LAST_ACK: 2194 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2195 TCPSTAT_INC(tcps_rcvacktoomuch); 2196 goto dropafterack; 2197 } 2198 if ((tp->t_flags & TF_SACK_PERMIT) && 2199 ((to.to_flags & TOF_SACK) || 2200 !TAILQ_EMPTY(&tp->snd_holes))) 2201 tcp_sack_doack(tp, &to, th->th_ack); 2202 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2203 if (tlen == 0 && tiwin == tp->snd_wnd) { 2204 TCPSTAT_INC(tcps_rcvdupack); 2205 /* 2206 * If we have outstanding data (other than 2207 * a window probe), this is a completely 2208 * duplicate ack (ie, window info didn't 2209 * change), the ack is the biggest we've 2210 * seen and we've seen exactly our rexmt 2211 * threshhold of them, assume a packet 2212 * has been dropped and retransmit it. 2213 * Kludge snd_nxt & the congestion 2214 * window so we send only this one 2215 * packet. 2216 * 2217 * We know we're losing at the current 2218 * window size so do congestion avoidance 2219 * (set ssthresh to half the current window 2220 * and pull our congestion window back to 2221 * the new ssthresh). 2222 * 2223 * Dup acks mean that packets have left the 2224 * network (they're now cached at the receiver) 2225 * so bump cwnd by the amount in the receiver 2226 * to keep a constant cwnd packets in the 2227 * network. 2228 * 2229 * When using TCP ECN, notify the peer that 2230 * we reduced the cwnd. 2231 */ 2232 if (!tcp_timer_active(tp, TT_REXMT) || 2233 th->th_ack != tp->snd_una) 2234 tp->t_dupacks = 0; 2235 else if (++tp->t_dupacks > tcprexmtthresh || 2236 IN_FASTRECOVERY(tp->t_flags)) { 2237 cc_ack_received(tp, th, CC_DUPACK); 2238 if ((tp->t_flags & TF_SACK_PERMIT) && 2239 IN_FASTRECOVERY(tp->t_flags)) { 2240 int awnd; 2241 2242 /* 2243 * Compute the amount of data in flight first. 2244 * We can inject new data into the pipe iff 2245 * we have less than 1/2 the original window's 2246 * worth of data in flight. 2247 */ 2248 awnd = (tp->snd_nxt - tp->snd_fack) + 2249 tp->sackhint.sack_bytes_rexmit; 2250 if (awnd < tp->snd_ssthresh) { 2251 tp->snd_cwnd += tp->t_maxseg; 2252 if (tp->snd_cwnd > tp->snd_ssthresh) 2253 tp->snd_cwnd = tp->snd_ssthresh; 2254 } 2255 } else 2256 tp->snd_cwnd += tp->t_maxseg; 2257 (void) tcp_output(tp); 2258 goto drop; 2259 } else if (tp->t_dupacks == tcprexmtthresh) { 2260 tcp_seq onxt = tp->snd_nxt; 2261 2262 /* 2263 * If we're doing sack, check to 2264 * see if we're already in sack 2265 * recovery. If we're not doing sack, 2266 * check to see if we're in newreno 2267 * recovery. 2268 */ 2269 if (tp->t_flags & TF_SACK_PERMIT) { 2270 if (IN_FASTRECOVERY(tp->t_flags)) { 2271 tp->t_dupacks = 0; 2272 break; 2273 } 2274 } else { 2275 if (SEQ_LEQ(th->th_ack, 2276 tp->snd_recover)) { 2277 tp->t_dupacks = 0; 2278 break; 2279 } 2280 } 2281 /* Congestion signal before ack. */ 2282 cc_cong_signal(tp, th, CC_NDUPACK); 2283 cc_ack_received(tp, th, CC_DUPACK); 2284 tcp_timer_activate(tp, TT_REXMT, 0); 2285 tp->t_rtttime = 0; 2286 if (tp->t_flags & TF_SACK_PERMIT) { 2287 TCPSTAT_INC( 2288 tcps_sack_recovery_episode); 2289 tp->sack_newdata = tp->snd_nxt; 2290 tp->snd_cwnd = tp->t_maxseg; 2291 (void) tcp_output(tp); 2292 goto drop; 2293 } 2294 tp->snd_nxt = th->th_ack; 2295 tp->snd_cwnd = tp->t_maxseg; 2296 (void) tcp_output(tp); 2297 KASSERT(tp->snd_limited <= 2, 2298 ("%s: tp->snd_limited too big", 2299 __func__)); 2300 tp->snd_cwnd = tp->snd_ssthresh + 2301 tp->t_maxseg * 2302 (tp->t_dupacks - tp->snd_limited); 2303 if (SEQ_GT(onxt, tp->snd_nxt)) 2304 tp->snd_nxt = onxt; 2305 goto drop; 2306 } else if (V_tcp_do_rfc3042) { 2307 cc_ack_received(tp, th, CC_DUPACK); 2308 u_long oldcwnd = tp->snd_cwnd; 2309 tcp_seq oldsndmax = tp->snd_max; 2310 u_int sent; 2311 2312 KASSERT(tp->t_dupacks == 1 || 2313 tp->t_dupacks == 2, 2314 ("%s: dupacks not 1 or 2", 2315 __func__)); 2316 if (tp->t_dupacks == 1) 2317 tp->snd_limited = 0; 2318 tp->snd_cwnd = 2319 (tp->snd_nxt - tp->snd_una) + 2320 (tp->t_dupacks - tp->snd_limited) * 2321 tp->t_maxseg; 2322 (void) tcp_output(tp); 2323 sent = tp->snd_max - oldsndmax; 2324 if (sent > tp->t_maxseg) { 2325 KASSERT((tp->t_dupacks == 2 && 2326 tp->snd_limited == 0) || 2327 (sent == tp->t_maxseg + 1 && 2328 tp->t_flags & TF_SENTFIN), 2329 ("%s: sent too much", 2330 __func__)); 2331 tp->snd_limited = 2; 2332 } else if (sent > 0) 2333 ++tp->snd_limited; 2334 tp->snd_cwnd = oldcwnd; 2335 goto drop; 2336 } 2337 } else 2338 tp->t_dupacks = 0; 2339 break; 2340 } 2341 2342 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2343 ("%s: th_ack <= snd_una", __func__)); 2344 2345 /* 2346 * If the congestion window was inflated to account 2347 * for the other side's cached packets, retract it. 2348 */ 2349 if (IN_FASTRECOVERY(tp->t_flags)) { 2350 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2351 if (tp->t_flags & TF_SACK_PERMIT) 2352 tcp_sack_partialack(tp, th); 2353 else 2354 tcp_newreno_partial_ack(tp, th); 2355 } else 2356 cc_post_recovery(tp, th); 2357 } 2358 tp->t_dupacks = 0; 2359 /* 2360 * If we reach this point, ACK is not a duplicate, 2361 * i.e., it ACKs something we sent. 2362 */ 2363 if (tp->t_flags & TF_NEEDSYN) { 2364 /* 2365 * T/TCP: Connection was half-synchronized, and our 2366 * SYN has been ACK'd (so connection is now fully 2367 * synchronized). Go to non-starred state, 2368 * increment snd_una for ACK of SYN, and check if 2369 * we can do window scaling. 2370 */ 2371 tp->t_flags &= ~TF_NEEDSYN; 2372 tp->snd_una++; 2373 /* Do window scaling? */ 2374 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2375 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2376 tp->rcv_scale = tp->request_r_scale; 2377 /* Send window already scaled. */ 2378 } 2379 } 2380 2381process_ACK: 2382 INP_INFO_LOCK_ASSERT(&V_tcbinfo); 2383 KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED, 2384 ("tcp_input: process_ACK ti_locked %d", ti_locked)); 2385 INP_WLOCK_ASSERT(tp->t_inpcb); 2386 2387 acked = BYTES_THIS_ACK(tp, th); 2388 TCPSTAT_INC(tcps_rcvackpack); 2389 TCPSTAT_ADD(tcps_rcvackbyte, acked); 2390 2391 /* 2392 * If we just performed our first retransmit, and the ACK 2393 * arrives within our recovery window, then it was a mistake 2394 * to do the retransmit in the first place. Recover our 2395 * original cwnd and ssthresh, and proceed to transmit where 2396 * we left off. 2397 */ 2398 if (tp->t_rxtshift == 1 && (int)(ticks - tp->t_badrxtwin) < 0) 2399 cc_cong_signal(tp, th, CC_RTO_ERR); 2400 2401 /* 2402 * If we have a timestamp reply, update smoothed 2403 * round trip time. If no timestamp is present but 2404 * transmit timer is running and timed sequence 2405 * number was acked, update smoothed round trip time. 2406 * Since we now have an rtt measurement, cancel the 2407 * timer backoff (cf., Phil Karn's retransmit alg.). 2408 * Recompute the initial retransmit timer. 2409 * 2410 * Some boxes send broken timestamp replies 2411 * during the SYN+ACK phase, ignore 2412 * timestamps of 0 or we could calculate a 2413 * huge RTT and blow up the retransmit timer. 2414 */ 2415 if ((to.to_flags & TOF_TS) != 0 && 2416 to.to_tsecr) { 2417 if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr) 2418 tp->t_rttlow = ticks - to.to_tsecr; 2419 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1); 2420 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2421 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2422 tp->t_rttlow = ticks - tp->t_rtttime; 2423 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2424 } 2425 2426 /* 2427 * If all outstanding data is acked, stop retransmit 2428 * timer and remember to restart (more output or persist). 2429 * If there is more data to be acked, restart retransmit 2430 * timer, using current (possibly backed-off) value. 2431 */ 2432 if (th->th_ack == tp->snd_max) { 2433 tcp_timer_activate(tp, TT_REXMT, 0); 2434 needoutput = 1; 2435 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2436 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2437 2438 /* 2439 * If no data (only SYN) was ACK'd, 2440 * skip rest of ACK processing. 2441 */ 2442 if (acked == 0) 2443 goto step6; 2444 2445 /* 2446 * Let the congestion control algorithm update congestion 2447 * control related information. This typically means increasing 2448 * the congestion window. 2449 */ 2450 cc_ack_received(tp, th, CC_ACK); 2451 2452 SOCKBUF_LOCK(&so->so_snd); 2453 if (acked > so->so_snd.sb_cc) { 2454 tp->snd_wnd -= so->so_snd.sb_cc; 2455 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc); 2456 ourfinisacked = 1; 2457 } else { 2458 sbdrop_locked(&so->so_snd, acked); 2459 tp->snd_wnd -= acked; 2460 ourfinisacked = 0; 2461 } 2462 /* NB: sowwakeup_locked() does an implicit unlock. */ 2463 sowwakeup_locked(so); 2464 /* Detect una wraparound. */ 2465 if (!IN_RECOVERY(tp->t_flags) && 2466 SEQ_GT(tp->snd_una, tp->snd_recover) && 2467 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2468 tp->snd_recover = th->th_ack - 1; 2469 /* XXXLAS: Can this be moved up into cc_post_recovery? */ 2470 if (IN_RECOVERY(tp->t_flags) && 2471 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2472 EXIT_RECOVERY(tp->t_flags); 2473 } 2474 tp->snd_una = th->th_ack; 2475 if (tp->t_flags & TF_SACK_PERMIT) { 2476 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2477 tp->snd_recover = tp->snd_una; 2478 } 2479 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2480 tp->snd_nxt = tp->snd_una; 2481 2482 switch (tp->t_state) { 2483 2484 /* 2485 * In FIN_WAIT_1 STATE in addition to the processing 2486 * for the ESTABLISHED state if our FIN is now acknowledged 2487 * then enter FIN_WAIT_2. 2488 */ 2489 case TCPS_FIN_WAIT_1: 2490 if (ourfinisacked) { 2491 /* 2492 * If we can't receive any more 2493 * data, then closing user can proceed. 2494 * Starting the timer is contrary to the 2495 * specification, but if we don't get a FIN 2496 * we'll hang forever. 2497 * 2498 * XXXjl: 2499 * we should release the tp also, and use a 2500 * compressed state. 2501 */ 2502 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2503 int timeout; 2504 2505 soisdisconnected(so); 2506 timeout = (tcp_fast_finwait2_recycle) ? 2507 tcp_finwait2_timeout : tcp_maxidle; 2508 tcp_timer_activate(tp, TT_2MSL, timeout); 2509 } 2510 tp->t_state = TCPS_FIN_WAIT_2; 2511 } 2512 break; 2513 2514 /* 2515 * In CLOSING STATE in addition to the processing for 2516 * the ESTABLISHED state if the ACK acknowledges our FIN 2517 * then enter the TIME-WAIT state, otherwise ignore 2518 * the segment. 2519 */ 2520 case TCPS_CLOSING: 2521 if (ourfinisacked) { 2522 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2523 tcp_twstart(tp); 2524 INP_INFO_WUNLOCK(&V_tcbinfo); 2525 m_freem(m); 2526 return; 2527 } 2528 break; 2529 2530 /* 2531 * In LAST_ACK, we may still be waiting for data to drain 2532 * and/or to be acked, as well as for the ack of our FIN. 2533 * If our FIN is now acknowledged, delete the TCB, 2534 * enter the closed state and return. 2535 */ 2536 case TCPS_LAST_ACK: 2537 if (ourfinisacked) { 2538 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2539 tp = tcp_close(tp); 2540 goto drop; 2541 } 2542 break; 2543 } 2544 } 2545 2546step6: 2547 INP_INFO_LOCK_ASSERT(&V_tcbinfo); 2548 KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED, 2549 ("tcp_do_segment: step6 ti_locked %d", ti_locked)); 2550 INP_WLOCK_ASSERT(tp->t_inpcb); 2551 2552 /* 2553 * Update window information. 2554 * Don't look at window if no ACK: TAC's send garbage on first SYN. 2555 */ 2556 if ((thflags & TH_ACK) && 2557 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2558 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2559 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2560 /* keep track of pure window updates */ 2561 if (tlen == 0 && 2562 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2563 TCPSTAT_INC(tcps_rcvwinupd); 2564 tp->snd_wnd = tiwin; 2565 tp->snd_wl1 = th->th_seq; 2566 tp->snd_wl2 = th->th_ack; 2567 if (tp->snd_wnd > tp->max_sndwnd) 2568 tp->max_sndwnd = tp->snd_wnd; 2569 needoutput = 1; 2570 } 2571 2572 /* 2573 * Process segments with URG. 2574 */ 2575 if ((thflags & TH_URG) && th->th_urp && 2576 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2577 /* 2578 * This is a kludge, but if we receive and accept 2579 * random urgent pointers, we'll crash in 2580 * soreceive. It's hard to imagine someone 2581 * actually wanting to send this much urgent data. 2582 */ 2583 SOCKBUF_LOCK(&so->so_rcv); 2584 if (th->th_urp + so->so_rcv.sb_cc > sb_max) { 2585 th->th_urp = 0; /* XXX */ 2586 thflags &= ~TH_URG; /* XXX */ 2587 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2588 goto dodata; /* XXX */ 2589 } 2590 /* 2591 * If this segment advances the known urgent pointer, 2592 * then mark the data stream. This should not happen 2593 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2594 * a FIN has been received from the remote side. 2595 * In these states we ignore the URG. 2596 * 2597 * According to RFC961 (Assigned Protocols), 2598 * the urgent pointer points to the last octet 2599 * of urgent data. We continue, however, 2600 * to consider it to indicate the first octet 2601 * of data past the urgent section as the original 2602 * spec states (in one of two places). 2603 */ 2604 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2605 tp->rcv_up = th->th_seq + th->th_urp; 2606 so->so_oobmark = so->so_rcv.sb_cc + 2607 (tp->rcv_up - tp->rcv_nxt) - 1; 2608 if (so->so_oobmark == 0) 2609 so->so_rcv.sb_state |= SBS_RCVATMARK; 2610 sohasoutofband(so); 2611 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2612 } 2613 SOCKBUF_UNLOCK(&so->so_rcv); 2614 /* 2615 * Remove out of band data so doesn't get presented to user. 2616 * This can happen independent of advancing the URG pointer, 2617 * but if two URG's are pending at once, some out-of-band 2618 * data may creep in... ick. 2619 */ 2620 if (th->th_urp <= (u_long)tlen && 2621 !(so->so_options & SO_OOBINLINE)) { 2622 /* hdr drop is delayed */ 2623 tcp_pulloutofband(so, th, m, drop_hdrlen); 2624 } 2625 } else { 2626 /* 2627 * If no out of band data is expected, 2628 * pull receive urgent pointer along 2629 * with the receive window. 2630 */ 2631 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2632 tp->rcv_up = tp->rcv_nxt; 2633 } 2634dodata: /* XXX */ 2635 INP_INFO_LOCK_ASSERT(&V_tcbinfo); 2636 KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED, 2637 ("tcp_do_segment: dodata ti_locked %d", ti_locked)); 2638 INP_WLOCK_ASSERT(tp->t_inpcb); 2639 2640 /* 2641 * Process the segment text, merging it into the TCP sequencing queue, 2642 * and arranging for acknowledgment of receipt if necessary. 2643 * This process logically involves adjusting tp->rcv_wnd as data 2644 * is presented to the user (this happens in tcp_usrreq.c, 2645 * case PRU_RCVD). If a FIN has already been received on this 2646 * connection then we just ignore the text. 2647 */ 2648 if ((tlen || (thflags & TH_FIN)) && 2649 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2650 tcp_seq save_start = th->th_seq; 2651 m_adj(m, drop_hdrlen); /* delayed header drop */ 2652 /* 2653 * Insert segment which includes th into TCP reassembly queue 2654 * with control block tp. Set thflags to whether reassembly now 2655 * includes a segment with FIN. This handles the common case 2656 * inline (segment is the next to be received on an established 2657 * connection, and the queue is empty), avoiding linkage into 2658 * and removal from the queue and repetition of various 2659 * conversions. 2660 * Set DELACK for segments received in order, but ack 2661 * immediately when segments are out of order (so 2662 * fast retransmit can work). 2663 */ 2664 if (th->th_seq == tp->rcv_nxt && 2665 LIST_EMPTY(&tp->t_segq) && 2666 TCPS_HAVEESTABLISHED(tp->t_state)) { 2667 if (DELAY_ACK(tp)) 2668 tp->t_flags |= TF_DELACK; 2669 else 2670 tp->t_flags |= TF_ACKNOW; 2671 tp->rcv_nxt += tlen; 2672 thflags = th->th_flags & TH_FIN; 2673 TCPSTAT_INC(tcps_rcvpack); 2674 TCPSTAT_ADD(tcps_rcvbyte, tlen); 2675 ND6_HINT(tp); 2676 SOCKBUF_LOCK(&so->so_rcv); 2677 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 2678 m_freem(m); 2679 else 2680 sbappendstream_locked(&so->so_rcv, m); 2681 /* NB: sorwakeup_locked() does an implicit unlock. */ 2682 sorwakeup_locked(so); 2683 } else { 2684 /* 2685 * XXX: Due to the header drop above "th" is 2686 * theoretically invalid by now. Fortunately 2687 * m_adj() doesn't actually frees any mbufs 2688 * when trimming from the head. 2689 */ 2690 thflags = tcp_reass(tp, th, &tlen, m); 2691 tp->t_flags |= TF_ACKNOW; 2692 } 2693 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT)) 2694 tcp_update_sack_list(tp, save_start, save_start + tlen); 2695#if 0 2696 /* 2697 * Note the amount of data that peer has sent into 2698 * our window, in order to estimate the sender's 2699 * buffer size. 2700 * XXX: Unused. 2701 */ 2702 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 2703#endif 2704 } else { 2705 m_freem(m); 2706 thflags &= ~TH_FIN; 2707 } 2708 2709 /* 2710 * If FIN is received ACK the FIN and let the user know 2711 * that the connection is closing. 2712 */ 2713 if (thflags & TH_FIN) { 2714 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2715 socantrcvmore(so); 2716 /* 2717 * If connection is half-synchronized 2718 * (ie NEEDSYN flag on) then delay ACK, 2719 * so it may be piggybacked when SYN is sent. 2720 * Otherwise, since we received a FIN then no 2721 * more input can be expected, send ACK now. 2722 */ 2723 if (tp->t_flags & TF_NEEDSYN) 2724 tp->t_flags |= TF_DELACK; 2725 else 2726 tp->t_flags |= TF_ACKNOW; 2727 tp->rcv_nxt++; 2728 } 2729 switch (tp->t_state) { 2730 2731 /* 2732 * In SYN_RECEIVED and ESTABLISHED STATES 2733 * enter the CLOSE_WAIT state. 2734 */ 2735 case TCPS_SYN_RECEIVED: 2736 tp->t_starttime = ticks; 2737 /* FALLTHROUGH */ 2738 case TCPS_ESTABLISHED: 2739 tp->t_state = TCPS_CLOSE_WAIT; 2740 break; 2741 2742 /* 2743 * If still in FIN_WAIT_1 STATE FIN has not been acked so 2744 * enter the CLOSING state. 2745 */ 2746 case TCPS_FIN_WAIT_1: 2747 tp->t_state = TCPS_CLOSING; 2748 break; 2749 2750 /* 2751 * In FIN_WAIT_2 state enter the TIME_WAIT state, 2752 * starting the time-wait timer, turning off the other 2753 * standard timers. 2754 */ 2755 case TCPS_FIN_WAIT_2: 2756 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2757 KASSERT(ti_locked == TI_WLOCKED, ("%s: dodata " 2758 "TCP_FIN_WAIT_2 ti_locked: %d", __func__, 2759 ti_locked)); 2760 2761 tcp_twstart(tp); 2762 INP_INFO_WUNLOCK(&V_tcbinfo); 2763 return; 2764 } 2765 } 2766 if (ti_locked == TI_RLOCKED) 2767 INP_INFO_RUNLOCK(&V_tcbinfo); 2768 else if (ti_locked == TI_WLOCKED) 2769 INP_INFO_WUNLOCK(&V_tcbinfo); 2770 else 2771 panic("%s: dodata epilogue ti_locked %d", __func__, 2772 ti_locked); 2773 ti_locked = TI_UNLOCKED; 2774 2775#ifdef TCPDEBUG 2776 if (so->so_options & SO_DEBUG) 2777 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 2778 &tcp_savetcp, 0); 2779#endif 2780 2781 /* 2782 * Return any desired output. 2783 */ 2784 if (needoutput || (tp->t_flags & TF_ACKNOW)) 2785 (void) tcp_output(tp); 2786 2787check_delack: 2788 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d", 2789 __func__, ti_locked)); 2790 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 2791 INP_WLOCK_ASSERT(tp->t_inpcb); 2792 2793 if (tp->t_flags & TF_DELACK) { 2794 tp->t_flags &= ~TF_DELACK; 2795 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 2796 } 2797 INP_WUNLOCK(tp->t_inpcb); 2798 return; 2799 2800dropafterack: 2801 KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED, 2802 ("tcp_do_segment: dropafterack ti_locked %d", ti_locked)); 2803 2804 /* 2805 * Generate an ACK dropping incoming segment if it occupies 2806 * sequence space, where the ACK reflects our state. 2807 * 2808 * We can now skip the test for the RST flag since all 2809 * paths to this code happen after packets containing 2810 * RST have been dropped. 2811 * 2812 * In the SYN-RECEIVED state, don't send an ACK unless the 2813 * segment we received passes the SYN-RECEIVED ACK test. 2814 * If it fails send a RST. This breaks the loop in the 2815 * "LAND" DoS attack, and also prevents an ACK storm 2816 * between two listening ports that have been sent forged 2817 * SYN segments, each with the source address of the other. 2818 */ 2819 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 2820 (SEQ_GT(tp->snd_una, th->th_ack) || 2821 SEQ_GT(th->th_ack, tp->snd_max)) ) { 2822 rstreason = BANDLIM_RST_OPENPORT; 2823 goto dropwithreset; 2824 } 2825#ifdef TCPDEBUG 2826 if (so->so_options & SO_DEBUG) 2827 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2828 &tcp_savetcp, 0); 2829#endif 2830 if (ti_locked == TI_RLOCKED) 2831 INP_INFO_RUNLOCK(&V_tcbinfo); 2832 else if (ti_locked == TI_WLOCKED) 2833 INP_INFO_WUNLOCK(&V_tcbinfo); 2834 else 2835 panic("%s: dropafterack epilogue ti_locked %d", __func__, 2836 ti_locked); 2837 ti_locked = TI_UNLOCKED; 2838 2839 tp->t_flags |= TF_ACKNOW; 2840 (void) tcp_output(tp); 2841 INP_WUNLOCK(tp->t_inpcb); 2842 m_freem(m); 2843 return; 2844 2845dropwithreset: 2846 if (ti_locked == TI_RLOCKED) 2847 INP_INFO_RUNLOCK(&V_tcbinfo); 2848 else if (ti_locked == TI_WLOCKED) 2849 INP_INFO_WUNLOCK(&V_tcbinfo); 2850 else 2851 panic("%s: dropwithreset ti_locked %d", __func__, ti_locked); 2852 ti_locked = TI_UNLOCKED; 2853 2854 if (tp != NULL) { 2855 tcp_dropwithreset(m, th, tp, tlen, rstreason); 2856 INP_WUNLOCK(tp->t_inpcb); 2857 } else 2858 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 2859 return; 2860 2861drop: 2862 if (ti_locked == TI_RLOCKED) 2863 INP_INFO_RUNLOCK(&V_tcbinfo); 2864 else if (ti_locked == TI_WLOCKED) 2865 INP_INFO_WUNLOCK(&V_tcbinfo); 2866#ifdef INVARIANTS 2867 else 2868 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 2869#endif 2870 ti_locked = TI_UNLOCKED; 2871 2872 /* 2873 * Drop space held by incoming segment and return. 2874 */ 2875#ifdef TCPDEBUG 2876 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2877 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2878 &tcp_savetcp, 0); 2879#endif 2880 if (tp != NULL) 2881 INP_WUNLOCK(tp->t_inpcb); 2882 m_freem(m); 2883} 2884 2885/* 2886 * Issue RST and make ACK acceptable to originator of segment. 2887 * The mbuf must still include the original packet header. 2888 * tp may be NULL. 2889 */ 2890static void 2891tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 2892 int tlen, int rstreason) 2893{ 2894 struct ip *ip; 2895#ifdef INET6 2896 struct ip6_hdr *ip6; 2897#endif 2898 2899 if (tp != NULL) { 2900 INP_WLOCK_ASSERT(tp->t_inpcb); 2901 } 2902 2903 /* Don't bother if destination was broadcast/multicast. */ 2904 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 2905 goto drop; 2906#ifdef INET6 2907 if (mtod(m, struct ip *)->ip_v == 6) { 2908 ip6 = mtod(m, struct ip6_hdr *); 2909 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 2910 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 2911 goto drop; 2912 /* IPv6 anycast check is done at tcp6_input() */ 2913 } else 2914#endif 2915 { 2916 ip = mtod(m, struct ip *); 2917 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 2918 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 2919 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 2920 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 2921 goto drop; 2922 } 2923 2924 /* Perform bandwidth limiting. */ 2925 if (badport_bandlim(rstreason) < 0) 2926 goto drop; 2927 2928 /* tcp_respond consumes the mbuf chain. */ 2929 if (th->th_flags & TH_ACK) { 2930 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 2931 th->th_ack, TH_RST); 2932 } else { 2933 if (th->th_flags & TH_SYN) 2934 tlen++; 2935 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 2936 (tcp_seq)0, TH_RST|TH_ACK); 2937 } 2938 return; 2939drop: 2940 m_freem(m); 2941} 2942 2943/* 2944 * Parse TCP options and place in tcpopt. 2945 */ 2946static void 2947tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 2948{ 2949 int opt, optlen; 2950 2951 to->to_flags = 0; 2952 for (; cnt > 0; cnt -= optlen, cp += optlen) { 2953 opt = cp[0]; 2954 if (opt == TCPOPT_EOL) 2955 break; 2956 if (opt == TCPOPT_NOP) 2957 optlen = 1; 2958 else { 2959 if (cnt < 2) 2960 break; 2961 optlen = cp[1]; 2962 if (optlen < 2 || optlen > cnt) 2963 break; 2964 } 2965 switch (opt) { 2966 case TCPOPT_MAXSEG: 2967 if (optlen != TCPOLEN_MAXSEG) 2968 continue; 2969 if (!(flags & TO_SYN)) 2970 continue; 2971 to->to_flags |= TOF_MSS; 2972 bcopy((char *)cp + 2, 2973 (char *)&to->to_mss, sizeof(to->to_mss)); 2974 to->to_mss = ntohs(to->to_mss); 2975 break; 2976 case TCPOPT_WINDOW: 2977 if (optlen != TCPOLEN_WINDOW) 2978 continue; 2979 if (!(flags & TO_SYN)) 2980 continue; 2981 to->to_flags |= TOF_SCALE; 2982 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 2983 break; 2984 case TCPOPT_TIMESTAMP: 2985 if (optlen != TCPOLEN_TIMESTAMP) 2986 continue; 2987 to->to_flags |= TOF_TS; 2988 bcopy((char *)cp + 2, 2989 (char *)&to->to_tsval, sizeof(to->to_tsval)); 2990 to->to_tsval = ntohl(to->to_tsval); 2991 bcopy((char *)cp + 6, 2992 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 2993 to->to_tsecr = ntohl(to->to_tsecr); 2994 break; 2995#ifdef TCP_SIGNATURE 2996 /* 2997 * XXX In order to reply to a host which has set the 2998 * TCP_SIGNATURE option in its initial SYN, we have to 2999 * record the fact that the option was observed here 3000 * for the syncache code to perform the correct response. 3001 */ 3002 case TCPOPT_SIGNATURE: 3003 if (optlen != TCPOLEN_SIGNATURE) 3004 continue; 3005 to->to_flags |= TOF_SIGNATURE; 3006 to->to_signature = cp + 2; 3007 break; 3008#endif 3009 case TCPOPT_SACK_PERMITTED: 3010 if (optlen != TCPOLEN_SACK_PERMITTED) 3011 continue; 3012 if (!(flags & TO_SYN)) 3013 continue; 3014 if (!V_tcp_do_sack) 3015 continue; 3016 to->to_flags |= TOF_SACKPERM; 3017 break; 3018 case TCPOPT_SACK: 3019 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 3020 continue; 3021 if (flags & TO_SYN) 3022 continue; 3023 to->to_flags |= TOF_SACK; 3024 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 3025 to->to_sacks = cp + 2; 3026 TCPSTAT_INC(tcps_sack_rcv_blocks); 3027 break; 3028 default: 3029 continue; 3030 } 3031 } 3032} 3033 3034/* 3035 * Pull out of band byte out of a segment so 3036 * it doesn't appear in the user's data queue. 3037 * It is still reflected in the segment length for 3038 * sequencing purposes. 3039 */ 3040static void 3041tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 3042 int off) 3043{ 3044 int cnt = off + th->th_urp - 1; 3045 3046 while (cnt >= 0) { 3047 if (m->m_len > cnt) { 3048 char *cp = mtod(m, caddr_t) + cnt; 3049 struct tcpcb *tp = sototcpcb(so); 3050 3051 INP_WLOCK_ASSERT(tp->t_inpcb); 3052 3053 tp->t_iobc = *cp; 3054 tp->t_oobflags |= TCPOOB_HAVEDATA; 3055 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 3056 m->m_len--; 3057 if (m->m_flags & M_PKTHDR) 3058 m->m_pkthdr.len--; 3059 return; 3060 } 3061 cnt -= m->m_len; 3062 m = m->m_next; 3063 if (m == NULL) 3064 break; 3065 } 3066 panic("tcp_pulloutofband"); 3067} 3068 3069/* 3070 * Collect new round-trip time estimate 3071 * and update averages and current timeout. 3072 */ 3073static void 3074tcp_xmit_timer(struct tcpcb *tp, int rtt) 3075{ 3076 int delta; 3077 3078 INP_WLOCK_ASSERT(tp->t_inpcb); 3079 3080 TCPSTAT_INC(tcps_rttupdated); 3081 tp->t_rttupdated++; 3082 if (tp->t_srtt != 0) { 3083 /* 3084 * srtt is stored as fixed point with 5 bits after the 3085 * binary point (i.e., scaled by 8). The following magic 3086 * is equivalent to the smoothing algorithm in rfc793 with 3087 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 3088 * point). Adjust rtt to origin 0. 3089 */ 3090 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3091 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3092 3093 if ((tp->t_srtt += delta) <= 0) 3094 tp->t_srtt = 1; 3095 3096 /* 3097 * We accumulate a smoothed rtt variance (actually, a 3098 * smoothed mean difference), then set the retransmit 3099 * timer to smoothed rtt + 4 times the smoothed variance. 3100 * rttvar is stored as fixed point with 4 bits after the 3101 * binary point (scaled by 16). The following is 3102 * equivalent to rfc793 smoothing with an alpha of .75 3103 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 3104 * rfc793's wired-in beta. 3105 */ 3106 if (delta < 0) 3107 delta = -delta; 3108 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3109 if ((tp->t_rttvar += delta) <= 0) 3110 tp->t_rttvar = 1; 3111 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 3112 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3113 } else { 3114 /* 3115 * No rtt measurement yet - use the unsmoothed rtt. 3116 * Set the variance to half the rtt (so our first 3117 * retransmit happens at 3*rtt). 3118 */ 3119 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3120 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3121 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3122 } 3123 tp->t_rtttime = 0; 3124 tp->t_rxtshift = 0; 3125 3126 /* 3127 * the retransmit should happen at rtt + 4 * rttvar. 3128 * Because of the way we do the smoothing, srtt and rttvar 3129 * will each average +1/2 tick of bias. When we compute 3130 * the retransmit timer, we want 1/2 tick of rounding and 3131 * 1 extra tick because of +-1/2 tick uncertainty in the 3132 * firing of the timer. The bias will give us exactly the 3133 * 1.5 tick we need. But, because the bias is 3134 * statistical, we have to test that we don't drop below 3135 * the minimum feasible timer (which is 2 ticks). 3136 */ 3137 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3138 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 3139 3140 /* 3141 * We received an ack for a packet that wasn't retransmitted; 3142 * it is probably safe to discard any error indications we've 3143 * received recently. This isn't quite right, but close enough 3144 * for now (a route might have failed after we sent a segment, 3145 * and the return path might not be symmetrical). 3146 */ 3147 tp->t_softerror = 0; 3148} 3149 3150/* 3151 * Determine a reasonable value for maxseg size. 3152 * If the route is known, check route for mtu. 3153 * If none, use an mss that can be handled on the outgoing 3154 * interface without forcing IP to fragment; if bigger than 3155 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES 3156 * to utilize large mbufs. If no route is found, route has no mtu, 3157 * or the destination isn't local, use a default, hopefully conservative 3158 * size (usually 512 or the default IP max size, but no more than the mtu 3159 * of the interface), as we can't discover anything about intervening 3160 * gateways or networks. We also initialize the congestion/slow start 3161 * window to be a single segment if the destination isn't local. 3162 * While looking at the routing entry, we also initialize other path-dependent 3163 * parameters from pre-set or cached values in the routing entry. 3164 * 3165 * Also take into account the space needed for options that we 3166 * send regularly. Make maxseg shorter by that amount to assure 3167 * that we can send maxseg amount of data even when the options 3168 * are present. Store the upper limit of the length of options plus 3169 * data in maxopd. 3170 * 3171 * In case of T/TCP, we call this routine during implicit connection 3172 * setup as well (offer = -1), to initialize maxseg from the cached 3173 * MSS of our peer. 3174 * 3175 * NOTE that this routine is only called when we process an incoming 3176 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt(). 3177 */ 3178void 3179tcp_mss_update(struct tcpcb *tp, int offer, 3180 struct hc_metrics_lite *metricptr, int *mtuflags) 3181{ 3182 int mss; 3183 u_long maxmtu; 3184 struct inpcb *inp = tp->t_inpcb; 3185 struct hc_metrics_lite metrics; 3186 int origoffer = offer; 3187#ifdef INET6 3188 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3189 size_t min_protoh = isipv6 ? 3190 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 3191 sizeof (struct tcpiphdr); 3192#else 3193 const size_t min_protoh = sizeof(struct tcpiphdr); 3194#endif 3195 3196 INP_WLOCK_ASSERT(tp->t_inpcb); 3197 3198 /* Initialize. */ 3199#ifdef INET6 3200 if (isipv6) { 3201 maxmtu = tcp_maxmtu6(&inp->inp_inc, mtuflags); 3202 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt; 3203 } else 3204#endif 3205 { 3206 maxmtu = tcp_maxmtu(&inp->inp_inc, mtuflags); 3207 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt; 3208 } 3209 3210 /* 3211 * No route to sender, stay with default mss and return. 3212 */ 3213 if (maxmtu == 0) { 3214 /* 3215 * In case we return early we need to initialize metrics 3216 * to a defined state as tcp_hc_get() would do for us 3217 * if there was no cache hit. 3218 */ 3219 if (metricptr != NULL) 3220 bzero(metricptr, sizeof(struct hc_metrics_lite)); 3221 return; 3222 } 3223 3224 /* What have we got? */ 3225 switch (offer) { 3226 case 0: 3227 /* 3228 * Offer == 0 means that there was no MSS on the SYN 3229 * segment, in this case we use tcp_mssdflt as 3230 * already assigned to t_maxopd above. 3231 */ 3232 offer = tp->t_maxopd; 3233 break; 3234 3235 case -1: 3236 /* 3237 * Offer == -1 means that we didn't receive SYN yet. 3238 */ 3239 /* FALLTHROUGH */ 3240 3241 default: 3242 /* 3243 * Prevent DoS attack with too small MSS. Round up 3244 * to at least minmss. 3245 */ 3246 offer = max(offer, V_tcp_minmss); 3247 } 3248 3249 /* 3250 * rmx information is now retrieved from tcp_hostcache. 3251 */ 3252 tcp_hc_get(&inp->inp_inc, &metrics); 3253 if (metricptr != NULL) 3254 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite)); 3255 3256 /* 3257 * If there's a discovered mtu int tcp hostcache, use it 3258 * else, use the link mtu. 3259 */ 3260 if (metrics.rmx_mtu) 3261 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 3262 else { 3263#ifdef INET6 3264 if (isipv6) { 3265 mss = maxmtu - min_protoh; 3266 if (!V_path_mtu_discovery && 3267 !in6_localaddr(&inp->in6p_faddr)) 3268 mss = min(mss, V_tcp_v6mssdflt); 3269 } else 3270#endif 3271 { 3272 mss = maxmtu - min_protoh; 3273 if (!V_path_mtu_discovery && 3274 !in_localaddr(inp->inp_faddr)) 3275 mss = min(mss, V_tcp_mssdflt); 3276 } 3277 /* 3278 * XXX - The above conditional (mss = maxmtu - min_protoh) 3279 * probably violates the TCP spec. 3280 * The problem is that, since we don't know the 3281 * other end's MSS, we are supposed to use a conservative 3282 * default. But, if we do that, then MTU discovery will 3283 * never actually take place, because the conservative 3284 * default is much less than the MTUs typically seen 3285 * on the Internet today. For the moment, we'll sweep 3286 * this under the carpet. 3287 * 3288 * The conservative default might not actually be a problem 3289 * if the only case this occurs is when sending an initial 3290 * SYN with options and data to a host we've never talked 3291 * to before. Then, they will reply with an MSS value which 3292 * will get recorded and the new parameters should get 3293 * recomputed. For Further Study. 3294 */ 3295 } 3296 mss = min(mss, offer); 3297 3298 /* 3299 * Sanity check: make sure that maxopd will be large 3300 * enough to allow some data on segments even if the 3301 * all the option space is used (40bytes). Otherwise 3302 * funny things may happen in tcp_output. 3303 */ 3304 mss = max(mss, 64); 3305 3306 /* 3307 * maxopd stores the maximum length of data AND options 3308 * in a segment; maxseg is the amount of data in a normal 3309 * segment. We need to store this value (maxopd) apart 3310 * from maxseg, because now every segment carries options 3311 * and thus we normally have somewhat less data in segments. 3312 */ 3313 tp->t_maxopd = mss; 3314 3315 /* 3316 * origoffer==-1 indicates that no segments were received yet. 3317 * In this case we just guess. 3318 */ 3319 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && 3320 (origoffer == -1 || 3321 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) 3322 mss -= TCPOLEN_TSTAMP_APPA; 3323 3324#if (MCLBYTES & (MCLBYTES - 1)) == 0 3325 if (mss > MCLBYTES) 3326 mss &= ~(MCLBYTES-1); 3327#else 3328 if (mss > MCLBYTES) 3329 mss = mss / MCLBYTES * MCLBYTES; 3330#endif 3331 tp->t_maxseg = mss; 3332} 3333 3334void 3335tcp_mss(struct tcpcb *tp, int offer) 3336{ 3337 int mss; 3338 u_long bufsize; 3339 struct inpcb *inp; 3340 struct socket *so; 3341 struct hc_metrics_lite metrics; 3342 int mtuflags = 0; 3343 3344 KASSERT(tp != NULL, ("%s: tp == NULL", __func__)); 3345 3346 tcp_mss_update(tp, offer, &metrics, &mtuflags); 3347 3348 mss = tp->t_maxseg; 3349 inp = tp->t_inpcb; 3350 3351 /* 3352 * If there's a pipesize, change the socket buffer to that size, 3353 * don't change if sb_hiwat is different than default (then it 3354 * has been changed on purpose with setsockopt). 3355 * Make the socket buffers an integral number of mss units; 3356 * if the mss is larger than the socket buffer, decrease the mss. 3357 */ 3358 so = inp->inp_socket; 3359 SOCKBUF_LOCK(&so->so_snd); 3360 if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe) 3361 bufsize = metrics.rmx_sendpipe; 3362 else 3363 bufsize = so->so_snd.sb_hiwat; 3364 if (bufsize < mss) 3365 mss = bufsize; 3366 else { 3367 bufsize = roundup(bufsize, mss); 3368 if (bufsize > sb_max) 3369 bufsize = sb_max; 3370 if (bufsize > so->so_snd.sb_hiwat) 3371 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL); 3372 } 3373 SOCKBUF_UNLOCK(&so->so_snd); 3374 tp->t_maxseg = mss; 3375 3376 SOCKBUF_LOCK(&so->so_rcv); 3377 if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe) 3378 bufsize = metrics.rmx_recvpipe; 3379 else 3380 bufsize = so->so_rcv.sb_hiwat; 3381 if (bufsize > mss) { 3382 bufsize = roundup(bufsize, mss); 3383 if (bufsize > sb_max) 3384 bufsize = sb_max; 3385 if (bufsize > so->so_rcv.sb_hiwat) 3386 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL); 3387 } 3388 SOCKBUF_UNLOCK(&so->so_rcv); 3389 3390 /* Check the interface for TSO capabilities. */ 3391 if (mtuflags & CSUM_TSO) 3392 tp->t_flags |= TF_TSO; 3393} 3394 3395/* 3396 * Determine the MSS option to send on an outgoing SYN. 3397 */ 3398int 3399tcp_mssopt(struct in_conninfo *inc) 3400{ 3401 int mss = 0; 3402 u_long maxmtu = 0; 3403 u_long thcmtu = 0; 3404 size_t min_protoh; 3405 3406 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer")); 3407 3408#ifdef INET6 3409 if (inc->inc_flags & INC_ISIPV6) { 3410 mss = V_tcp_v6mssdflt; 3411 maxmtu = tcp_maxmtu6(inc, NULL); 3412 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3413 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 3414 } else 3415#endif 3416 { 3417 mss = V_tcp_mssdflt; 3418 maxmtu = tcp_maxmtu(inc, NULL); 3419 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3420 min_protoh = sizeof(struct tcpiphdr); 3421 } 3422 if (maxmtu && thcmtu) 3423 mss = min(maxmtu, thcmtu) - min_protoh; 3424 else if (maxmtu || thcmtu) 3425 mss = max(maxmtu, thcmtu) - min_protoh; 3426 3427 return (mss); 3428} 3429 3430 3431/* 3432 * On a partial ack arrives, force the retransmission of the 3433 * next unacknowledged segment. Do not clear tp->t_dupacks. 3434 * By setting snd_nxt to ti_ack, this forces retransmission timer to 3435 * be started again. 3436 */ 3437static void 3438tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) 3439{ 3440 tcp_seq onxt = tp->snd_nxt; 3441 u_long ocwnd = tp->snd_cwnd; 3442 3443 INP_WLOCK_ASSERT(tp->t_inpcb); 3444 3445 tcp_timer_activate(tp, TT_REXMT, 0); 3446 tp->t_rtttime = 0; 3447 tp->snd_nxt = th->th_ack; 3448 /* 3449 * Set snd_cwnd to one segment beyond acknowledged offset. 3450 * (tp->snd_una has not yet been updated when this function is called.) 3451 */ 3452 tp->snd_cwnd = tp->t_maxseg + BYTES_THIS_ACK(tp, th); 3453 tp->t_flags |= TF_ACKNOW; 3454 (void) tcp_output(tp); 3455 tp->snd_cwnd = ocwnd; 3456 if (SEQ_GT(onxt, tp->snd_nxt)) 3457 tp->snd_nxt = onxt; 3458 /* 3459 * Partial window deflation. Relies on fact that tp->snd_una 3460 * not updated yet. 3461 */ 3462 if (tp->snd_cwnd > BYTES_THIS_ACK(tp, th)) 3463 tp->snd_cwnd -= BYTES_THIS_ACK(tp, th); 3464 else 3465 tp->snd_cwnd = 0; 3466 tp->snd_cwnd += tp->t_maxseg; 3467} 3468