tcp_input.c revision 183356
1/*- 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95 30 */ 31 32#include <sys/cdefs.h> 33__FBSDID("$FreeBSD: head/sys/netinet/tcp_input.c 183356 2008-09-25 17:26:54Z rwatson $"); 34 35#include "opt_ipfw.h" /* for ipfw_fwd */ 36#include "opt_inet.h" 37#include "opt_inet6.h" 38#include "opt_ipsec.h" 39#include "opt_mac.h" 40#include "opt_tcpdebug.h" 41 42#include <sys/param.h> 43#include <sys/kernel.h> 44#include <sys/malloc.h> 45#include <sys/mbuf.h> 46#include <sys/proc.h> /* for proc0 declaration */ 47#include <sys/protosw.h> 48#include <sys/signalvar.h> 49#include <sys/socket.h> 50#include <sys/socketvar.h> 51#include <sys/sysctl.h> 52#include <sys/syslog.h> 53#include <sys/systm.h> 54#include <sys/vimage.h> 55 56#include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */ 57 58#include <vm/uma.h> 59 60#include <net/if.h> 61#include <net/route.h> 62 63#define TCPSTATES /* for logging */ 64 65#include <netinet/in.h> 66#include <netinet/in_pcb.h> 67#include <netinet/in_systm.h> 68#include <netinet/in_var.h> 69#include <netinet/ip.h> 70#include <netinet/ip_icmp.h> /* required for icmp_var.h */ 71#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 72#include <netinet/ip_var.h> 73#include <netinet/ip_options.h> 74#include <netinet/ip6.h> 75#include <netinet/icmp6.h> 76#include <netinet6/in6_pcb.h> 77#include <netinet6/ip6_var.h> 78#include <netinet6/nd6.h> 79#include <netinet/tcp.h> 80#include <netinet/tcp_fsm.h> 81#include <netinet/tcp_seq.h> 82#include <netinet/tcp_timer.h> 83#include <netinet/tcp_var.h> 84#include <netinet6/tcp6_var.h> 85#include <netinet/tcpip.h> 86#include <netinet/tcp_syncache.h> 87#ifdef TCPDEBUG 88#include <netinet/tcp_debug.h> 89#endif /* TCPDEBUG */ 90 91#ifdef IPSEC 92#include <netipsec/ipsec.h> 93#include <netipsec/ipsec6.h> 94#endif /*IPSEC*/ 95 96#include <machine/in_cksum.h> 97 98#include <security/mac/mac_framework.h> 99 100static const 
int tcprexmtthresh = 3; 101 102struct tcpstat tcpstat; 103SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW, 104 &tcpstat , tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)"); 105 106int tcp_log_in_vain = 0; 107SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW, 108 &tcp_log_in_vain, 0, "Log all incoming TCP segments to closed ports"); 109 110static int blackhole = 0; 111SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW, 112 &blackhole, 0, "Do not send RST on segments to closed ports"); 113 114int tcp_delack_enabled = 1; 115SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW, 116 &tcp_delack_enabled, 0, 117 "Delay ACK to try and piggyback it onto a data packet"); 118 119static int drop_synfin = 0; 120SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW, 121 &drop_synfin, 0, "Drop TCP packets with SYN+FIN set"); 122 123static int tcp_do_rfc3042 = 1; 124SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW, 125 &tcp_do_rfc3042, 0, "Enable RFC 3042 (Limited Transmit)"); 126 127static int tcp_do_rfc3390 = 1; 128SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW, 129 &tcp_do_rfc3390, 0, 130 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)"); 131 132int tcp_do_ecn = 0; 133int tcp_ecn_maxretries = 1; 134SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN"); 135SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, enable, CTLFLAG_RW, 136 &tcp_do_ecn, 0, "TCP ECN support"); 137SYSCTL_INT(_net_inet_tcp_ecn, OID_AUTO, maxretries, CTLFLAG_RW, 138 &tcp_ecn_maxretries, 0, "Max retries before giving up on ECN"); 139 140static int tcp_insecure_rst = 0; 141SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW, 142 &tcp_insecure_rst, 0, 143 "Follow the old (insecure) criteria for accepting RST packets"); 144 145int tcp_do_autorcvbuf = 1; 146SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW, 147 &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing"); 148 149int tcp_autorcvbuf_inc = 16*1024; 150SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW, 151 &tcp_autorcvbuf_inc, 0, 152 "Incrementor step size of automatic receive buffer"); 153 154int tcp_autorcvbuf_max = 256*1024; 155SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW, 156 &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer"); 157 158struct inpcbhead tcb; 159#define tcb6 tcb /* for KAME src sync over BSD*'s */ 160struct inpcbinfo tcbinfo; 161 162static void tcp_dooptions(struct tcpopt *, u_char *, int, int); 163static void tcp_do_segment(struct mbuf *, struct tcphdr *, 164 struct socket *, struct tcpcb *, int, int, uint8_t); 165static void tcp_dropwithreset(struct mbuf *, struct tcphdr *, 166 struct tcpcb *, int, int); 167static void tcp_pulloutofband(struct socket *, 168 struct tcphdr *, struct mbuf *, int); 169static void tcp_xmit_timer(struct tcpcb *, int); 170static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *); 171static void inline 172 tcp_congestion_exp(struct tcpcb *); 173 174static void inline 175tcp_congestion_exp(struct tcpcb *tp) 176{ 177 u_int win; 178 179 win = min(tp->snd_wnd, tp->snd_cwnd) / 180 2 / tp->t_maxseg; 181 if (win < 2) 182 win = 2; 183 tp->snd_ssthresh = win * tp->t_maxseg; 184 ENTER_FASTRECOVERY(tp); 185 tp->snd_recover = tp->snd_max; 186 if (tp->t_flags & TF_ECN_PERMIT) 187 tp->t_flags |= TF_ECN_SND_CWR; 188} 189 190/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. 
*/ 191#ifdef INET6 192#define ND6_HINT(tp) \ 193do { \ 194 if ((tp) && (tp)->t_inpcb && \ 195 ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \ 196 nd6_nud_hint(NULL, NULL, 0); \ 197} while (0) 198#else 199#define ND6_HINT(tp) 200#endif 201 202/* 203 * Indicate whether this ack should be delayed. We can delay the ack if 204 * - there is no delayed ack timer in progress and 205 * - our last ack wasn't a 0-sized window. We never want to delay 206 * the ack that opens up a 0-sized window and 207 * - delayed acks are enabled or 208 * - this is a half-synchronized T/TCP connection. 209 */ 210#define DELAY_ACK(tp) \ 211 ((!tcp_timer_active(tp, TT_DELACK) && \ 212 (tp->t_flags & TF_RXWIN0SENT) == 0) && \ 213 (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN))) 214 215/* 216 * TCP input handling is split into multiple parts: 217 * tcp6_input is a thin wrapper around tcp_input for the extended 218 * ip6_protox[] call format in ip6_input 219 * tcp_input handles primary segment validation, inpcb lookup and 220 * SYN processing on listen sockets 221 * tcp_do_segment processes the ACK and text of the segment for 222 * establishing, established and closing connections 223 */ 224#ifdef INET6 225int 226tcp6_input(struct mbuf **mp, int *offp, int proto) 227{ 228 struct mbuf *m = *mp; 229 struct in6_ifaddr *ia6; 230 231 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE); 232 233 /* 234 * draft-itojun-ipv6-tcp-to-anycast 235 * better place to put this in? 236 */ 237 ia6 = ip6_getdstifaddr(m); 238 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) { 239 struct ip6_hdr *ip6; 240 241 ip6 = mtod(m, struct ip6_hdr *); 242 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR, 243 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6); 244 return IPPROTO_DONE; 245 } 246 247 tcp_input(m, *offp); 248 return IPPROTO_DONE; 249} 250#endif 251 252void 253tcp_input(struct mbuf *m, int off0) 254{ 255 struct tcphdr *th; 256 struct ip *ip = NULL; 257 struct ipovly *ipov; 258 struct inpcb *inp = NULL; 259 struct tcpcb *tp = NULL; 260 struct socket *so = NULL; 261 u_char *optp = NULL; 262 int optlen = 0; 263 int len, tlen, off; 264 int drop_hdrlen; 265 int thflags; 266 int rstreason = 0; /* For badport_bandlim accounting purposes */ 267 uint8_t iptos; 268#ifdef IPFIREWALL_FORWARD 269 struct m_tag *fwd_tag; 270#endif 271#ifdef INET6 272 struct ip6_hdr *ip6 = NULL; 273 int isipv6; 274#else 275 const void *ip6 = NULL; 276 const int isipv6 = 0; 277#endif 278 struct tcpopt to; /* options in this segment */ 279 char *s = NULL; /* address and port logging */ 280 281#ifdef TCPDEBUG 282 /* 283 * The size of tcp_saveipgen must be the size of the max ip header, 284 * now IPv6. 285 */ 286 u_char tcp_saveipgen[IP6_HDR_LEN]; 287 struct tcphdr tcp_savetcp; 288 short ostate = 0; 289#endif 290 291#ifdef INET6 292 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0; 293#endif 294 295 to.to_flags = 0; 296 V_tcpstat.tcps_rcvtotal++; 297 298 if (isipv6) { 299#ifdef INET6 300 /* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */ 301 ip6 = mtod(m, struct ip6_hdr *); 302 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0; 303 if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) { 304 V_tcpstat.tcps_rcvbadsum++; 305 goto drop; 306 } 307 th = (struct tcphdr *)((caddr_t)ip6 + off0); 308 309 /* 310 * Be proactive about unspecified IPv6 address in source. 311 * As we use all-zero to indicate unbounded/unconnected pcb, 312 * unspecified IPv6 address can be used to confuse us. 
313 * 314 * Note that packets with unspecified IPv6 destination is 315 * already dropped in ip6_input. 316 */ 317 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { 318 /* XXX stat */ 319 goto drop; 320 } 321#else 322 th = NULL; /* XXX: Avoid compiler warning. */ 323#endif 324 } else { 325 /* 326 * Get IP and TCP header together in first mbuf. 327 * Note: IP leaves IP header in first mbuf. 328 */ 329 if (off0 > sizeof (struct ip)) { 330 ip_stripoptions(m, (struct mbuf *)0); 331 off0 = sizeof(struct ip); 332 } 333 if (m->m_len < sizeof (struct tcpiphdr)) { 334 if ((m = m_pullup(m, sizeof (struct tcpiphdr))) 335 == NULL) { 336 V_tcpstat.tcps_rcvshort++; 337 return; 338 } 339 } 340 ip = mtod(m, struct ip *); 341 ipov = (struct ipovly *)ip; 342 th = (struct tcphdr *)((caddr_t)ip + off0); 343 tlen = ip->ip_len; 344 345 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 346 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 347 th->th_sum = m->m_pkthdr.csum_data; 348 else 349 th->th_sum = in_pseudo(ip->ip_src.s_addr, 350 ip->ip_dst.s_addr, 351 htonl(m->m_pkthdr.csum_data + 352 ip->ip_len + 353 IPPROTO_TCP)); 354 th->th_sum ^= 0xffff; 355#ifdef TCPDEBUG 356 ipov->ih_len = (u_short)tlen; 357 ipov->ih_len = htons(ipov->ih_len); 358#endif 359 } else { 360 /* 361 * Checksum extended TCP header and data. 362 */ 363 len = sizeof (struct ip) + tlen; 364 bzero(ipov->ih_x1, sizeof(ipov->ih_x1)); 365 ipov->ih_len = (u_short)tlen; 366 ipov->ih_len = htons(ipov->ih_len); 367 th->th_sum = in_cksum(m, len); 368 } 369 if (th->th_sum) { 370 V_tcpstat.tcps_rcvbadsum++; 371 goto drop; 372 } 373 /* Re-initialization for later version check */ 374 ip->ip_v = IPVERSION; 375 } 376 377#ifdef INET6 378 if (isipv6) 379 iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff; 380 else 381#endif 382 iptos = ip->ip_tos; 383 384 /* 385 * Check that TCP offset makes sense, 386 * pull out TCP options and adjust length. XXX 387 */ 388 off = th->th_off << 2; 389 if (off < sizeof (struct tcphdr) || off > tlen) { 390 V_tcpstat.tcps_rcvbadoff++; 391 goto drop; 392 } 393 tlen -= off; /* tlen is used instead of ti->ti_len */ 394 if (off > sizeof (struct tcphdr)) { 395 if (isipv6) { 396#ifdef INET6 397 IP6_EXTHDR_CHECK(m, off0, off, ); 398 ip6 = mtod(m, struct ip6_hdr *); 399 th = (struct tcphdr *)((caddr_t)ip6 + off0); 400#endif 401 } else { 402 if (m->m_len < sizeof(struct ip) + off) { 403 if ((m = m_pullup(m, sizeof (struct ip) + off)) 404 == NULL) { 405 V_tcpstat.tcps_rcvshort++; 406 return; 407 } 408 ip = mtod(m, struct ip *); 409 ipov = (struct ipovly *)ip; 410 th = (struct tcphdr *)((caddr_t)ip + off0); 411 } 412 } 413 optlen = off - sizeof (struct tcphdr); 414 optp = (u_char *)(th + 1); 415 } 416 thflags = th->th_flags; 417 418 /* 419 * Convert TCP protocol specific fields to host format. 420 */ 421 th->th_seq = ntohl(th->th_seq); 422 th->th_ack = ntohl(th->th_ack); 423 th->th_win = ntohs(th->th_win); 424 th->th_urp = ntohs(th->th_urp); 425 426 /* 427 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options. 428 */ 429 drop_hdrlen = off0 + off; 430 431 /* 432 * Locate pcb for segment. 433 */ 434 INP_INFO_WLOCK(&V_tcbinfo); 435findpcb: 436 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 437#ifdef IPFIREWALL_FORWARD 438 /* 439 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. 440 */ 441 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL); 442 443 if (fwd_tag != NULL && isipv6 == 0) { /* IPv6 support is not yet */ 444 struct sockaddr_in *next_hop; 445 446 next_hop = (struct sockaddr_in *)(fwd_tag+1); 447 /* 448 * Transparently forwarded. 
Pretend to be the destination. 449 * already got one like this? 450 */ 451 inp = in_pcblookup_hash(&V_tcbinfo, 452 ip->ip_src, th->th_sport, 453 ip->ip_dst, th->th_dport, 454 0, m->m_pkthdr.rcvif); 455 if (!inp) { 456 /* It's new. Try to find the ambushing socket. */ 457 inp = in_pcblookup_hash(&V_tcbinfo, 458 ip->ip_src, th->th_sport, 459 next_hop->sin_addr, 460 next_hop->sin_port ? 461 ntohs(next_hop->sin_port) : 462 th->th_dport, 463 INPLOOKUP_WILDCARD, 464 m->m_pkthdr.rcvif); 465 } 466 /* Remove the tag from the packet. We don't need it anymore. */ 467 m_tag_delete(m, fwd_tag); 468 } else 469#endif /* IPFIREWALL_FORWARD */ 470 { 471 if (isipv6) { 472#ifdef INET6 473 inp = in6_pcblookup_hash(&V_tcbinfo, 474 &ip6->ip6_src, th->th_sport, 475 &ip6->ip6_dst, th->th_dport, 476 INPLOOKUP_WILDCARD, 477 m->m_pkthdr.rcvif); 478#endif 479 } else 480 inp = in_pcblookup_hash(&V_tcbinfo, 481 ip->ip_src, th->th_sport, 482 ip->ip_dst, th->th_dport, 483 INPLOOKUP_WILDCARD, 484 m->m_pkthdr.rcvif); 485 } 486 487 /* 488 * If the INPCB does not exist then all data in the incoming 489 * segment is discarded and an appropriate RST is sent back. 490 * XXX MRT Send RST using which routing table? 491 */ 492 if (inp == NULL) { 493 /* 494 * Log communication attempts to ports that are not 495 * in use. 496 */ 497 if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) || 498 tcp_log_in_vain == 2) { 499 if ((s = tcp_log_addrs(NULL, th, (void *)ip, ip6))) 500 log(LOG_INFO, "%s; %s: Connection attempt " 501 "to closed port\n", s, __func__); 502 } 503 /* 504 * When blackholing do not respond with a RST but 505 * completely ignore the segment and drop it. 506 */ 507 if ((V_blackhole == 1 && (thflags & TH_SYN)) || 508 V_blackhole == 2) 509 goto dropunlock; 510 511 rstreason = BANDLIM_RST_CLOSEDPORT; 512 goto dropwithreset; 513 } 514 INP_WLOCK(inp); 515 516#ifdef IPSEC 517#ifdef INET6 518 if (isipv6 && ipsec6_in_reject(m, inp)) { 519 V_ipsec6stat.in_polvio++; 520 goto dropunlock; 521 } else 522#endif /* INET6 */ 523 if (ipsec4_in_reject(m, inp) != 0) { 524 V_ipsec4stat.in_polvio++; 525 goto dropunlock; 526 } 527#endif /* IPSEC */ 528 529 /* 530 * Check the minimum TTL for socket. 531 */ 532 if (inp->inp_ip_minttl != 0) { 533#ifdef INET6 534 if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim) 535 goto dropunlock; 536 else 537#endif 538 if (inp->inp_ip_minttl > ip->ip_ttl) 539 goto dropunlock; 540 } 541 542 /* 543 * A previous connection in TIMEWAIT state is supposed to catch 544 * stray or duplicate segments arriving late. If this segment 545 * was a legitimate new connection attempt the old INPCB gets 546 * removed and we can try again to find a listening socket. 547 */ 548 if (inp->inp_vflag & INP_TIMEWAIT) { 549 if (thflags & TH_SYN) 550 tcp_dooptions(&to, optp, optlen, TO_SYN); 551 /* 552 * NB: tcp_twcheck unlocks the INP and frees the mbuf. 553 */ 554 if (tcp_twcheck(inp, &to, th, m, tlen)) 555 goto findpcb; 556 INP_INFO_WUNLOCK(&V_tcbinfo); 557 return; 558 } 559 /* 560 * The TCPCB may no longer exist if the connection is winding 561 * down or it is in the CLOSED state. Either way we drop the 562 * segment and send an appropriate response. 
563 */ 564 tp = intotcpcb(inp); 565 if (tp == NULL || tp->t_state == TCPS_CLOSED) { 566 rstreason = BANDLIM_RST_CLOSEDPORT; 567 goto dropwithreset; 568 } 569 570#ifdef MAC 571 INP_WLOCK_ASSERT(inp); 572 if (mac_inpcb_check_deliver(inp, m)) 573 goto dropunlock; 574#endif 575 so = inp->inp_socket; 576 KASSERT(so != NULL, ("%s: so == NULL", __func__)); 577#ifdef TCPDEBUG 578 if (so->so_options & SO_DEBUG) { 579 ostate = tp->t_state; 580 if (isipv6) { 581#ifdef INET6 582 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6)); 583#endif 584 } else 585 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip)); 586 tcp_savetcp = *th; 587 } 588#endif 589 /* 590 * When the socket is accepting connections (the INPCB is in LISTEN 591 * state) we look into the SYN cache if this is a new connection 592 * attempt or the completion of a previous one. 593 */ 594 if (so->so_options & SO_ACCEPTCONN) { 595 struct in_conninfo inc; 596 597 KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but " 598 "tp not listening", __func__)); 599 600 bzero(&inc, sizeof(inc)); 601 inc.inc_isipv6 = isipv6; 602#ifdef INET6 603 if (isipv6) { 604 inc.inc6_faddr = ip6->ip6_src; 605 inc.inc6_laddr = ip6->ip6_dst; 606 } else 607#endif 608 { 609 inc.inc_faddr = ip->ip_src; 610 inc.inc_laddr = ip->ip_dst; 611 } 612 inc.inc_fport = th->th_sport; 613 inc.inc_lport = th->th_dport; 614 615 /* 616 * Check for an existing connection attempt in syncache if 617 * the flag is only ACK. A successful lookup creates a new 618 * socket appended to the listen queue in SYN_RECEIVED state. 619 */ 620 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) { 621 /* 622 * Parse the TCP options here because 623 * syncookies need access to the reflected 624 * timestamp. 625 */ 626 tcp_dooptions(&to, optp, optlen, 0); 627 /* 628 * NB: syncache_expand() doesn't unlock 629 * inp and tcpinfo locks. 630 */ 631 if (!syncache_expand(&inc, &to, th, &so, m)) { 632 /* 633 * No syncache entry or ACK was not 634 * for our SYN/ACK. Send a RST. 635 * NB: syncache did its own logging 636 * of the failure cause. 637 */ 638 rstreason = BANDLIM_RST_OPENPORT; 639 goto dropwithreset; 640 } 641 if (so == NULL) { 642 /* 643 * We completed the 3-way handshake 644 * but could not allocate a socket 645 * either due to memory shortage, 646 * listen queue length limits or 647 * global socket limits. Send RST 648 * or wait and have the remote end 649 * retransmit the ACK for another 650 * try. 651 */ 652 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 653 log(LOG_DEBUG, "%s; %s: Listen socket: " 654 "Socket allocation failed due to " 655 "limits or memory shortage, %s\n", 656 s, __func__, 657 V_tcp_sc_rst_sock_fail ? 658 "sending RST" : "try again"); 659 if (V_tcp_sc_rst_sock_fail) { 660 rstreason = BANDLIM_UNLIMITED; 661 goto dropwithreset; 662 } else 663 goto dropunlock; 664 } 665 /* 666 * Socket is created in state SYN_RECEIVED. 667 * Unlock the listen socket, lock the newly 668 * created socket and update the tp variable. 669 */ 670 INP_WUNLOCK(inp); /* listen socket */ 671 inp = sotoinpcb(so); 672 INP_WLOCK(inp); /* new connection */ 673 tp = intotcpcb(inp); 674 KASSERT(tp->t_state == TCPS_SYN_RECEIVED, 675 ("%s: ", __func__)); 676 /* 677 * Process the segment and the data it 678 * contains. tcp_do_segment() consumes 679 * the mbuf chain and unlocks the inpcb. 
680 */ 681 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, 682 iptos); 683 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 684 return; 685 } 686 /* 687 * Segment flag validation for new connection attempts: 688 * 689 * Our (SYN|ACK) response was rejected. 690 * Check with syncache and remove entry to prevent 691 * retransmits. 692 * 693 * NB: syncache_chkrst does its own logging of failure 694 * causes. 695 */ 696 if (thflags & TH_RST) { 697 syncache_chkrst(&inc, th); 698 goto dropunlock; 699 } 700 /* 701 * We can't do anything without SYN. 702 */ 703 if ((thflags & TH_SYN) == 0) { 704 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 705 log(LOG_DEBUG, "%s; %s: Listen socket: " 706 "SYN is missing, segment ignored\n", 707 s, __func__); 708 V_tcpstat.tcps_badsyn++; 709 goto dropunlock; 710 } 711 /* 712 * (SYN|ACK) is bogus on a listen socket. 713 */ 714 if (thflags & TH_ACK) { 715 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 716 log(LOG_DEBUG, "%s; %s: Listen socket: " 717 "SYN|ACK invalid, segment rejected\n", 718 s, __func__); 719 syncache_badack(&inc); /* XXX: Not needed! */ 720 V_tcpstat.tcps_badsyn++; 721 rstreason = BANDLIM_RST_OPENPORT; 722 goto dropwithreset; 723 } 724 /* 725 * If the drop_synfin option is enabled, drop all 726 * segments with both the SYN and FIN bits set. 727 * This prevents e.g. nmap from identifying the 728 * TCP/IP stack. 729 * XXX: Poor reasoning. nmap has other methods 730 * and is constantly refining its stack detection 731 * strategies. 732 * XXX: This is a violation of the TCP specification 733 * and was used by RFC1644. 734 */ 735 if ((thflags & TH_FIN) && V_drop_synfin) { 736 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 737 log(LOG_DEBUG, "%s; %s: Listen socket: " 738 "SYN|FIN segment ignored (based on " 739 "sysctl setting)\n", s, __func__); 740 V_tcpstat.tcps_badsyn++; 741 goto dropunlock; 742 } 743 /* 744 * Segment's flags are (SYN) or (SYN|FIN). 745 * 746 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored 747 * as they do not affect the state of the TCP FSM. 748 * The data pointed to by TH_URG and th_urp is ignored. 749 */ 750 KASSERT((thflags & (TH_RST|TH_ACK)) == 0, 751 ("%s: Listen socket: TH_RST or TH_ACK set", __func__)); 752 KASSERT(thflags & (TH_SYN), 753 ("%s: Listen socket: TH_SYN not set", __func__)); 754#ifdef INET6 755 /* 756 * If deprecated address is forbidden, 757 * we do not accept SYN to deprecated interface 758 * address to prevent any new inbound connection from 759 * getting established. 760 * When we do not accept SYN, we send a TCP RST, 761 * with deprecated source address (instead of dropping 762 * it). We compromise it as it is much better for peer 763 * to send a RST, and RST will be the final packet 764 * for the exchange. 765 * 766 * If we do not forbid deprecated addresses, we accept 767 * the SYN packet. RFC2462 does not suggest dropping 768 * SYN in this case. 769 * If we decipher RFC2462 5.5.4, it says like this: 770 * 1. use of deprecated addr with existing 771 * communication is okay - "SHOULD continue to be 772 * used" 773 * 2. use of it with new communication: 774 * (2a) "SHOULD NOT be used if alternate address 775 * with sufficient scope is available" 776 * (2b) nothing mentioned otherwise. 777 * Here we fall into (2b) case as we have no choice in 778 * our source address selection - we must obey the peer. 779 * 780 * The wording in RFC2462 is confusing, and there are 781 * multiple description text for deprecated address 782 * handling - worse, they are not exactly the same. 
783 * I believe 5.5.4 is the best one, so we follow 5.5.4. 784 */ 785 if (isipv6 && !V_ip6_use_deprecated) { 786 struct in6_ifaddr *ia6; 787 788 if ((ia6 = ip6_getdstifaddr(m)) && 789 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) { 790 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 791 log(LOG_DEBUG, "%s; %s: Listen socket: " 792 "Connection attempt to deprecated " 793 "IPv6 address rejected\n", 794 s, __func__); 795 rstreason = BANDLIM_RST_OPENPORT; 796 goto dropwithreset; 797 } 798 } 799#endif 800 /* 801 * Basic sanity checks on incoming SYN requests: 802 * Don't respond if the destination is a link layer 803 * broadcast according to RFC1122 4.2.3.10, p. 104. 804 * If it is from this socket it must be forged. 805 * Don't respond if the source or destination is a 806 * global or subnet broad- or multicast address. 807 * Note that it is quite possible to receive unicast 808 * link-layer packets with a broadcast IP address. Use 809 * in_broadcast() to find them. 810 */ 811 if (m->m_flags & (M_BCAST|M_MCAST)) { 812 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 813 log(LOG_DEBUG, "%s; %s: Listen socket: " 814 "Connection attempt from broad- or multicast " 815 "link layer address ignored\n", s, __func__); 816 goto dropunlock; 817 } 818 if (isipv6) { 819#ifdef INET6 820 if (th->th_dport == th->th_sport && 821 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) { 822 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 823 log(LOG_DEBUG, "%s; %s: Listen socket: " 824 "Connection attempt to/from self " 825 "ignored\n", s, __func__); 826 goto dropunlock; 827 } 828 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 829 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) { 830 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 831 log(LOG_DEBUG, "%s; %s: Listen socket: " 832 "Connection attempt from/to multicast " 833 "address ignored\n", s, __func__); 834 goto dropunlock; 835 } 836#endif 837 } else { 838 if (th->th_dport == th->th_sport && 839 ip->ip_dst.s_addr == ip->ip_src.s_addr) { 840 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 841 log(LOG_DEBUG, "%s; %s: Listen socket: " 842 "Connection attempt from/to self " 843 "ignored\n", s, __func__); 844 goto dropunlock; 845 } 846 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 847 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 848 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 849 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) { 850 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 851 log(LOG_DEBUG, "%s; %s: Listen socket: " 852 "Connection attempt from/to broad- " 853 "or multicast address ignored\n", 854 s, __func__); 855 goto dropunlock; 856 } 857 } 858 /* 859 * SYN appears to be valid. Create compressed TCP state 860 * for syncache. 861 */ 862#ifdef TCPDEBUG 863 if (so->so_options & SO_DEBUG) 864 tcp_trace(TA_INPUT, ostate, tp, 865 (void *)tcp_saveipgen, &tcp_savetcp, 0); 866#endif 867 tcp_dooptions(&to, optp, optlen, TO_SYN); 868 syncache_add(&inc, &to, th, inp, &so, m); 869 /* 870 * Entry added to syncache and mbuf consumed. 871 * Everything already unlocked by syncache_add(). 872 */ 873 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 874 return; 875 } 876 877 /* 878 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later 879 * state. tcp_do_segment() always consumes the mbuf chain, unlocks 880 * the inpcb, and unlocks pcbinfo. 
881 */ 882 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos); 883 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 884 return; 885 886dropwithreset: 887 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 888 889 /* 890 * If inp is non-NULL, we call tcp_dropwithreset() holding both inpcb 891 * and global locks. However, if NULL, we must hold neither as 892 * firewalls may acquire the global lock in order to look for a 893 * matching inpcb. 894 */ 895 if (inp != NULL) { 896 tcp_dropwithreset(m, th, tp, tlen, rstreason); 897 INP_WUNLOCK(inp); 898 } 899 INP_INFO_WUNLOCK(&V_tcbinfo); 900 if (inp == NULL) 901 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 902 m = NULL; /* mbuf chain got consumed. */ 903 goto drop; 904 905dropunlock: 906 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 907 if (inp != NULL) 908 INP_WUNLOCK(inp); 909 INP_INFO_WUNLOCK(&V_tcbinfo); 910 911drop: 912 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 913 if (s != NULL) 914 free(s, M_TCPLOG); 915 if (m != NULL) 916 m_freem(m); 917 return; 918} 919 920static void 921tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 922 struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos) 923{ 924 int thflags, acked, ourfinisacked, needoutput = 0; 925 int headlocked = 1; 926 int rstreason, todrop, win; 927 u_long tiwin; 928 struct tcpopt to; 929 930#ifdef TCPDEBUG 931 /* 932 * The size of tcp_saveipgen must be the size of the max ip header, 933 * now IPv6. 934 */ 935 u_char tcp_saveipgen[IP6_HDR_LEN]; 936 struct tcphdr tcp_savetcp; 937 short ostate = 0; 938#endif 939 thflags = th->th_flags; 940 941 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 942 INP_WLOCK_ASSERT(tp->t_inpcb); 943 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 944 __func__)); 945 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 946 __func__)); 947 948 /* 949 * Segment received on connection. 950 * Reset idle time and keep-alive timer. 951 * XXX: This should be done after segment 952 * validation to ignore broken/spoofed segs. 953 */ 954 tp->t_rcvtime = ticks; 955 if (TCPS_HAVEESTABLISHED(tp->t_state)) 956 tcp_timer_activate(tp, TT_KEEP, tcp_keepidle); 957 958 /* 959 * Unscale the window into a 32-bit value. 960 * For the SYN_SENT state the scale is zero. 961 */ 962 tiwin = th->th_win << tp->snd_scale; 963 964 /* 965 * TCP ECN processing. 966 */ 967 if (tp->t_flags & TF_ECN_PERMIT) { 968 switch (iptos & IPTOS_ECN_MASK) { 969 case IPTOS_ECN_CE: 970 tp->t_flags |= TF_ECN_SND_ECE; 971 V_tcpstat.tcps_ecn_ce++; 972 break; 973 case IPTOS_ECN_ECT0: 974 V_tcpstat.tcps_ecn_ect0++; 975 break; 976 case IPTOS_ECN_ECT1: 977 V_tcpstat.tcps_ecn_ect1++; 978 break; 979 } 980 981 if (thflags & TH_CWR) 982 tp->t_flags &= ~TF_ECN_SND_ECE; 983 984 /* 985 * Congestion experienced. 986 * Ignore if we are already trying to recover. 987 */ 988 if ((thflags & TH_ECE) && 989 SEQ_LEQ(th->th_ack, tp->snd_recover)) { 990 V_tcpstat.tcps_ecn_rcwnd++; 991 tcp_congestion_exp(tp); 992 } 993 } 994 995 /* 996 * Parse options on any incoming segment. 997 */ 998 tcp_dooptions(&to, (u_char *)(th + 1), 999 (th->th_off << 2) - sizeof(struct tcphdr), 1000 (thflags & TH_SYN) ? TO_SYN : 0); 1001 1002 /* 1003 * If echoed timestamp is later than the current time, 1004 * fall back to non RFC1323 RTT calculation. Normalize 1005 * timestamp if syncookies were used when this connection 1006 * was established. 
1007 */ 1008 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 1009 to.to_tsecr -= tp->ts_offset; 1010 if (TSTMP_GT(to.to_tsecr, ticks)) 1011 to.to_tsecr = 0; 1012 } 1013 1014 /* 1015 * Process options only when we get SYN/ACK back. The SYN case 1016 * for incoming connections is handled in tcp_syncache. 1017 * According to RFC1323 the window field in a SYN (i.e., a <SYN> 1018 * or <SYN,ACK>) segment itself is never scaled. 1019 * XXX this is traditional behavior, may need to be cleaned up. 1020 */ 1021 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 1022 if ((to.to_flags & TOF_SCALE) && 1023 (tp->t_flags & TF_REQ_SCALE)) { 1024 tp->t_flags |= TF_RCVD_SCALE; 1025 tp->snd_scale = to.to_wscale; 1026 } 1027 /* 1028 * Initial send window. It will be updated with 1029 * the next incoming segment to the scaled value. 1030 */ 1031 tp->snd_wnd = th->th_win; 1032 if (to.to_flags & TOF_TS) { 1033 tp->t_flags |= TF_RCVD_TSTMP; 1034 tp->ts_recent = to.to_tsval; 1035 tp->ts_recent_age = ticks; 1036 } 1037 if (to.to_flags & TOF_MSS) 1038 tcp_mss(tp, to.to_mss); 1039 if ((tp->t_flags & TF_SACK_PERMIT) && 1040 (to.to_flags & TOF_SACKPERM) == 0) 1041 tp->t_flags &= ~TF_SACK_PERMIT; 1042 } 1043 1044 /* 1045 * Header prediction: check for the two common cases 1046 * of a uni-directional data xfer. If the packet has 1047 * no control flags, is in-sequence, the window didn't 1048 * change and we're not retransmitting, it's a 1049 * candidate. If the length is zero and the ack moved 1050 * forward, we're the sender side of the xfer. Just 1051 * free the data acked & wake any higher level process 1052 * that was blocked waiting for space. If the length 1053 * is non-zero and the ack didn't move, we're the 1054 * receiver side. If we're getting packets in-order 1055 * (the reassembly queue is empty), add the data to 1056 * the socket buffer and note that we need a delayed ack. 1057 * Make sure that the hidden state-flags are also off. 1058 * Since we check for TCPS_ESTABLISHED first, it can only 1059 * be TH_NEEDSYN. 1060 */ 1061 if (tp->t_state == TCPS_ESTABLISHED && 1062 th->th_seq == tp->rcv_nxt && 1063 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 1064 tp->snd_nxt == tp->snd_max && 1065 tiwin && tiwin == tp->snd_wnd && 1066 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) && 1067 LIST_EMPTY(&tp->t_segq) && 1068 ((to.to_flags & TOF_TS) == 0 || 1069 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) { 1070 1071 /* 1072 * If last ACK falls within this segment's sequence numbers, 1073 * record the timestamp. 1074 * NOTE that the test is modified according to the latest 1075 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1076 */ 1077 if ((to.to_flags & TOF_TS) != 0 && 1078 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 1079 tp->ts_recent_age = ticks; 1080 tp->ts_recent = to.to_tsval; 1081 } 1082 1083 if (tlen == 0) { 1084 if (SEQ_GT(th->th_ack, tp->snd_una) && 1085 SEQ_LEQ(th->th_ack, tp->snd_max) && 1086 tp->snd_cwnd >= tp->snd_wnd && 1087 ((!V_tcp_do_newreno && 1088 !(tp->t_flags & TF_SACK_PERMIT) && 1089 tp->t_dupacks < tcprexmtthresh) || 1090 ((V_tcp_do_newreno || 1091 (tp->t_flags & TF_SACK_PERMIT)) && 1092 !IN_FASTRECOVERY(tp) && 1093 (to.to_flags & TOF_SACK) == 0 && 1094 TAILQ_EMPTY(&tp->snd_holes)))) { 1095 KASSERT(headlocked, 1096 ("%s: headlocked", __func__)); 1097 INP_INFO_WUNLOCK(&V_tcbinfo); 1098 headlocked = 0; 1099 /* 1100 * This is a pure ack for outstanding data. 1101 */ 1102 ++V_tcpstat.tcps_predack; 1103 /* 1104 * "bad retransmit" recovery. 
1105 */ 1106 if (tp->t_rxtshift == 1 && 1107 ticks < tp->t_badrxtwin) { 1108 ++V_tcpstat.tcps_sndrexmitbad; 1109 tp->snd_cwnd = tp->snd_cwnd_prev; 1110 tp->snd_ssthresh = 1111 tp->snd_ssthresh_prev; 1112 tp->snd_recover = tp->snd_recover_prev; 1113 if (tp->t_flags & TF_WASFRECOVERY) 1114 ENTER_FASTRECOVERY(tp); 1115 tp->snd_nxt = tp->snd_max; 1116 tp->t_badrxtwin = 0; 1117 } 1118 1119 /* 1120 * Recalculate the transmit timer / rtt. 1121 * 1122 * Some boxes send broken timestamp replies 1123 * during the SYN+ACK phase, ignore 1124 * timestamps of 0 or we could calculate a 1125 * huge RTT and blow up the retransmit timer. 1126 */ 1127 if ((to.to_flags & TOF_TS) != 0 && 1128 to.to_tsecr) { 1129 if (!tp->t_rttlow || 1130 tp->t_rttlow > ticks - to.to_tsecr) 1131 tp->t_rttlow = ticks - to.to_tsecr; 1132 tcp_xmit_timer(tp, 1133 ticks - to.to_tsecr + 1); 1134 } else if (tp->t_rtttime && 1135 SEQ_GT(th->th_ack, tp->t_rtseq)) { 1136 if (!tp->t_rttlow || 1137 tp->t_rttlow > ticks - tp->t_rtttime) 1138 tp->t_rttlow = ticks - tp->t_rtttime; 1139 tcp_xmit_timer(tp, 1140 ticks - tp->t_rtttime); 1141 } 1142 tcp_xmit_bandwidth_limit(tp, th->th_ack); 1143 acked = th->th_ack - tp->snd_una; 1144 V_tcpstat.tcps_rcvackpack++; 1145 V_tcpstat.tcps_rcvackbyte += acked; 1146 sbdrop(&so->so_snd, acked); 1147 if (SEQ_GT(tp->snd_una, tp->snd_recover) && 1148 SEQ_LEQ(th->th_ack, tp->snd_recover)) 1149 tp->snd_recover = th->th_ack - 1; 1150 tp->snd_una = th->th_ack; 1151 /* 1152 * Pull snd_wl2 up to prevent seq wrap relative 1153 * to th_ack. 1154 */ 1155 tp->snd_wl2 = th->th_ack; 1156 tp->t_dupacks = 0; 1157 m_freem(m); 1158 ND6_HINT(tp); /* Some progress has been made. */ 1159 1160 /* 1161 * If all outstanding data are acked, stop 1162 * retransmit timer, otherwise restart timer 1163 * using current (possibly backed-off) value. 1164 * If process is waiting for space, 1165 * wakeup/selwakeup/signal. If data 1166 * are ready to send, let tcp_output 1167 * decide between more output or persist. 1168 */ 1169#ifdef TCPDEBUG 1170 if (so->so_options & SO_DEBUG) 1171 tcp_trace(TA_INPUT, ostate, tp, 1172 (void *)tcp_saveipgen, 1173 &tcp_savetcp, 0); 1174#endif 1175 if (tp->snd_una == tp->snd_max) 1176 tcp_timer_activate(tp, TT_REXMT, 0); 1177 else if (!tcp_timer_active(tp, TT_PERSIST)) 1178 tcp_timer_activate(tp, TT_REXMT, 1179 tp->t_rxtcur); 1180 sowwakeup(so); 1181 if (so->so_snd.sb_cc) 1182 (void) tcp_output(tp); 1183 goto check_delack; 1184 } 1185 } else if (th->th_ack == tp->snd_una && 1186 tlen <= sbspace(&so->so_rcv)) { 1187 int newsize = 0; /* automatic sockbuf scaling */ 1188 1189 KASSERT(headlocked, ("%s: headlocked", __func__)); 1190 INP_INFO_WUNLOCK(&V_tcbinfo); 1191 headlocked = 0; 1192 /* 1193 * This is a pure, in-sequence data packet 1194 * with nothing on the reassembly queue and 1195 * we have enough buffer space to take it. 1196 */ 1197 /* Clean receiver SACK report if present */ 1198 if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks) 1199 tcp_clean_sackreport(tp); 1200 ++V_tcpstat.tcps_preddat; 1201 tp->rcv_nxt += tlen; 1202 /* 1203 * Pull snd_wl1 up to prevent seq wrap relative to 1204 * th_seq. 1205 */ 1206 tp->snd_wl1 = th->th_seq; 1207 /* 1208 * Pull rcv_up up to prevent seq wrap relative to 1209 * rcv_nxt. 
1210 */ 1211 tp->rcv_up = tp->rcv_nxt; 1212 V_tcpstat.tcps_rcvpack++; 1213 V_tcpstat.tcps_rcvbyte += tlen; 1214 ND6_HINT(tp); /* Some progress has been made */ 1215#ifdef TCPDEBUG 1216 if (so->so_options & SO_DEBUG) 1217 tcp_trace(TA_INPUT, ostate, tp, 1218 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1219#endif 1220 /* 1221 * Automatic sizing of receive socket buffer. Often the send 1222 * buffer size is not optimally adjusted to the actual network 1223 * conditions at hand (delay bandwidth product). Setting the 1224 * buffer size too small limits throughput on links with high 1225 * bandwidth and high delay (eg. trans-continental/oceanic links). 1226 * 1227 * On the receive side the socket buffer memory is only rarely 1228 * used to any significant extent. This allows us to be much 1229 * more aggressive in scaling the receive socket buffer. For 1230 * the case that the buffer space is actually used to a large 1231 * extent and we run out of kernel memory we can simply drop 1232 * the new segments; TCP on the sender will just retransmit it 1233 * later. Setting the buffer size too big may only consume too 1234 * much kernel memory if the application doesn't read() from 1235 * the socket or packet loss or reordering makes use of the 1236 * reassembly queue. 1237 * 1238 * The criteria to step up the receive buffer one notch are: 1239 * 1. the number of bytes received during the time it takes 1240 * one timestamp to be reflected back to us (the RTT); 1241 * 2. received bytes per RTT is within seven eighth of the 1242 * current socket buffer size; 1243 * 3. receive buffer size has not hit maximal automatic size; 1244 * 1245 * This algorithm does one step per RTT at most and only if 1246 * we receive a bulk stream w/o packet losses or reorderings. 1247 * Shrinking the buffer during idle times is not necessary as 1248 * it doesn't consume any memory when idle. 1249 * 1250 * TODO: Only step up if the application is actually serving 1251 * the buffer to better manage the socket buffer resources. 1252 */ 1253 if (V_tcp_do_autorcvbuf && 1254 to.to_tsecr && 1255 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 1256 if (to.to_tsecr > tp->rfbuf_ts && 1257 to.to_tsecr - tp->rfbuf_ts < hz) { 1258 if (tp->rfbuf_cnt > 1259 (so->so_rcv.sb_hiwat / 8 * 7) && 1260 so->so_rcv.sb_hiwat < 1261 V_tcp_autorcvbuf_max) { 1262 newsize = 1263 min(so->so_rcv.sb_hiwat + 1264 V_tcp_autorcvbuf_inc, 1265 V_tcp_autorcvbuf_max); 1266 } 1267 /* Start over with next RTT. */ 1268 tp->rfbuf_ts = 0; 1269 tp->rfbuf_cnt = 0; 1270 } else 1271 tp->rfbuf_cnt += tlen; /* add up */ 1272 } 1273 1274 /* Add data to socket buffer. */ 1275 SOCKBUF_LOCK(&so->so_rcv); 1276 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1277 m_freem(m); 1278 } else { 1279 /* 1280 * Set new socket buffer size. 1281 * Give up when limit is reached. 1282 */ 1283 if (newsize) 1284 if (!sbreserve_locked(&so->so_rcv, 1285 newsize, so, curthread)) 1286 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1287 m_adj(m, drop_hdrlen); /* delayed header drop */ 1288 sbappendstream_locked(&so->so_rcv, m); 1289 } 1290 /* NB: sorwakeup_locked() does an implicit unlock. */ 1291 sorwakeup_locked(so); 1292 if (DELAY_ACK(tp)) { 1293 tp->t_flags |= TF_DELACK; 1294 } else { 1295 tp->t_flags |= TF_ACKNOW; 1296 tcp_output(tp); 1297 } 1298 goto check_delack; 1299 } 1300 } 1301 1302 /* 1303 * Calculate amount of space in receive window, 1304 * and then do TCP input processing. 1305 * Receive window is amount of space in rcv queue, 1306 * but not less than advertised window. 
1307 */ 1308 win = sbspace(&so->so_rcv); 1309 if (win < 0) 1310 win = 0; 1311 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1312 1313 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 1314 tp->rfbuf_ts = 0; 1315 tp->rfbuf_cnt = 0; 1316 1317 switch (tp->t_state) { 1318 1319 /* 1320 * If the state is SYN_RECEIVED: 1321 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1322 */ 1323 case TCPS_SYN_RECEIVED: 1324 if ((thflags & TH_ACK) && 1325 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1326 SEQ_GT(th->th_ack, tp->snd_max))) { 1327 rstreason = BANDLIM_RST_OPENPORT; 1328 goto dropwithreset; 1329 } 1330 break; 1331 1332 /* 1333 * If the state is SYN_SENT: 1334 * if seg contains an ACK, but not for our SYN, drop the input. 1335 * if seg contains a RST, then drop the connection. 1336 * if seg does not contain SYN, then drop it. 1337 * Otherwise this is an acceptable SYN segment 1338 * initialize tp->rcv_nxt and tp->irs 1339 * if seg contains ack then advance tp->snd_una 1340 * if seg contains an ECE and ECN support is enabled, the stream 1341 * is ECN capable. 1342 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1343 * arrange for segment to be acked (eventually) 1344 * continue processing rest of data/controls, beginning with URG 1345 */ 1346 case TCPS_SYN_SENT: 1347 if ((thflags & TH_ACK) && 1348 (SEQ_LEQ(th->th_ack, tp->iss) || 1349 SEQ_GT(th->th_ack, tp->snd_max))) { 1350 rstreason = BANDLIM_UNLIMITED; 1351 goto dropwithreset; 1352 } 1353 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) 1354 tp = tcp_drop(tp, ECONNREFUSED); 1355 if (thflags & TH_RST) 1356 goto drop; 1357 if (!(thflags & TH_SYN)) 1358 goto drop; 1359 1360 tp->irs = th->th_seq; 1361 tcp_rcvseqinit(tp); 1362 if (thflags & TH_ACK) { 1363 V_tcpstat.tcps_connects++; 1364 soisconnected(so); 1365#ifdef MAC 1366 SOCK_LOCK(so); 1367 mac_socketpeer_set_from_mbuf(m, so); 1368 SOCK_UNLOCK(so); 1369#endif 1370 /* Do window scaling on this connection? */ 1371 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1372 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1373 tp->rcv_scale = tp->request_r_scale; 1374 } 1375 tp->rcv_adv += tp->rcv_wnd; 1376 tp->snd_una++; /* SYN is acked */ 1377 /* 1378 * If there's data, delay ACK; if there's also a FIN 1379 * ACKNOW will be turned on later. 1380 */ 1381 if (DELAY_ACK(tp) && tlen != 0) 1382 tcp_timer_activate(tp, TT_DELACK, 1383 tcp_delacktime); 1384 else 1385 tp->t_flags |= TF_ACKNOW; 1386 1387 if ((thflags & TH_ECE) && V_tcp_do_ecn) { 1388 tp->t_flags |= TF_ECN_PERMIT; 1389 V_tcpstat.tcps_ecn_shs++; 1390 } 1391 1392 /* 1393 * Received <SYN,ACK> in SYN_SENT[*] state. 1394 * Transitions: 1395 * SYN_SENT --> ESTABLISHED 1396 * SYN_SENT* --> FIN_WAIT_1 1397 */ 1398 tp->t_starttime = ticks; 1399 if (tp->t_flags & TF_NEEDFIN) { 1400 tp->t_state = TCPS_FIN_WAIT_1; 1401 tp->t_flags &= ~TF_NEEDFIN; 1402 thflags &= ~TH_SYN; 1403 } else { 1404 tp->t_state = TCPS_ESTABLISHED; 1405 tcp_timer_activate(tp, TT_KEEP, tcp_keepidle); 1406 } 1407 } else { 1408 /* 1409 * Received initial SYN in SYN-SENT[*] state => 1410 * simultaneous open. If segment contains CC option 1411 * and there is a cached CC, apply TAO test. 1412 * If it succeeds, connection is * half-synchronized. 1413 * Otherwise, do 3-way handshake: 1414 * SYN-SENT -> SYN-RECEIVED 1415 * SYN-SENT* -> SYN-RECEIVED* 1416 * If there was no CC option, clear cached CC value. 
1417 */ 1418 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 1419 tcp_timer_activate(tp, TT_REXMT, 0); 1420 tp->t_state = TCPS_SYN_RECEIVED; 1421 } 1422 1423 KASSERT(headlocked, ("%s: trimthenstep6: head not locked", 1424 __func__)); 1425 INP_WLOCK_ASSERT(tp->t_inpcb); 1426 1427 /* 1428 * Advance th->th_seq to correspond to first data byte. 1429 * If data, trim to stay within window, 1430 * dropping FIN if necessary. 1431 */ 1432 th->th_seq++; 1433 if (tlen > tp->rcv_wnd) { 1434 todrop = tlen - tp->rcv_wnd; 1435 m_adj(m, -todrop); 1436 tlen = tp->rcv_wnd; 1437 thflags &= ~TH_FIN; 1438 V_tcpstat.tcps_rcvpackafterwin++; 1439 V_tcpstat.tcps_rcvbyteafterwin += todrop; 1440 } 1441 tp->snd_wl1 = th->th_seq - 1; 1442 tp->rcv_up = th->th_seq; 1443 /* 1444 * Client side of transaction: already sent SYN and data. 1445 * If the remote host used T/TCP to validate the SYN, 1446 * our data will be ACK'd; if so, enter normal data segment 1447 * processing in the middle of step 5, ack processing. 1448 * Otherwise, goto step 6. 1449 */ 1450 if (thflags & TH_ACK) 1451 goto process_ACK; 1452 1453 goto step6; 1454 1455 /* 1456 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 1457 * do normal processing. 1458 * 1459 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later. 1460 */ 1461 case TCPS_LAST_ACK: 1462 case TCPS_CLOSING: 1463 break; /* continue normal processing */ 1464 } 1465 1466 /* 1467 * States other than LISTEN or SYN_SENT. 1468 * First check the RST flag and sequence number since reset segments 1469 * are exempt from the timestamp and connection count tests. This 1470 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 1471 * below which allowed reset segments in half the sequence space 1472 * to fall though and be processed (which gives forged reset 1473 * segments with a random sequence number a 50 percent chance of 1474 * killing a connection). 1475 * Then check timestamp, if present. 1476 * Then check the connection count, if present. 1477 * Then check that at least some bytes of segment are within 1478 * receive window. If segment begins before rcv_nxt, 1479 * drop leading data (and SYN); if nothing left, just ack. 1480 * 1481 * 1482 * If the RST bit is set, check the sequence number to see 1483 * if this is a valid reset segment. 1484 * RFC 793 page 37: 1485 * In all states except SYN-SENT, all reset (RST) segments 1486 * are validated by checking their SEQ-fields. A reset is 1487 * valid if its sequence number is in the window. 1488 * Note: this does not take into account delayed ACKs, so 1489 * we should test against last_ack_sent instead of rcv_nxt. 1490 * The sequence number in the reset segment is normally an 1491 * echo of our outgoing acknowlegement numbers, but some hosts 1492 * send a reset with the sequence number at the rightmost edge 1493 * of our receive window, and we have to handle this case. 1494 * Note 2: Paul Watson's paper "Slipping in the Window" has shown 1495 * that brute force RST attacks are possible. To combat this, 1496 * we use a much stricter check while in the ESTABLISHED state, 1497 * only accepting RSTs where the sequence number is equal to 1498 * last_ack_sent. In all other states (the states in which a 1499 * RST is more likely), the more permissive check is used. 1500 * If we have multiple segments in flight, the intial reset 1501 * segment sequence numbers will be to the left of last_ack_sent, 1502 * but they will eventually catch up. 
1503 * In any case, it never made sense to trim reset segments to 1504 * fit the receive window since RFC 1122 says: 1505 * 4.2.2.12 RST Segment: RFC-793 Section 3.4 1506 * 1507 * A TCP SHOULD allow a received RST segment to include data. 1508 * 1509 * DISCUSSION 1510 * It has been suggested that a RST segment could contain 1511 * ASCII text that encoded and explained the cause of the 1512 * RST. No standard has yet been established for such 1513 * data. 1514 * 1515 * If the reset segment passes the sequence number test examine 1516 * the state: 1517 * SYN_RECEIVED STATE: 1518 * If passive open, return to LISTEN state. 1519 * If active open, inform user that connection was refused. 1520 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES: 1521 * Inform user that connection was reset, and close tcb. 1522 * CLOSING, LAST_ACK STATES: 1523 * Close the tcb. 1524 * TIME_WAIT STATE: 1525 * Drop the segment - see Stevens, vol. 2, p. 964 and 1526 * RFC 1337. 1527 */ 1528 if (thflags & TH_RST) { 1529 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) && 1530 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 1531 switch (tp->t_state) { 1532 1533 case TCPS_SYN_RECEIVED: 1534 so->so_error = ECONNREFUSED; 1535 goto close; 1536 1537 case TCPS_ESTABLISHED: 1538 if (V_tcp_insecure_rst == 0 && 1539 !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) && 1540 SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) && 1541 !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) && 1542 SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) { 1543 V_tcpstat.tcps_badrst++; 1544 goto drop; 1545 } 1546 /* FALLTHROUGH */ 1547 case TCPS_FIN_WAIT_1: 1548 case TCPS_FIN_WAIT_2: 1549 case TCPS_CLOSE_WAIT: 1550 so->so_error = ECONNRESET; 1551 close: 1552 tp->t_state = TCPS_CLOSED; 1553 V_tcpstat.tcps_drops++; 1554 KASSERT(headlocked, ("%s: trimthenstep6: " 1555 "tcp_close: head not locked", __func__)); 1556 tp = tcp_close(tp); 1557 break; 1558 1559 case TCPS_CLOSING: 1560 case TCPS_LAST_ACK: 1561 KASSERT(headlocked, ("%s: trimthenstep6: " 1562 "tcp_close.2: head not locked", __func__)); 1563 tp = tcp_close(tp); 1564 break; 1565 } 1566 } 1567 goto drop; 1568 } 1569 1570 /* 1571 * RFC 1323 PAWS: If we have a timestamp reply on this segment 1572 * and it's less than ts_recent, drop it. 1573 */ 1574 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 1575 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 1576 1577 /* Check to see if ts_recent is over 24 days old. */ 1578 if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) { 1579 /* 1580 * Invalidate ts_recent. If this segment updates 1581 * ts_recent, the age will be reset later and ts_recent 1582 * will get a valid value. If it does not, setting 1583 * ts_recent to zero will at least satisfy the 1584 * requirement that zero be placed in the timestamp 1585 * echo reply when ts_recent isn't valid. The 1586 * age isn't reset until we get a valid ts_recent 1587 * because we don't want out-of-order segments to be 1588 * dropped when ts_recent is old. 1589 */ 1590 tp->ts_recent = 0; 1591 } else { 1592 V_tcpstat.tcps_rcvduppack++; 1593 V_tcpstat.tcps_rcvdupbyte += tlen; 1594 V_tcpstat.tcps_pawsdrop++; 1595 if (tlen) 1596 goto dropafterack; 1597 goto drop; 1598 } 1599 } 1600 1601 /* 1602 * In the SYN-RECEIVED state, validate that the packet belongs to 1603 * this connection before trimming the data to fit the receive 1604 * window. Check the sequence number versus IRS since we know 1605 * the sequence numbers haven't wrapped. This is a partial fix 1606 * for the "LAND" DoS attack. 
1607 */ 1608 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 1609 rstreason = BANDLIM_RST_OPENPORT; 1610 goto dropwithreset; 1611 } 1612 1613 todrop = tp->rcv_nxt - th->th_seq; 1614 if (todrop > 0) { 1615 if (thflags & TH_SYN) { 1616 thflags &= ~TH_SYN; 1617 th->th_seq++; 1618 if (th->th_urp > 1) 1619 th->th_urp--; 1620 else 1621 thflags &= ~TH_URG; 1622 todrop--; 1623 } 1624 /* 1625 * Following if statement from Stevens, vol. 2, p. 960. 1626 */ 1627 if (todrop > tlen 1628 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 1629 /* 1630 * Any valid FIN must be to the left of the window. 1631 * At this point the FIN must be a duplicate or out 1632 * of sequence; drop it. 1633 */ 1634 thflags &= ~TH_FIN; 1635 1636 /* 1637 * Send an ACK to resynchronize and drop any data. 1638 * But keep on processing for RST or ACK. 1639 */ 1640 tp->t_flags |= TF_ACKNOW; 1641 todrop = tlen; 1642 V_tcpstat.tcps_rcvduppack++; 1643 V_tcpstat.tcps_rcvdupbyte += todrop; 1644 } else { 1645 V_tcpstat.tcps_rcvpartduppack++; 1646 V_tcpstat.tcps_rcvpartdupbyte += todrop; 1647 } 1648 drop_hdrlen += todrop; /* drop from the top afterwards */ 1649 th->th_seq += todrop; 1650 tlen -= todrop; 1651 if (th->th_urp > todrop) 1652 th->th_urp -= todrop; 1653 else { 1654 thflags &= ~TH_URG; 1655 th->th_urp = 0; 1656 } 1657 } 1658 1659 /* 1660 * If new data are received on a connection after the 1661 * user processes are gone, then RST the other end. 1662 */ 1663 if ((so->so_state & SS_NOFDREF) && 1664 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 1665 char *s; 1666 1667 KASSERT(headlocked, ("%s: trimthenstep6: tcp_close.3: head " 1668 "not locked", __func__)); 1669 if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) { 1670 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data after socket " 1671 "was closed, sending RST and removing tcpcb\n", 1672 s, __func__, tcpstates[tp->t_state], tlen); 1673 free(s, M_TCPLOG); 1674 } 1675 tp = tcp_close(tp); 1676 V_tcpstat.tcps_rcvafterclose++; 1677 rstreason = BANDLIM_UNLIMITED; 1678 goto dropwithreset; 1679 } 1680 1681 /* 1682 * If segment ends after window, drop trailing data 1683 * (and PUSH and FIN); if nothing left, just ACK. 1684 */ 1685 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 1686 if (todrop > 0) { 1687 V_tcpstat.tcps_rcvpackafterwin++; 1688 if (todrop >= tlen) { 1689 V_tcpstat.tcps_rcvbyteafterwin += tlen; 1690 /* 1691 * If window is closed can only take segments at 1692 * window edge, and have to drop data and PUSH from 1693 * incoming segments. Continue processing, but 1694 * remember to ack. Otherwise, drop segment 1695 * and ack. 1696 */ 1697 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 1698 tp->t_flags |= TF_ACKNOW; 1699 V_tcpstat.tcps_rcvwinprobe++; 1700 } else 1701 goto dropafterack; 1702 } else 1703 V_tcpstat.tcps_rcvbyteafterwin += todrop; 1704 m_adj(m, -todrop); 1705 tlen -= todrop; 1706 thflags &= ~(TH_PUSH|TH_FIN); 1707 } 1708 1709 /* 1710 * If last ACK falls within this segment's sequence numbers, 1711 * record its timestamp. 1712 * NOTE: 1713 * 1) That the test incorporates suggestions from the latest 1714 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1715 * 2) That updating only on newer timestamps interferes with 1716 * our earlier PAWS tests, so this check should be solely 1717 * predicated on the sequence space of this segment. 
1718 * 3) That we modify the segment boundary check to be 1719 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 1720 * instead of RFC1323's 1721 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 1722 * This modified check allows us to overcome RFC1323's 1723 * limitations as described in Stevens TCP/IP Illustrated 1724 * Vol. 2 p.869. In such cases, we can still calculate the 1725 * RTT correctly when RCV.NXT == Last.ACK.Sent. 1726 */ 1727 if ((to.to_flags & TOF_TS) != 0 && 1728 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 1729 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 1730 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 1731 tp->ts_recent_age = ticks; 1732 tp->ts_recent = to.to_tsval; 1733 } 1734 1735 /* 1736 * If a SYN is in the window, then this is an 1737 * error and we send an RST and drop the connection. 1738 */ 1739 if (thflags & TH_SYN) { 1740 KASSERT(headlocked, ("%s: tcp_drop: trimthenstep6: " 1741 "head not locked", __func__)); 1742 tp = tcp_drop(tp, ECONNRESET); 1743 rstreason = BANDLIM_UNLIMITED; 1744 goto drop; 1745 } 1746 1747 /* 1748 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 1749 * flag is on (half-synchronized state), then queue data for 1750 * later processing; else drop segment and return. 1751 */ 1752 if ((thflags & TH_ACK) == 0) { 1753 if (tp->t_state == TCPS_SYN_RECEIVED || 1754 (tp->t_flags & TF_NEEDSYN)) 1755 goto step6; 1756 else if (tp->t_flags & TF_ACKNOW) 1757 goto dropafterack; 1758 else 1759 goto drop; 1760 } 1761 1762 /* 1763 * Ack processing. 1764 */ 1765 switch (tp->t_state) { 1766 1767 /* 1768 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 1769 * ESTABLISHED state and continue processing. 1770 * The ACK was checked above. 1771 */ 1772 case TCPS_SYN_RECEIVED: 1773 1774 V_tcpstat.tcps_connects++; 1775 soisconnected(so); 1776 /* Do window scaling? */ 1777 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1778 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1779 tp->rcv_scale = tp->request_r_scale; 1780 tp->snd_wnd = tiwin; 1781 } 1782 /* 1783 * Make transitions: 1784 * SYN-RECEIVED -> ESTABLISHED 1785 * SYN-RECEIVED* -> FIN-WAIT-1 1786 */ 1787 tp->t_starttime = ticks; 1788 if (tp->t_flags & TF_NEEDFIN) { 1789 tp->t_state = TCPS_FIN_WAIT_1; 1790 tp->t_flags &= ~TF_NEEDFIN; 1791 } else { 1792 tp->t_state = TCPS_ESTABLISHED; 1793 tcp_timer_activate(tp, TT_KEEP, tcp_keepidle); 1794 } 1795 /* 1796 * If segment contains data or ACK, will call tcp_reass() 1797 * later; if not, do so now to pass queued data to user. 1798 */ 1799 if (tlen == 0 && (thflags & TH_FIN) == 0) 1800 (void) tcp_reass(tp, (struct tcphdr *)0, 0, 1801 (struct mbuf *)0); 1802 tp->snd_wl1 = th->th_seq - 1; 1803 /* FALLTHROUGH */ 1804 1805 /* 1806 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 1807 * ACKs. If the ack is in the range 1808 * tp->snd_una < th->th_ack <= tp->snd_max 1809 * then advance tp->snd_una to th->th_ack and drop 1810 * data from the retransmission queue. If this ACK reflects 1811 * more up to date window information we update our window information. 
1812 */ 1813 case TCPS_ESTABLISHED: 1814 case TCPS_FIN_WAIT_1: 1815 case TCPS_FIN_WAIT_2: 1816 case TCPS_CLOSE_WAIT: 1817 case TCPS_CLOSING: 1818 case TCPS_LAST_ACK: 1819 if (SEQ_GT(th->th_ack, tp->snd_max)) { 1820 V_tcpstat.tcps_rcvacktoomuch++; 1821 goto dropafterack; 1822 } 1823 if ((tp->t_flags & TF_SACK_PERMIT) && 1824 ((to.to_flags & TOF_SACK) || 1825 !TAILQ_EMPTY(&tp->snd_holes))) 1826 tcp_sack_doack(tp, &to, th->th_ack); 1827 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 1828 if (tlen == 0 && tiwin == tp->snd_wnd) { 1829 V_tcpstat.tcps_rcvdupack++; 1830 /* 1831 * If we have outstanding data (other than 1832 * a window probe), this is a completely 1833 * duplicate ack (ie, window info didn't 1834 * change), the ack is the biggest we've 1835 * seen and we've seen exactly our rexmt 1836 * threshhold of them, assume a packet 1837 * has been dropped and retransmit it. 1838 * Kludge snd_nxt & the congestion 1839 * window so we send only this one 1840 * packet. 1841 * 1842 * We know we're losing at the current 1843 * window size so do congestion avoidance 1844 * (set ssthresh to half the current window 1845 * and pull our congestion window back to 1846 * the new ssthresh). 1847 * 1848 * Dup acks mean that packets have left the 1849 * network (they're now cached at the receiver) 1850 * so bump cwnd by the amount in the receiver 1851 * to keep a constant cwnd packets in the 1852 * network. 1853 * 1854 * When using TCP ECN, notify the peer that 1855 * we reduced the cwnd. 1856 */ 1857 if (!tcp_timer_active(tp, TT_REXMT) || 1858 th->th_ack != tp->snd_una) 1859 tp->t_dupacks = 0; 1860 else if (++tp->t_dupacks > tcprexmtthresh || 1861 ((V_tcp_do_newreno || 1862 (tp->t_flags & TF_SACK_PERMIT)) && 1863 IN_FASTRECOVERY(tp))) { 1864 if ((tp->t_flags & TF_SACK_PERMIT) && 1865 IN_FASTRECOVERY(tp)) { 1866 int awnd; 1867 1868 /* 1869 * Compute the amount of data in flight first. 1870 * We can inject new data into the pipe iff 1871 * we have less than 1/2 the original window's 1872 * worth of data in flight. 1873 */ 1874 awnd = (tp->snd_nxt - tp->snd_fack) + 1875 tp->sackhint.sack_bytes_rexmit; 1876 if (awnd < tp->snd_ssthresh) { 1877 tp->snd_cwnd += tp->t_maxseg; 1878 if (tp->snd_cwnd > tp->snd_ssthresh) 1879 tp->snd_cwnd = tp->snd_ssthresh; 1880 } 1881 } else 1882 tp->snd_cwnd += tp->t_maxseg; 1883 (void) tcp_output(tp); 1884 goto drop; 1885 } else if (tp->t_dupacks == tcprexmtthresh) { 1886 tcp_seq onxt = tp->snd_nxt; 1887 1888 /* 1889 * If we're doing sack, check to 1890 * see if we're already in sack 1891 * recovery. If we're not doing sack, 1892 * check to see if we're in newreno 1893 * recovery. 
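 * If we are already in a recovery episode, another run of
 * duplicate ACKs for the same window of data must not start a
 * second fast retransmit or cut ssthresh again; we only reset
 * the duplicate-ACK counter and skip the fast-retransmit logic.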
1894 */ 1895 if (tp->t_flags & TF_SACK_PERMIT) { 1896 if (IN_FASTRECOVERY(tp)) { 1897 tp->t_dupacks = 0; 1898 break; 1899 } 1900 } else if (V_tcp_do_newreno || 1901 V_tcp_do_ecn) { 1902 if (SEQ_LEQ(th->th_ack, 1903 tp->snd_recover)) { 1904 tp->t_dupacks = 0; 1905 break; 1906 } 1907 } 1908 tcp_congestion_exp(tp); 1909 tcp_timer_activate(tp, TT_REXMT, 0); 1910 tp->t_rtttime = 0; 1911 if (tp->t_flags & TF_SACK_PERMIT) { 1912 V_tcpstat.tcps_sack_recovery_episode++; 1913 tp->sack_newdata = tp->snd_nxt; 1914 tp->snd_cwnd = tp->t_maxseg; 1915 (void) tcp_output(tp); 1916 goto drop; 1917 } 1918 tp->snd_nxt = th->th_ack; 1919 tp->snd_cwnd = tp->t_maxseg; 1920 (void) tcp_output(tp); 1921 KASSERT(tp->snd_limited <= 2, 1922 ("%s: tp->snd_limited too big", 1923 __func__)); 1924 tp->snd_cwnd = tp->snd_ssthresh + 1925 tp->t_maxseg * 1926 (tp->t_dupacks - tp->snd_limited); 1927 if (SEQ_GT(onxt, tp->snd_nxt)) 1928 tp->snd_nxt = onxt; 1929 goto drop; 1930 } else if (V_tcp_do_rfc3042) { 1931 u_long oldcwnd = tp->snd_cwnd; 1932 tcp_seq oldsndmax = tp->snd_max; 1933 u_int sent; 1934 1935 KASSERT(tp->t_dupacks == 1 || 1936 tp->t_dupacks == 2, 1937 ("%s: dupacks not 1 or 2", 1938 __func__)); 1939 if (tp->t_dupacks == 1) 1940 tp->snd_limited = 0; 1941 tp->snd_cwnd = 1942 (tp->snd_nxt - tp->snd_una) + 1943 (tp->t_dupacks - tp->snd_limited) * 1944 tp->t_maxseg; 1945 (void) tcp_output(tp); 1946 sent = tp->snd_max - oldsndmax; 1947 if (sent > tp->t_maxseg) { 1948 KASSERT((tp->t_dupacks == 2 && 1949 tp->snd_limited == 0) || 1950 (sent == tp->t_maxseg + 1 && 1951 tp->t_flags & TF_SENTFIN), 1952 ("%s: sent too much", 1953 __func__)); 1954 tp->snd_limited = 2; 1955 } else if (sent > 0) 1956 ++tp->snd_limited; 1957 tp->snd_cwnd = oldcwnd; 1958 goto drop; 1959 } 1960 } else 1961 tp->t_dupacks = 0; 1962 break; 1963 } 1964 1965 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 1966 ("%s: th_ack <= snd_una", __func__)); 1967 1968 /* 1969 * If the congestion window was inflated to account 1970 * for the other side's cached packets, retract it. 1971 */ 1972 if (V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) { 1973 if (IN_FASTRECOVERY(tp)) { 1974 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 1975 if (tp->t_flags & TF_SACK_PERMIT) 1976 tcp_sack_partialack(tp, th); 1977 else 1978 tcp_newreno_partial_ack(tp, th); 1979 } else { 1980 /* 1981 * Out of fast recovery. 1982 * Window inflation should have left us 1983 * with approximately snd_ssthresh 1984 * outstanding data. 1985 * But in case we would be inclined to 1986 * send a burst, better to do it via 1987 * the slow start mechanism. 1988 */ 1989 if (SEQ_GT(th->th_ack + 1990 tp->snd_ssthresh, 1991 tp->snd_max)) 1992 tp->snd_cwnd = tp->snd_max - 1993 th->th_ack + 1994 tp->t_maxseg; 1995 else 1996 tp->snd_cwnd = tp->snd_ssthresh; 1997 } 1998 } 1999 } else { 2000 if (tp->t_dupacks >= tcprexmtthresh && 2001 tp->snd_cwnd > tp->snd_ssthresh) 2002 tp->snd_cwnd = tp->snd_ssthresh; 2003 } 2004 tp->t_dupacks = 0; 2005 /* 2006 * If we reach this point, ACK is not a duplicate, 2007 * i.e., it ACKs something we sent. 2008 */ 2009 if (tp->t_flags & TF_NEEDSYN) { 2010 /* 2011 * T/TCP: Connection was half-synchronized, and our 2012 * SYN has been ACK'd (so connection is now fully 2013 * synchronized). Go to non-starred state, 2014 * increment snd_una for ACK of SYN, and check if 2015 * we can do window scaling. 2016 */ 2017 tp->t_flags &= ~TF_NEEDSYN; 2018 tp->snd_una++; 2019 /* Do window scaling? 
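 * (Window scaling is used only if both sides advertised it in
 * their SYNs; the receive shift is applied from this point on,
 * now that the connection is fully synchronized.)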
*/ 2020 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2021 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2022 tp->rcv_scale = tp->request_r_scale; 2023 /* Send window already scaled. */ 2024 } 2025 } 2026 2027process_ACK: 2028 KASSERT(headlocked, ("%s: process_ACK: head not locked", 2029 __func__)); 2030 INP_WLOCK_ASSERT(tp->t_inpcb); 2031 2032 acked = th->th_ack - tp->snd_una; 2033 V_tcpstat.tcps_rcvackpack++; 2034 V_tcpstat.tcps_rcvackbyte += acked; 2035 2036 /* 2037 * If we just performed our first retransmit, and the ACK 2038 * arrives within our recovery window, then it was a mistake 2039 * to do the retransmit in the first place. Recover our 2040 * original cwnd and ssthresh, and proceed to transmit where 2041 * we left off. 2042 */ 2043 if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) { 2044 ++V_tcpstat.tcps_sndrexmitbad; 2045 tp->snd_cwnd = tp->snd_cwnd_prev; 2046 tp->snd_ssthresh = tp->snd_ssthresh_prev; 2047 tp->snd_recover = tp->snd_recover_prev; 2048 if (tp->t_flags & TF_WASFRECOVERY) 2049 ENTER_FASTRECOVERY(tp); 2050 tp->snd_nxt = tp->snd_max; 2051 tp->t_badrxtwin = 0; /* XXX probably not required */ 2052 } 2053 2054 /* 2055 * If we have a timestamp reply, update smoothed 2056 * round trip time. If no timestamp is present but 2057 * transmit timer is running and timed sequence 2058 * number was acked, update smoothed round trip time. 2059 * Since we now have an rtt measurement, cancel the 2060 * timer backoff (cf., Phil Karn's retransmit alg.). 2061 * Recompute the initial retransmit timer. 2062 * 2063 * Some boxes send broken timestamp replies 2064 * during the SYN+ACK phase, ignore 2065 * timestamps of 0 or we could calculate a 2066 * huge RTT and blow up the retransmit timer. 2067 */ 2068 if ((to.to_flags & TOF_TS) != 0 && 2069 to.to_tsecr) { 2070 if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr) 2071 tp->t_rttlow = ticks - to.to_tsecr; 2072 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1); 2073 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2074 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2075 tp->t_rttlow = ticks - tp->t_rtttime; 2076 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2077 } 2078 tcp_xmit_bandwidth_limit(tp, th->th_ack); 2079 2080 /* 2081 * If all outstanding data is acked, stop retransmit 2082 * timer and remember to restart (more output or persist). 2083 * If there is more data to be acked, restart retransmit 2084 * timer, using current (possibly backed-off) value. 2085 */ 2086 if (th->th_ack == tp->snd_max) { 2087 tcp_timer_activate(tp, TT_REXMT, 0); 2088 needoutput = 1; 2089 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2090 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2091 2092 /* 2093 * If no data (only SYN) was ACK'd, 2094 * skip rest of ACK processing. 2095 */ 2096 if (acked == 0) 2097 goto step6; 2098 2099 /* 2100 * When new data is acked, open the congestion window. 2101 * If the window gives us less than ssthresh packets 2102 * in flight, open exponentially (maxseg per packet). 2103 * Otherwise open linearly: maxseg per window 2104 * (maxseg^2 / cwnd per packet). 2105 * If cwnd > maxseg^2, fix the cwnd increment at 1 byte 2106 * to avoid capping cwnd (as suggested in RFC2581). 
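 * E.g. (illustrative numbers only): with t_maxseg = 1460 and
 * snd_cwnd = 14600 (ten segments) above ssthresh, the increment
 * below is max(1460*1460/14600, 1) = 146 bytes per ACK, i.e.
 * roughly one full-sized segment added per round trip once every
 * outstanding segment has been acknowledged.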
2107 */ 2108 if ((!V_tcp_do_newreno && !(tp->t_flags & TF_SACK_PERMIT)) || 2109 !IN_FASTRECOVERY(tp)) { 2110 u_int cw = tp->snd_cwnd; 2111 u_int incr = tp->t_maxseg; 2112 if (cw > tp->snd_ssthresh) 2113 incr = max((incr * incr / cw), 1); 2114 tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale); 2115 } 2116 SOCKBUF_LOCK(&so->so_snd); 2117 if (acked > so->so_snd.sb_cc) { 2118 tp->snd_wnd -= so->so_snd.sb_cc; 2119 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc); 2120 ourfinisacked = 1; 2121 } else { 2122 sbdrop_locked(&so->so_snd, acked); 2123 tp->snd_wnd -= acked; 2124 ourfinisacked = 0; 2125 } 2126 /* NB: sowwakeup_locked() does an implicit unlock. */ 2127 sowwakeup_locked(so); 2128 /* Detect una wraparound. */ 2129 if ((V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) && 2130 !IN_FASTRECOVERY(tp) && 2131 SEQ_GT(tp->snd_una, tp->snd_recover) && 2132 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2133 tp->snd_recover = th->th_ack - 1; 2134 if ((V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) && 2135 IN_FASTRECOVERY(tp) && 2136 SEQ_GEQ(th->th_ack, tp->snd_recover)) 2137 EXIT_FASTRECOVERY(tp); 2138 tp->snd_una = th->th_ack; 2139 if (tp->t_flags & TF_SACK_PERMIT) { 2140 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2141 tp->snd_recover = tp->snd_una; 2142 } 2143 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2144 tp->snd_nxt = tp->snd_una; 2145 2146 switch (tp->t_state) { 2147 2148 /* 2149 * In FIN_WAIT_1 STATE in addition to the processing 2150 * for the ESTABLISHED state if our FIN is now acknowledged 2151 * then enter FIN_WAIT_2. 2152 */ 2153 case TCPS_FIN_WAIT_1: 2154 if (ourfinisacked) { 2155 /* 2156 * If we can't receive any more 2157 * data, then closing user can proceed. 2158 * Starting the timer is contrary to the 2159 * specification, but if we don't get a FIN 2160 * we'll hang forever. 2161 * 2162 * XXXjl: 2163 * we should release the tp also, and use a 2164 * compressed state. 2165 */ 2166 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2167 int timeout; 2168 2169 soisdisconnected(so); 2170 timeout = (tcp_fast_finwait2_recycle) ? 2171 tcp_finwait2_timeout : tcp_maxidle; 2172 tcp_timer_activate(tp, TT_2MSL, timeout); 2173 } 2174 tp->t_state = TCPS_FIN_WAIT_2; 2175 } 2176 break; 2177 2178 /* 2179 * In CLOSING STATE in addition to the processing for 2180 * the ESTABLISHED state if the ACK acknowledges our FIN 2181 * then enter the TIME-WAIT state, otherwise ignore 2182 * the segment. 2183 */ 2184 case TCPS_CLOSING: 2185 if (ourfinisacked) { 2186 KASSERT(headlocked, ("%s: process_ACK: " 2187 "head not locked", __func__)); 2188 tcp_twstart(tp); 2189 INP_INFO_WUNLOCK(&V_tcbinfo); 2190 headlocked = 0; 2191 m_freem(m); 2192 return; 2193 } 2194 break; 2195 2196 /* 2197 * In LAST_ACK, we may still be waiting for data to drain 2198 * and/or to be acked, as well as for the ack of our FIN. 2199 * If our FIN is now acknowledged, delete the TCB, 2200 * enter the closed state and return. 2201 */ 2202 case TCPS_LAST_ACK: 2203 if (ourfinisacked) { 2204 KASSERT(headlocked, ("%s: process_ACK: " 2205 "tcp_close: head not locked", __func__)); 2206 tp = tcp_close(tp); 2207 goto drop; 2208 } 2209 break; 2210 } 2211 } 2212 2213step6: 2214 KASSERT(headlocked, ("%s: step6: head not locked", __func__)); 2215 INP_WLOCK_ASSERT(tp->t_inpcb); 2216 2217 /* 2218 * Update window information. 2219 * Don't look at window if no ACK: TAC's send garbage on first SYN. 
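 * The snd_wl1/snd_wl2 test below accepts a window update only from
 * a segment at least as recent as the one that last updated the
 * window: a newer sequence number, or the same sequence number with
 * a newer ack, or the same seq/ack pair with a larger advertised
 * window.  This keeps old, reordered segments from clobbering the
 * send window.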
2220 */ 2221 if ((thflags & TH_ACK) && 2222 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2223 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2224 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2225 /* keep track of pure window updates */ 2226 if (tlen == 0 && 2227 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2228 V_tcpstat.tcps_rcvwinupd++; 2229 tp->snd_wnd = tiwin; 2230 tp->snd_wl1 = th->th_seq; 2231 tp->snd_wl2 = th->th_ack; 2232 if (tp->snd_wnd > tp->max_sndwnd) 2233 tp->max_sndwnd = tp->snd_wnd; 2234 needoutput = 1; 2235 } 2236 2237 /* 2238 * Process segments with URG. 2239 */ 2240 if ((thflags & TH_URG) && th->th_urp && 2241 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2242 /* 2243 * This is a kludge, but if we receive and accept 2244 * random urgent pointers, we'll crash in 2245 * soreceive. It's hard to imagine someone 2246 * actually wanting to send this much urgent data. 2247 */ 2248 SOCKBUF_LOCK(&so->so_rcv); 2249 if (th->th_urp + so->so_rcv.sb_cc > sb_max) { 2250 th->th_urp = 0; /* XXX */ 2251 thflags &= ~TH_URG; /* XXX */ 2252 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2253 goto dodata; /* XXX */ 2254 } 2255 /* 2256 * If this segment advances the known urgent pointer, 2257 * then mark the data stream. This should not happen 2258 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2259 * a FIN has been received from the remote side. 2260 * In these states we ignore the URG. 2261 * 2262 * According to RFC961 (Assigned Protocols), 2263 * the urgent pointer points to the last octet 2264 * of urgent data. We continue, however, 2265 * to consider it to indicate the first octet 2266 * of data past the urgent section as the original 2267 * spec states (in one of two places). 2268 */ 2269 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2270 tp->rcv_up = th->th_seq + th->th_urp; 2271 so->so_oobmark = so->so_rcv.sb_cc + 2272 (tp->rcv_up - tp->rcv_nxt) - 1; 2273 if (so->so_oobmark == 0) 2274 so->so_rcv.sb_state |= SBS_RCVATMARK; 2275 sohasoutofband(so); 2276 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2277 } 2278 SOCKBUF_UNLOCK(&so->so_rcv); 2279 /* 2280 * Remove out of band data so doesn't get presented to user. 2281 * This can happen independent of advancing the URG pointer, 2282 * but if two URG's are pending at once, some out-of-band 2283 * data may creep in... ick. 2284 */ 2285 if (th->th_urp <= (u_long)tlen && 2286 !(so->so_options & SO_OOBINLINE)) { 2287 /* hdr drop is delayed */ 2288 tcp_pulloutofband(so, th, m, drop_hdrlen); 2289 } 2290 } else { 2291 /* 2292 * If no out of band data is expected, 2293 * pull receive urgent pointer along 2294 * with the receive window. 2295 */ 2296 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2297 tp->rcv_up = tp->rcv_nxt; 2298 } 2299dodata: /* XXX */ 2300 KASSERT(headlocked, ("%s: dodata: head not locked", __func__)); 2301 INP_WLOCK_ASSERT(tp->t_inpcb); 2302 2303 /* 2304 * Process the segment text, merging it into the TCP sequencing queue, 2305 * and arranging for acknowledgment of receipt if necessary. 2306 * This process logically involves adjusting tp->rcv_wnd as data 2307 * is presented to the user (this happens in tcp_usrreq.c, 2308 * case PRU_RCVD). If a FIN has already been received on this 2309 * connection then we just ignore the text. 
2310 */ 2311 if ((tlen || (thflags & TH_FIN)) && 2312 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2313 tcp_seq save_start = th->th_seq; 2314 m_adj(m, drop_hdrlen); /* delayed header drop */ 2315 /* 2316 * Insert segment which includes th into TCP reassembly queue 2317 * with control block tp. Set thflags to whether reassembly now 2318 * includes a segment with FIN. This handles the common case 2319 * inline (segment is the next to be received on an established 2320 * connection, and the queue is empty), avoiding linkage into 2321 * and removal from the queue and repetition of various 2322 * conversions. 2323 * Set DELACK for segments received in order, but ack 2324 * immediately when segments are out of order (so 2325 * fast retransmit can work). 2326 */ 2327 if (th->th_seq == tp->rcv_nxt && 2328 LIST_EMPTY(&tp->t_segq) && 2329 TCPS_HAVEESTABLISHED(tp->t_state)) { 2330 if (DELAY_ACK(tp)) 2331 tp->t_flags |= TF_DELACK; 2332 else 2333 tp->t_flags |= TF_ACKNOW; 2334 tp->rcv_nxt += tlen; 2335 thflags = th->th_flags & TH_FIN; 2336 V_tcpstat.tcps_rcvpack++; 2337 V_tcpstat.tcps_rcvbyte += tlen; 2338 ND6_HINT(tp); 2339 SOCKBUF_LOCK(&so->so_rcv); 2340 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 2341 m_freem(m); 2342 else 2343 sbappendstream_locked(&so->so_rcv, m); 2344 /* NB: sorwakeup_locked() does an implicit unlock. */ 2345 sorwakeup_locked(so); 2346 } else { 2347 /* 2348 * XXX: Due to the header drop above "th" is 2349 * theoretically invalid by now. Fortunately 2350 * m_adj() doesn't actually frees any mbufs 2351 * when trimming from the head. 2352 */ 2353 thflags = tcp_reass(tp, th, &tlen, m); 2354 tp->t_flags |= TF_ACKNOW; 2355 } 2356 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT)) 2357 tcp_update_sack_list(tp, save_start, save_start + tlen); 2358#if 0 2359 /* 2360 * Note the amount of data that peer has sent into 2361 * our window, in order to estimate the sender's 2362 * buffer size. 2363 * XXX: Unused. 2364 */ 2365 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 2366#endif 2367 } else { 2368 m_freem(m); 2369 thflags &= ~TH_FIN; 2370 } 2371 2372 /* 2373 * If FIN is received ACK the FIN and let the user know 2374 * that the connection is closing. 2375 */ 2376 if (thflags & TH_FIN) { 2377 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2378 socantrcvmore(so); 2379 /* 2380 * If connection is half-synchronized 2381 * (ie NEEDSYN flag on) then delay ACK, 2382 * so it may be piggybacked when SYN is sent. 2383 * Otherwise, since we received a FIN then no 2384 * more input can be expected, send ACK now. 2385 */ 2386 if (tp->t_flags & TF_NEEDSYN) 2387 tp->t_flags |= TF_DELACK; 2388 else 2389 tp->t_flags |= TF_ACKNOW; 2390 tp->rcv_nxt++; 2391 } 2392 switch (tp->t_state) { 2393 2394 /* 2395 * In SYN_RECEIVED and ESTABLISHED STATES 2396 * enter the CLOSE_WAIT state. 2397 */ 2398 case TCPS_SYN_RECEIVED: 2399 tp->t_starttime = ticks; 2400 /* FALLTHROUGH */ 2401 case TCPS_ESTABLISHED: 2402 tp->t_state = TCPS_CLOSE_WAIT; 2403 break; 2404 2405 /* 2406 * If still in FIN_WAIT_1 STATE FIN has not been acked so 2407 * enter the CLOSING state. 2408 */ 2409 case TCPS_FIN_WAIT_1: 2410 tp->t_state = TCPS_CLOSING; 2411 break; 2412 2413 /* 2414 * In FIN_WAIT_2 state enter the TIME_WAIT state, 2415 * starting the time-wait timer, turning off the other 2416 * standard timers. 
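 * Note that tcp_twstart() below replaces the tcpcb with a compressed
 * time-wait block, so tp must not be touched afterwards; the pcbinfo
 * lock is dropped before returning.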
2417 */ 2418 case TCPS_FIN_WAIT_2: 2419 KASSERT(headlocked == 1, ("%s: dodata: " 2420 "TCP_FIN_WAIT_2: head not locked", __func__)); 2421 tcp_twstart(tp); 2422 INP_INFO_WUNLOCK(&V_tcbinfo); 2423 return; 2424 } 2425 } 2426 INP_INFO_WUNLOCK(&V_tcbinfo); 2427 headlocked = 0; 2428#ifdef TCPDEBUG 2429 if (so->so_options & SO_DEBUG) 2430 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 2431 &tcp_savetcp, 0); 2432#endif 2433 2434 /* 2435 * Return any desired output. 2436 */ 2437 if (needoutput || (tp->t_flags & TF_ACKNOW)) 2438 (void) tcp_output(tp); 2439 2440check_delack: 2441 KASSERT(headlocked == 0, ("%s: check_delack: head locked", 2442 __func__)); 2443 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 2444 INP_WLOCK_ASSERT(tp->t_inpcb); 2445 if (tp->t_flags & TF_DELACK) { 2446 tp->t_flags &= ~TF_DELACK; 2447 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 2448 } 2449 INP_WUNLOCK(tp->t_inpcb); 2450 return; 2451 2452dropafterack: 2453 KASSERT(headlocked, ("%s: dropafterack: head not locked", __func__)); 2454 /* 2455 * Generate an ACK dropping incoming segment if it occupies 2456 * sequence space, where the ACK reflects our state. 2457 * 2458 * We can now skip the test for the RST flag since all 2459 * paths to this code happen after packets containing 2460 * RST have been dropped. 2461 * 2462 * In the SYN-RECEIVED state, don't send an ACK unless the 2463 * segment we received passes the SYN-RECEIVED ACK test. 2464 * If it fails send a RST. This breaks the loop in the 2465 * "LAND" DoS attack, and also prevents an ACK storm 2466 * between two listening ports that have been sent forged 2467 * SYN segments, each with the source address of the other. 2468 */ 2469 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 2470 (SEQ_GT(tp->snd_una, th->th_ack) || 2471 SEQ_GT(th->th_ack, tp->snd_max)) ) { 2472 rstreason = BANDLIM_RST_OPENPORT; 2473 goto dropwithreset; 2474 } 2475#ifdef TCPDEBUG 2476 if (so->so_options & SO_DEBUG) 2477 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2478 &tcp_savetcp, 0); 2479#endif 2480 KASSERT(headlocked, ("%s: headlocked should be 1", __func__)); 2481 INP_INFO_WUNLOCK(&V_tcbinfo); 2482 tp->t_flags |= TF_ACKNOW; 2483 (void) tcp_output(tp); 2484 INP_WUNLOCK(tp->t_inpcb); 2485 m_freem(m); 2486 return; 2487 2488dropwithreset: 2489 KASSERT(headlocked, ("%s: dropwithreset: head not locked", __func__)); 2490 2491 /* 2492 * If tp is non-NULL, we call tcp_dropwithreset() holding both inpcb 2493 * and global locks. However, if NULL, we must hold neither as 2494 * firewalls may acquire the global lock in order to look for a 2495 * matching inpcb. 2496 */ 2497 if (tp != NULL) { 2498 tcp_dropwithreset(m, th, tp, tlen, rstreason); 2499 INP_WUNLOCK(tp->t_inpcb); 2500 } 2501 INP_INFO_WUNLOCK(&V_tcbinfo); 2502 if (tp == NULL) 2503 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 2504 return; 2505 2506drop: 2507 /* 2508 * Drop space held by incoming segment and return. 2509 */ 2510#ifdef TCPDEBUG 2511 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2512 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2513 &tcp_savetcp, 0); 2514#endif 2515 if (tp != NULL) 2516 INP_WUNLOCK(tp->t_inpcb); 2517 if (headlocked) 2518 INP_INFO_WUNLOCK(&V_tcbinfo); 2519 m_freem(m); 2520 return; 2521} 2522 2523/* 2524 * Issue RST and make ACK acceptable to originator of segment. 2525 * The mbuf must still include the original packet header. 2526 * tp may be NULL. 
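 * If the offending segment carried an ACK, the RST is sent with the
 * segment's ack value as its sequence number; otherwise an RST|ACK
 * is sent acknowledging th_seq + tlen (counting a SYN as one octet),
 * which makes the reset acceptable to the originator per RFC 793.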
2527 */ 2528static void 2529tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 2530 int tlen, int rstreason) 2531{ 2532 struct ip *ip; 2533#ifdef INET6 2534 struct ip6_hdr *ip6; 2535#endif 2536 2537 if (tp != NULL) { 2538 INP_WLOCK_ASSERT(tp->t_inpcb); 2539 } 2540 2541 /* Don't bother if destination was broadcast/multicast. */ 2542 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 2543 goto drop; 2544#ifdef INET6 2545 if (mtod(m, struct ip *)->ip_v == 6) { 2546 ip6 = mtod(m, struct ip6_hdr *); 2547 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 2548 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 2549 goto drop; 2550 /* IPv6 anycast check is done at tcp6_input() */ 2551 } else 2552#endif 2553 { 2554 ip = mtod(m, struct ip *); 2555 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 2556 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 2557 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 2558 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 2559 goto drop; 2560 } 2561 2562 /* Perform bandwidth limiting. */ 2563 if (badport_bandlim(rstreason) < 0) 2564 goto drop; 2565 2566 /* tcp_respond consumes the mbuf chain. */ 2567 if (th->th_flags & TH_ACK) { 2568 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 2569 th->th_ack, TH_RST); 2570 } else { 2571 if (th->th_flags & TH_SYN) 2572 tlen++; 2573 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 2574 (tcp_seq)0, TH_RST|TH_ACK); 2575 } 2576 return; 2577drop: 2578 m_freem(m); 2579 return; 2580} 2581 2582/* 2583 * Parse TCP options and place in tcpopt. 2584 */ 2585static void 2586tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 2587{ 2588 int opt, optlen; 2589 2590 to->to_flags = 0; 2591 for (; cnt > 0; cnt -= optlen, cp += optlen) { 2592 opt = cp[0]; 2593 if (opt == TCPOPT_EOL) 2594 break; 2595 if (opt == TCPOPT_NOP) 2596 optlen = 1; 2597 else { 2598 if (cnt < 2) 2599 break; 2600 optlen = cp[1]; 2601 if (optlen < 2 || optlen > cnt) 2602 break; 2603 } 2604 switch (opt) { 2605 case TCPOPT_MAXSEG: 2606 if (optlen != TCPOLEN_MAXSEG) 2607 continue; 2608 if (!(flags & TO_SYN)) 2609 continue; 2610 to->to_flags |= TOF_MSS; 2611 bcopy((char *)cp + 2, 2612 (char *)&to->to_mss, sizeof(to->to_mss)); 2613 to->to_mss = ntohs(to->to_mss); 2614 break; 2615 case TCPOPT_WINDOW: 2616 if (optlen != TCPOLEN_WINDOW) 2617 continue; 2618 if (!(flags & TO_SYN)) 2619 continue; 2620 to->to_flags |= TOF_SCALE; 2621 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 2622 break; 2623 case TCPOPT_TIMESTAMP: 2624 if (optlen != TCPOLEN_TIMESTAMP) 2625 continue; 2626 to->to_flags |= TOF_TS; 2627 bcopy((char *)cp + 2, 2628 (char *)&to->to_tsval, sizeof(to->to_tsval)); 2629 to->to_tsval = ntohl(to->to_tsval); 2630 bcopy((char *)cp + 6, 2631 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 2632 to->to_tsecr = ntohl(to->to_tsecr); 2633 break; 2634#ifdef TCP_SIGNATURE 2635 /* 2636 * XXX In order to reply to a host which has set the 2637 * TCP_SIGNATURE option in its initial SYN, we have to 2638 * record the fact that the option was observed here 2639 * for the syncache code to perform the correct response. 
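 * Only the presence of the option and a pointer to the digest bytes
 * (to_signature) are recorded here; the MD5 digest itself is not
 * verified by the option parser.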
2640 */ 2641 case TCPOPT_SIGNATURE: 2642 if (optlen != TCPOLEN_SIGNATURE) 2643 continue; 2644 to->to_flags |= TOF_SIGNATURE; 2645 to->to_signature = cp + 2; 2646 break; 2647#endif 2648 case TCPOPT_SACK_PERMITTED: 2649 if (optlen != TCPOLEN_SACK_PERMITTED) 2650 continue; 2651 if (!(flags & TO_SYN)) 2652 continue; 2653 if (!V_tcp_do_sack) 2654 continue; 2655 to->to_flags |= TOF_SACKPERM; 2656 break; 2657 case TCPOPT_SACK: 2658 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 2659 continue; 2660 if (flags & TO_SYN) 2661 continue; 2662 to->to_flags |= TOF_SACK; 2663 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 2664 to->to_sacks = cp + 2; 2665 V_tcpstat.tcps_sack_rcv_blocks++; 2666 break; 2667 default: 2668 continue; 2669 } 2670 } 2671} 2672 2673/* 2674 * Pull out of band byte out of a segment so 2675 * it doesn't appear in the user's data queue. 2676 * It is still reflected in the segment length for 2677 * sequencing purposes. 2678 */ 2679static void 2680tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 2681 int off) 2682{ 2683 int cnt = off + th->th_urp - 1; 2684 2685 while (cnt >= 0) { 2686 if (m->m_len > cnt) { 2687 char *cp = mtod(m, caddr_t) + cnt; 2688 struct tcpcb *tp = sototcpcb(so); 2689 2690 INP_WLOCK_ASSERT(tp->t_inpcb); 2691 2692 tp->t_iobc = *cp; 2693 tp->t_oobflags |= TCPOOB_HAVEDATA; 2694 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 2695 m->m_len--; 2696 if (m->m_flags & M_PKTHDR) 2697 m->m_pkthdr.len--; 2698 return; 2699 } 2700 cnt -= m->m_len; 2701 m = m->m_next; 2702 if (m == NULL) 2703 break; 2704 } 2705 panic("tcp_pulloutofband"); 2706} 2707 2708/* 2709 * Collect new round-trip time estimate 2710 * and update averages and current timeout. 2711 */ 2712static void 2713tcp_xmit_timer(struct tcpcb *tp, int rtt) 2714{ 2715 int delta; 2716 2717 INP_WLOCK_ASSERT(tp->t_inpcb); 2718 2719 V_tcpstat.tcps_rttupdated++; 2720 tp->t_rttupdated++; 2721 if (tp->t_srtt != 0) { 2722 /* 2723 * srtt is stored as fixed point with 5 bits after the 2724 * binary point (i.e., scaled by 8). The following magic 2725 * is equivalent to the smoothing algorithm in rfc793 with 2726 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 2727 * point). Adjust rtt to origin 0. 2728 */ 2729 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 2730 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 2731 2732 if ((tp->t_srtt += delta) <= 0) 2733 tp->t_srtt = 1; 2734 2735 /* 2736 * We accumulate a smoothed rtt variance (actually, a 2737 * smoothed mean difference), then set the retransmit 2738 * timer to smoothed rtt + 4 times the smoothed variance. 2739 * rttvar is stored as fixed point with 4 bits after the 2740 * binary point (scaled by 16). The following is 2741 * equivalent to rfc793 smoothing with an alpha of .75 2742 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 2743 * rfc793's wired-in beta. 2744 */ 2745 if (delta < 0) 2746 delta = -delta; 2747 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 2748 if ((tp->t_rttvar += delta) <= 0) 2749 tp->t_rttvar = 1; 2750 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 2751 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2752 } else { 2753 /* 2754 * No rtt measurement yet - use the unsmoothed rtt. 2755 * Set the variance to half the rtt (so our first 2756 * retransmit happens at 3*rtt). 
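 * (With rttvar = rtt/2, the usual srtt + 4*rttvar formula works out
 * to rtt + 4*(rtt/2) = 3*rtt for that first retransmit timeout.)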
2757 */ 2758 tp->t_srtt = rtt << TCP_RTT_SHIFT; 2759 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 2760 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2761 } 2762 tp->t_rtttime = 0; 2763 tp->t_rxtshift = 0; 2764 2765 /* 2766 * the retransmit should happen at rtt + 4 * rttvar. 2767 * Because of the way we do the smoothing, srtt and rttvar 2768 * will each average +1/2 tick of bias. When we compute 2769 * the retransmit timer, we want 1/2 tick of rounding and 2770 * 1 extra tick because of +-1/2 tick uncertainty in the 2771 * firing of the timer. The bias will give us exactly the 2772 * 1.5 tick we need. But, because the bias is 2773 * statistical, we have to test that we don't drop below 2774 * the minimum feasible timer (which is 2 ticks). 2775 */ 2776 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 2777 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 2778 2779 /* 2780 * We received an ack for a packet that wasn't retransmitted; 2781 * it is probably safe to discard any error indications we've 2782 * received recently. This isn't quite right, but close enough 2783 * for now (a route might have failed after we sent a segment, 2784 * and the return path might not be symmetrical). 2785 */ 2786 tp->t_softerror = 0; 2787} 2788 2789/* 2790 * Determine a reasonable value for maxseg size. 2791 * If the route is known, check route for mtu. 2792 * If none, use an mss that can be handled on the outgoing 2793 * interface without forcing IP to fragment; if bigger than 2794 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES 2795 * to utilize large mbufs. If no route is found, route has no mtu, 2796 * or the destination isn't local, use a default, hopefully conservative 2797 * size (usually 512 or the default IP max size, but no more than the mtu 2798 * of the interface), as we can't discover anything about intervening 2799 * gateways or networks. We also initialize the congestion/slow start 2800 * window to be a single segment if the destination isn't local. 2801 * While looking at the routing entry, we also initialize other path-dependent 2802 * parameters from pre-set or cached values in the routing entry. 2803 * 2804 * Also take into account the space needed for options that we 2805 * send regularly. Make maxseg shorter by that amount to assure 2806 * that we can send maxseg amount of data even when the options 2807 * are present. Store the upper limit of the length of options plus 2808 * data in maxopd. 2809 * 2810 * In case of T/TCP, we call this routine during implicit connection 2811 * setup as well (offer = -1), to initialize maxseg from the cached 2812 * MSS of our peer. 2813 * 2814 * NOTE that this routine is only called when we process an incoming 2815 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt(). 2816 */ 2817void 2818tcp_mss_update(struct tcpcb *tp, int offer, struct hc_metrics_lite *metricptr) 2819{ 2820 int mss; 2821 u_long maxmtu; 2822 struct inpcb *inp = tp->t_inpcb; 2823 struct hc_metrics_lite metrics; 2824 int origoffer = offer; 2825 int mtuflags = 0; 2826#ifdef INET6 2827 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 2828 size_t min_protoh = isipv6 ? 2829 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 2830 sizeof (struct tcpiphdr); 2831#else 2832 const size_t min_protoh = sizeof(struct tcpiphdr); 2833#endif 2834 2835 INP_WLOCK_ASSERT(tp->t_inpcb); 2836 2837 /* Initialize. 
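 * Start from the configured defaults (tcp_mssdflt/tcp_v6mssdflt) so
 * that an early return below, e.g. when no route to the peer is
 * found, still leaves t_maxseg and t_maxopd with sane values.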
*/ 2838#ifdef INET6 2839 if (isipv6) { 2840 maxmtu = tcp_maxmtu6(&inp->inp_inc, &mtuflags); 2841 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt; 2842 } else 2843#endif 2844 { 2845 maxmtu = tcp_maxmtu(&inp->inp_inc, &mtuflags); 2846 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt; 2847 } 2848 2849 /* 2850 * No route to sender, stay with default mss and return. 2851 */ 2852 if (maxmtu == 0) 2853 return; 2854 2855 /* Check the interface for TSO capabilities. */ 2856 if (mtuflags & CSUM_TSO) 2857 tp->t_flags |= TF_TSO; 2858 2859 /* What have we got? */ 2860 switch (offer) { 2861 case 0: 2862 /* 2863 * Offer == 0 means that there was no MSS on the SYN 2864 * segment, in this case we use tcp_mssdflt as 2865 * already assigned to t_maxopd above. 2866 */ 2867 offer = tp->t_maxopd; 2868 break; 2869 2870 case -1: 2871 /* 2872 * Offer == -1 means that we didn't receive SYN yet. 2873 */ 2874 /* FALLTHROUGH */ 2875 2876 default: 2877 /* 2878 * Prevent DoS attack with too small MSS. Round up 2879 * to at least minmss. 2880 */ 2881 offer = max(offer, V_tcp_minmss); 2882 } 2883 2884 /* 2885 * rmx information is now retrieved from tcp_hostcache. 2886 */ 2887 tcp_hc_get(&inp->inp_inc, &metrics); 2888 if (metricptr != NULL) 2889 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite)); 2890 2891 /* 2892 * If there's a discovered mtu int tcp hostcache, use it 2893 * else, use the link mtu. 2894 */ 2895 if (metrics.rmx_mtu) 2896 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 2897 else { 2898#ifdef INET6 2899 if (isipv6) { 2900 mss = maxmtu - min_protoh; 2901 if (!V_path_mtu_discovery && 2902 !in6_localaddr(&inp->in6p_faddr)) 2903 mss = min(mss, V_tcp_v6mssdflt); 2904 } else 2905#endif 2906 { 2907 mss = maxmtu - min_protoh; 2908 if (!V_path_mtu_discovery && 2909 !in_localaddr(inp->inp_faddr)) 2910 mss = min(mss, V_tcp_mssdflt); 2911 } 2912 /* 2913 * XXX - The above conditional (mss = maxmtu - min_protoh) 2914 * probably violates the TCP spec. 2915 * The problem is that, since we don't know the 2916 * other end's MSS, we are supposed to use a conservative 2917 * default. But, if we do that, then MTU discovery will 2918 * never actually take place, because the conservative 2919 * default is much less than the MTUs typically seen 2920 * on the Internet today. For the moment, we'll sweep 2921 * this under the carpet. 2922 * 2923 * The conservative default might not actually be a problem 2924 * if the only case this occurs is when sending an initial 2925 * SYN with options and data to a host we've never talked 2926 * to before. Then, they will reply with an MSS value which 2927 * will get recorded and the new parameters should get 2928 * recomputed. For Further Study. 2929 */ 2930 } 2931 mss = min(mss, offer); 2932 2933 /* 2934 * Sanity check: make sure that maxopd will be large 2935 * enough to allow some data on segments even if the 2936 * all the option space is used (40bytes). Otherwise 2937 * funny things may happen in tcp_output. 2938 */ 2939 mss = max(mss, 64); 2940 2941 /* 2942 * maxopd stores the maximum length of data AND options 2943 * in a segment; maxseg is the amount of data in a normal 2944 * segment. We need to store this value (maxopd) apart 2945 * from maxseg, because now every segment carries options 2946 * and thus we normally have somewhat less data in segments. 2947 */ 2948 tp->t_maxopd = mss; 2949 2950 /* 2951 * origoffer==-1 indicates that no segments were received yet. 2952 * In this case we just guess. 
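 * That is, assume the peer will agree to the options we intend to
 * request; hence the timestamp option length is subtracted below
 * when we requested timestamps and either saw them echoed or have
 * not heard from the peer at all yet.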
2953 */ 2954 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && 2955 (origoffer == -1 || 2956 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) 2957 mss -= TCPOLEN_TSTAMP_APPA; 2958 2959#if (MCLBYTES & (MCLBYTES - 1)) == 0 2960 if (mss > MCLBYTES) 2961 mss &= ~(MCLBYTES-1); 2962#else 2963 if (mss > MCLBYTES) 2964 mss = mss / MCLBYTES * MCLBYTES; 2965#endif 2966 tp->t_maxseg = mss; 2967} 2968 2969void 2970tcp_mss(struct tcpcb *tp, int offer) 2971{ 2972 int rtt, mss; 2973 u_long bufsize; 2974 struct inpcb *inp; 2975 struct socket *so; 2976 struct hc_metrics_lite metrics; 2977#ifdef INET6 2978 int isipv6; 2979#endif 2980 KASSERT(tp != NULL, ("%s: tp == NULL", __func__)); 2981 2982 tcp_mss_update(tp, offer, &metrics); 2983 2984 mss = tp->t_maxseg; 2985 inp = tp->t_inpcb; 2986#ifdef INET6 2987 isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 2988#endif 2989 2990 /* 2991 * If there's a pipesize, change the socket buffer to that size, 2992 * don't change if sb_hiwat is different than default (then it 2993 * has been changed on purpose with setsockopt). 2994 * Make the socket buffers an integral number of mss units; 2995 * if the mss is larger than the socket buffer, decrease the mss. 2996 */ 2997 so = inp->inp_socket; 2998 SOCKBUF_LOCK(&so->so_snd); 2999 if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe) 3000 bufsize = metrics.rmx_sendpipe; 3001 else 3002 bufsize = so->so_snd.sb_hiwat; 3003 if (bufsize < mss) 3004 mss = bufsize; 3005 else { 3006 bufsize = roundup(bufsize, mss); 3007 if (bufsize > sb_max) 3008 bufsize = sb_max; 3009 if (bufsize > so->so_snd.sb_hiwat) 3010 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL); 3011 } 3012 SOCKBUF_UNLOCK(&so->so_snd); 3013 tp->t_maxseg = mss; 3014 3015 SOCKBUF_LOCK(&so->so_rcv); 3016 if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe) 3017 bufsize = metrics.rmx_recvpipe; 3018 else 3019 bufsize = so->so_rcv.sb_hiwat; 3020 if (bufsize > mss) { 3021 bufsize = roundup(bufsize, mss); 3022 if (bufsize > sb_max) 3023 bufsize = sb_max; 3024 if (bufsize > so->so_rcv.sb_hiwat) 3025 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL); 3026 } 3027 SOCKBUF_UNLOCK(&so->so_rcv); 3028 /* 3029 * While we're here, check the others too. 3030 */ 3031 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) { 3032 tp->t_srtt = rtt; 3033 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE; 3034 V_tcpstat.tcps_usedrtt++; 3035 if (metrics.rmx_rttvar) { 3036 tp->t_rttvar = metrics.rmx_rttvar; 3037 V_tcpstat.tcps_usedrttvar++; 3038 } else { 3039 /* default variation is +- 1 rtt */ 3040 tp->t_rttvar = 3041 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; 3042 } 3043 TCPT_RANGESET(tp->t_rxtcur, 3044 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, 3045 tp->t_rttmin, TCPTV_REXMTMAX); 3046 } 3047 if (metrics.rmx_ssthresh) { 3048 /* 3049 * There's some sort of gateway or interface 3050 * buffer limit on the path. Use this to set 3051 * the slow start threshhold, but set the 3052 * threshold to no less than 2*mss. 3053 */ 3054 tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh); 3055 V_tcpstat.tcps_usedssthresh++; 3056 } 3057 if (metrics.rmx_bandwidth) 3058 tp->snd_bandwidth = metrics.rmx_bandwidth; 3059 3060 /* 3061 * Set the slow-start flight size depending on whether this 3062 * is a local network or not. 3063 * 3064 * Extend this so we cache the cwnd too and retrieve it here. 3065 * Make cwnd even bigger than RFC3390 suggests but only if we 3066 * have previous experience with the remote host. 
Be careful 3067 * not to make cwnd bigger than remote receive window or our own 3068 * send socket buffer. Maybe put some additional upper bound 3069 * on the retrieved cwnd. Should do incremental updates to 3070 * hostcache when cwnd collapses so next connection doesn't 3071 * overload the path again. 3072 * 3073 * RFC3390 says only do this if SYN or SYN/ACK didn't get lost. 3074 * We currently check only in syncache_socket for that. 3075 */ 3076#define TCP_METRICS_CWND 3077#ifdef TCP_METRICS_CWND 3078 if (metrics.rmx_cwnd) 3079 tp->snd_cwnd = max(mss, 3080 min(metrics.rmx_cwnd / 2, 3081 min(tp->snd_wnd, so->so_snd.sb_hiwat))); 3082 else 3083#endif 3084 if (V_tcp_do_rfc3390) 3085 tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380)); 3086#ifdef INET6 3087 else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) || 3088 (!isipv6 && in_localaddr(inp->inp_faddr))) 3089#else 3090 else if (in_localaddr(inp->inp_faddr)) 3091#endif 3092 tp->snd_cwnd = mss * V_ss_fltsz_local; 3093 else 3094 tp->snd_cwnd = mss * V_ss_fltsz; 3095} 3096 3097/* 3098 * Determine the MSS option to send on an outgoing SYN. 3099 */ 3100int 3101tcp_mssopt(struct in_conninfo *inc) 3102{ 3103 int mss = 0; 3104 u_long maxmtu = 0; 3105 u_long thcmtu = 0; 3106 size_t min_protoh; 3107#ifdef INET6 3108 int isipv6 = inc->inc_isipv6 ? 1 : 0; 3109#endif 3110 3111 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer")); 3112 3113#ifdef INET6 3114 if (isipv6) { 3115 mss = V_tcp_v6mssdflt; 3116 maxmtu = tcp_maxmtu6(inc, NULL); 3117 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3118 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 3119 } else 3120#endif 3121 { 3122 mss = V_tcp_mssdflt; 3123 maxmtu = tcp_maxmtu(inc, NULL); 3124 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3125 min_protoh = sizeof(struct tcpiphdr); 3126 } 3127 if (maxmtu && thcmtu) 3128 mss = min(maxmtu, thcmtu) - min_protoh; 3129 else if (maxmtu || thcmtu) 3130 mss = max(maxmtu, thcmtu) - min_protoh; 3131 3132 return (mss); 3133} 3134 3135 3136/* 3137 * When a partial ack arrives, force the retransmission of the 3138 * next unacknowledged segment. Do not clear tp->t_dupacks. 3139 * By setting snd_nxt to th->th_ack, this forces the retransmission timer to 3140 * be started again. 3141 */ 3142static void 3143tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) 3144{ 3145 tcp_seq onxt = tp->snd_nxt; 3146 u_long ocwnd = tp->snd_cwnd; 3147 3148 INP_WLOCK_ASSERT(tp->t_inpcb); 3149 3150 tcp_timer_activate(tp, TT_REXMT, 0); 3151 tp->t_rtttime = 0; 3152 tp->snd_nxt = th->th_ack; 3153 /* 3154 * Set snd_cwnd to one segment beyond acknowledged offset. 3155 * (tp->snd_una has not yet been updated when this function is called.) 3156 */ 3157 tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una); 3158 tp->t_flags |= TF_ACKNOW; 3159 (void) tcp_output(tp); 3160 tp->snd_cwnd = ocwnd; 3161 if (SEQ_GT(onxt, tp->snd_nxt)) 3162 tp->snd_nxt = onxt; 3163 /* 3164 * Partial window deflation. Relies on the fact that tp->snd_una is 3165 * not updated yet. 3166 */ 3167 if (tp->snd_cwnd > th->th_ack - tp->snd_una) 3168 tp->snd_cwnd -= th->th_ack - tp->snd_una; 3169 else 3170 tp->snd_cwnd = 0; 3171 tp->snd_cwnd += tp->t_maxseg; 3172} 3173