tcp_input.c revision 185571
/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/tcp_input.c 185571 2008-12-02 21:37:28Z bz $");

#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/vimage.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#include <netinet/vinet.h>

#ifdef INET6
#include <netinet6/vinet6.h>
#endif

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /*IPSEC*/

#include <machine/in_cksum.h>
#include <security/mac/mac_framework.h>

static const int tcprexmtthresh = 3;

#ifdef VIMAGE_GLOBALS
struct	tcpstat tcpstat;
int	blackhole;
int	tcp_delack_enabled;
int	drop_synfin;
int	tcp_do_rfc3042;
int	tcp_do_rfc3390;
int	tcp_do_ecn;
int	tcp_ecn_maxretries;
int	tcp_insecure_rst;
int	tcp_do_autorcvbuf;
int	tcp_autorcvbuf_inc;
int	tcp_autorcvbuf_max;
#endif

SYSCTL_V_STRUCT(V_NET, vnet_inet, _net_inet_tcp, TCPCTL_STATS, stats,
    CTLFLAG_RW, tcpstat, tcpstat,
    "TCP statistics (struct tcpstat, netinet/tcp_var.h)");

int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0, "Log all incoming TCP segments to closed ports");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    blackhole, 0, "Do not send RST on segments to closed ports");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, delayed_ack,
    CTLFLAG_RW, tcp_delack_enabled, 0,
    "Delay ACK to try and piggyback it onto a data packet");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, drop_synfin,
    CTLFLAG_RW, drop_synfin, 0, "Drop TCP packets with SYN+FIN set");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
    tcp_do_rfc3042, 0, "Enable RFC 3042 (Limited Transmit)");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp_ecn, OID_AUTO, enable,
    CTLFLAG_RW, tcp_do_ecn, 0, "TCP ECN support");
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp_ecn, OID_AUTO, maxretries,
    CTLFLAG_RW, tcp_ecn_maxretries, 0, "Max retries before giving up on ECN");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, insecure_rst,
    CTLFLAG_RW, tcp_insecure_rst, 0,
    "Follow the old (insecure) criteria for accepting RST packets");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, recvbuf_auto,
    CTLFLAG_RW, tcp_do_autorcvbuf, 0,
    "Enable automatic receive buffer sizing");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, recvbuf_inc,
    CTLFLAG_RW, tcp_autorcvbuf_inc, 0,
    "Incrementor step size of automatic receive buffer");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, recvbuf_max,
    CTLFLAG_RW, tcp_autorcvbuf_max, 0,
    "Max size of automatic receive buffer");

#ifdef VIMAGE_GLOBALS
struct inpcbhead tcb;
struct inpcbinfo tcbinfo;
#endif
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */

static void	 tcp_dooptions(struct tcpopt *, u_char *, int, int);
static void	 tcp_do_segment(struct mbuf *, struct tcphdr *,
		     struct socket *, struct tcpcb *, int, int, uint8_t);
static void	 tcp_dropwithreset(struct mbuf *, struct tcphdr *,
		     struct tcpcb *, int, int);
static void	 tcp_pulloutofband(struct socket *,
		     struct tcphdr *, struct mbuf *, int);
static void	 tcp_xmit_timer(struct tcpcb *, int);
static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
static void inline
		 tcp_congestion_exp(struct tcpcb *);

static void inline
tcp_congestion_exp(struct tcpcb *tp)
{
	u_int win;

	win = min(tp->snd_wnd, tp->snd_cwnd) /
	    2 / tp->t_maxseg;
	if (win < 2)
		win = 2;
	tp->snd_ssthresh = win * tp->t_maxseg;
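	/*
	 * e.g., with snd_wnd = 65535, snd_cwnd = 8760 and t_maxseg = 1460,
	 * win = min(65535, 8760) / 2 / 1460 = 3 segments, so ssthresh drops
	 * to 3 * 1460 = 4380 bytes: half the data in flight, rounded down
	 * to a segment boundary and clamped to no less than 2 segments.
	 */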
	ENTER_FASTRECOVERY(tp);
	tp->snd_recover = tp->snd_max;
	if (tp->t_flags & TF_ECN_PERMIT)
		tp->t_flags |= TF_ECN_SND_CWR;
}

/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
		nd6_nud_hint(NULL, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 *	- there is no delayed ack timer in progress and
 *	- our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window and
 *		- delayed acks are enabled or
 *		- this is a half-synchronized T/TCP connection.
 */
#define DELAY_ACK(tp)							\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
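/*
 * e.g., on an established bulk receiver the first in-sequence data segment
 * arms the delayed-ACK timer; when the second segment arrives the timer is
 * already running, DELAY_ACK() evaluates false and TF_ACKNOW is set, giving
 * the usual ACK-every-other-segment behaviour.
 */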
/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	INIT_VNET_INET6(curvnet);
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return IPPROTO_DONE;
	}

	tcp_input(m, *offp);
	return IPPROTO_DONE;
}
#endif

void
tcp_input(struct mbuf *m, int off0)
{
	INIT_VNET_INET(curvnet);
#ifdef INET6
	INIT_VNET_INET6(curvnet);
#endif
#ifdef IPSEC
	INIT_VNET_IPSEC(curvnet);
#endif
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ipovly *ipov;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int optlen = 0;
	int len, tlen, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
	uint8_t iptos;
#ifdef IPFIREWALL_FORWARD
	struct m_tag *fwd_tag;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
	const int isipv6 = 0;
#endif
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	to.to_flags = 0;
	V_tcpstat.tcps_rcvtotal++;

	if (isipv6) {
#ifdef INET6
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
		ip6 = mtod(m, struct ip6_hdr *);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
			V_tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		th = (struct tcphdr *)((caddr_t)ip6 + off0);

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
#else
		th = NULL;		/* XXX: Avoid compiler warning. */
#endif
	} else {
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m, (struct mbuf *)0);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				V_tcpstat.tcps_rcvshort++;
				return;
			}
		}
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ip->ip_len;
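		/*
		 * If the hardware already computed the payload checksum
		 * (CSUM_DATA_VALID), fold in the pseudo header (addresses,
		 * length, protocol) and invert; otherwise checksum the
		 * whole segment in software.  Either way a valid segment
		 * leaves th_sum == 0 for the check below.
		 */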
		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data +
				    ip->ip_len +
				    IPPROTO_TCP));
			th->th_sum ^= 0xffff;
#ifdef TCPDEBUG
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
#endif
		} else {
			/*
			 * Checksum extended TCP header and data.
			 */
			len = sizeof (struct ip) + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
			th->th_sum = in_cksum(m, len);
		}
		if (th->th_sum) {
			V_tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
	}

#ifdef INET6
	if (isipv6)
		iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
	else
#endif
		iptos = ip->ip_tos;

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		V_tcpstat.tcps_rcvbadoff++;
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
		if (isipv6) {
#ifdef INET6
			IP6_EXTHDR_CHECK(m, off0, off, );
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
#endif
		} else {
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					V_tcpstat.tcps_rcvshort++;
					return;
				}
				ip = mtod(m, struct ip *);
				ipov = (struct ipovly *)ip;
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	th->th_seq = ntohl(th->th_seq);
	th->th_ack = ntohl(th->th_ack);
	th->th_win = ntohs(th->th_win);
	th->th_urp = ntohs(th->th_urp);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Locate pcb for segment.
	 */
	INP_INFO_WLOCK(&V_tcbinfo);
findpcb:
	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
#ifdef IPFIREWALL_FORWARD
	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

	if (fwd_tag != NULL && isipv6 == 0) {	/* IPv6 support is not yet */
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * already got one like this?
		 */
		inp = in_pcblookup_hash(&V_tcbinfo,
		    ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport,
		    0, m->m_pkthdr.rcvif);
		if (!inp) {
			/* It's new.  Try to find the ambushing socket. */
			inp = in_pcblookup_hash(&V_tcbinfo,
			    ip->ip_src, th->th_sport,
			    next_hop->sin_addr,
			    next_hop->sin_port ?
				ntohs(next_hop->sin_port) :
				th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
		}
		/* Remove the tag from the packet.  We don't need it anymore. */
		m_tag_delete(m, fwd_tag);
	} else
#endif /* IPFIREWALL_FORWARD */
	{
		if (isipv6) {
#ifdef INET6
			inp = in6_pcblookup_hash(&V_tcbinfo,
			    &ip6->ip6_src, th->th_sport,
			    &ip6->ip6_dst, th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
#endif
		} else
			inp = in_pcblookup_hash(&V_tcbinfo,
			    ip->ip_src, th->th_sport,
			    ip->ip_dst, th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
	}

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
			if ((s = tcp_log_addrs(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
		    V_blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_WLOCK(inp);

#ifdef IPSEC
#ifdef INET6
	if (isipv6 && ipsec6_in_reject(m, inp)) {
		V_ipsec6stat.in_polvio++;
		goto dropunlock;
	} else
#endif /* INET6 */
	if (ipsec4_in_reject(m, inp) != 0) {
		V_ipsec4stat.in_polvio++;
		goto dropunlock;
	}
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
			goto dropunlock;
		else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch
	 * stray or duplicate segments arriving late.  If this segment
	 * was a legitimate new connection attempt the old INPCB gets
	 * removed and we can try again to find a listening socket.
	 */
	if (inp->inp_vflag & INP_TIMEWAIT) {
		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		INP_INFO_WUNLOCK(&V_tcbinfo);
		return;
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef MAC
	INP_WLOCK_ASSERT(inp);
	if (mac_inpcb_check_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
		if (isipv6) {
#ifdef INET6
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
#endif
		} else
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

		KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
		    "tp not listening", __func__));

		bzero(&inc, sizeof(inc));
		inc.inc_isipv6 = isipv6;
#ifdef INET6
		if (isipv6) {
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;

		/*
		 * Check for an existing connection attempt in syncache if
		 * the flag is only ACK.  A successful lookup creates a new
		 * socket appended to the listen queue in SYN_RECEIVED state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * inp and tcpinfo locks.
			 */
			if (!syncache_expand(&inc, &to, th, &so, m)) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__,
					    V_tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again");
				if (V_tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_WUNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			INP_WLOCK(inp);		/* new connection */
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: ", __func__));
			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
			    iptos);
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
			return;
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			V_tcpstat.tcps_badsyn++;
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			V_tcpstat.tcps_badsyn++;
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && V_drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			V_tcpstat.tcps_badsyn++;
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated address is forbidden,
		 * we do not accept SYN to deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  We compromise it as it is much better for peer
		 * to send a RST, and RST will be the final packet
		 * for the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says like this:
		 * 1. use of deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple descriptions of deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !V_ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			if ((ia6 = ip6_getdstifaddr(m)) &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to deprecated "
					"IPv6 address rejected\n",
					s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
		}
#endif
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address. Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
			    log(LOG_DEBUG, "%s; %s: Listen socket: "
				"Connection attempt from broad- or multicast "
				"link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
		if (isipv6) {
#ifdef INET6
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to/from self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to multicast "
					"address ignored\n", s, __func__);
				goto dropunlock;
			}
#endif
		} else {
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to broad- "
					"or multicast address ignored\n",
					s, __func__);
				goto dropunlock;
			}
		}
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		tcp_dooptions(&to, optp, optlen, TO_SYN);
		syncache_add(&inc, &to, th, inp, &so, m);
		/*
		 * Entry added to syncache and mbuf consumed.
		 * Everything already unlocked by syncache_add().
		 */
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		return;
	}

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos);
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	return;

dropwithreset:
	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_INFO_WUNLOCK(&V_tcbinfo);

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	if (inp != NULL)
		INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&V_tcbinfo);

drop:
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
}

static void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos)
{
	INIT_VNET_INET(tp->t_vnet);
	int thflags, acked, ourfinisacked, needoutput = 0;
	int headlocked = 1;
	int rstreason, todrop, win;
	u_long tiwin;
	struct tcpopt to;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);

	/*
	 * Unscale the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;

	/*
	 * TCP ECN processing.
	 */
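	/*
	 * e.g., a CE mark set by a congested router causes us to set
	 * TF_ECN_SND_ECE below, and every ACK we send carries ECE until
	 * the peer acknowledges the congestion signal with CWR.
	 */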
	if (tp->t_flags & TF_ECN_PERMIT) {
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_flags |= TF_ECN_SND_ECE;
			V_tcpstat.tcps_ecn_ce++;
			break;
		case IPTOS_ECN_ECT0:
			V_tcpstat.tcps_ecn_ect0++;
			break;
		case IPTOS_ECN_ECT1:
			V_tcpstat.tcps_ecn_ect1++;
			break;
		}

		if (thflags & TH_CWR)
			tp->t_flags &= ~TF_ECN_SND_ECE;

		/*
		 * Congestion experienced.
		 * Ignore if we are already trying to recover.
		 */
		if ((thflags & TH_ECE) &&
		    SEQ_LEQ(th->th_ack, tp->snd_recover)) {
			V_tcpstat.tcps_ecn_rcwnd++;
			tcp_congestion_exp(tp);
		}
	}

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, ticks))
			to.to_tsecr = 0;
	}

	/*
	 * Process options only when we get SYN/ACK back. The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		}
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = ticks;
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    LIST_EMPTY(&tp->t_segq) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = ticks;
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    tp->snd_cwnd >= tp->snd_wnd &&
			    ((!V_tcp_do_newreno &&
			      !(tp->t_flags & TF_SACK_PERMIT) &&
			      tp->t_dupacks < tcprexmtthresh) ||
			     ((V_tcp_do_newreno ||
			       (tp->t_flags & TF_SACK_PERMIT)) &&
			      !IN_FASTRECOVERY(tp) &&
			      (to.to_flags & TOF_SACK) == 0 &&
			      TAILQ_EMPTY(&tp->snd_holes)))) {
				KASSERT(headlocked,
				    ("%s: headlocked", __func__));
				INP_INFO_WUNLOCK(&V_tcbinfo);
				headlocked = 0;
				/*
				 * This is a pure ack for outstanding data.
				 */
				++V_tcpstat.tcps_predack;
				/*
				 * "bad retransmit" recovery.
				 */
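				/*
				 * e.g., if our sole retransmit so far is
				 * ACKed within the t_badrxtwin window, the
				 * original segment was merely delayed, not
				 * lost: restore the cwnd, ssthresh and
				 * recovery state saved before the
				 * retransmit.
				 */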
				if (tp->t_rxtshift == 1 &&
				    ticks < tp->t_badrxtwin) {
					++V_tcpstat.tcps_sndrexmitbad;
					tp->snd_cwnd = tp->snd_cwnd_prev;
					tp->snd_ssthresh =
					    tp->snd_ssthresh_prev;
					tp->snd_recover = tp->snd_recover_prev;
					if (tp->t_flags & TF_WASFRECOVERY)
						ENTER_FASTRECOVERY(tp);
					tp->snd_nxt = tp->snd_max;
					tp->t_badrxtwin = 0;
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - to.to_tsecr)
						tp->t_rttlow = ticks - to.to_tsecr;
					tcp_xmit_timer(tp,
					    ticks - to.to_tsecr + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
				tcp_xmit_bandwidth_limit(tp, th->th_ack);
				acked = th->th_ack - tp->snd_una;
				V_tcpstat.tcps_rcvackpack++;
				V_tcpstat.tcps_rcvackbyte += acked;
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;
				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);
				ND6_HINT(tp); /* Some progress has been made. */

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);
				sowwakeup(so);
				if (so->so_snd.sb_cc)
					(void) tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			KASSERT(headlocked, ("%s: headlocked", __func__));
			INP_INFO_WUNLOCK(&V_tcbinfo);
			headlocked = 0;
			/*
			 * This is a pure, in-sequence data packet
			 * with nothing on the reassembly queue and
			 * we have enough buffer space to take it.
			 */
			/* Clean receiver SACK report if present */
			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			++V_tcpstat.tcps_preddat;
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			V_tcpstat.tcps_rcvpack++;
			V_tcpstat.tcps_rcvbyte += tlen;
			ND6_HINT(tp);	/* Some progress has been made */
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
			/*
			 * Automatic sizing of receive socket buffer.  Often the send
			 * buffer size is not optimally adjusted to the actual network
			 * conditions at hand (delay bandwidth product).  Setting the
			 * buffer size too small limits throughput on links with high
			 * bandwidth and high delay (eg. trans-continental/oceanic links).
			 *
			 * On the receive side the socket buffer memory is only rarely
			 * used to any significant extent.  This allows us to be much
			 * more aggressive in scaling the receive socket buffer.  For
			 * the case that the buffer space is actually used to a large
			 * extent and we run out of kernel memory we can simply drop
			 * the new segments; TCP on the sender will just retransmit it
			 * later.  Setting the buffer size too big may only consume too
			 * much kernel memory if the application doesn't read() from
			 * the socket or packet loss or reordering makes use of the
			 * reassembly queue.
			 *
			 * The criteria to step up the receive buffer one notch are:
			 *  1. the number of bytes received during the time it takes
			 *     one timestamp to be reflected back to us (the RTT);
			 *  2. received bytes per RTT is within seven eighth of the
			 *     current socket buffer size;
			 *  3. receive buffer size has not hit maximal automatic size;
			 *
			 * This algorithm does one step per RTT at most and only if
			 * we receive a bulk stream w/o packet losses or reorderings.
			 * Shrinking the buffer during idle times is not necessary as
			 * it doesn't consume any memory when idle.
			 *
			 * TODO: Only step up if the application is actually serving
			 * the buffer to better manage the socket buffer resources.
			 */
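			/*
			 * e.g., with sb_hiwat = 65536: if more than 57344
			 * bytes (7/8 of the buffer) arrived within one
			 * reflected timestamp, step the buffer up by
			 * tcp_autorcvbuf_inc, bounded by tcp_autorcvbuf_max.
			 */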
			if (V_tcp_do_autorcvbuf &&
			    to.to_tsecr &&
			    (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
				if (to.to_tsecr > tp->rfbuf_ts &&
				    to.to_tsecr - tp->rfbuf_ts < hz) {
					if (tp->rfbuf_cnt >
					    (so->so_rcv.sb_hiwat / 8 * 7) &&
					    so->so_rcv.sb_hiwat <
					    V_tcp_autorcvbuf_max) {
						newsize =
						    min(so->so_rcv.sb_hiwat +
						    V_tcp_autorcvbuf_inc,
						    V_tcp_autorcvbuf_max);
					}
					/* Start over with next RTT. */
					tp->rfbuf_ts = 0;
					tp->rfbuf_cnt = 0;
				} else
					tp->rfbuf_cnt += tlen;	/* add up */
			}

			/* Add data to socket buffer. */
			SOCKBUF_LOCK(&so->so_rcv);
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
				m_freem(m);
			} else {
				/*
				 * Set new socket buffer size.
				 * Give up when limit is reached.
				 */
				if (newsize)
					if (!sbreserve_locked(&so->so_rcv,
					    newsize, so, NULL))
						so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
				m_adj(m, drop_hdrlen);	/* delayed header drop */
				sbappendstream_locked(&so->so_rcv, m);
			}
			/* NB: sorwakeup_locked() does an implicit unlock. */
			sorwakeup_locked(so);
			if (DELAY_ACK(tp)) {
				tp->t_flags |= TF_DELACK;
			} else {
				tp->t_flags |= TF_ACKNOW;
				tcp_output(tp);
			}
			goto check_delack;
		}
	}

	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	win = sbspace(&so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));

	/* Reset receive buffer auto scaling when not in bulk receive mode. */
	tp->rfbuf_ts = 0;
	tp->rfbuf_cnt = 0;

	switch (tp->t_state) {

	/*
	 * If the state is SYN_RECEIVED:
	 *	if seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	case TCPS_SYN_RECEIVED:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		break;

	/*
	 * If the state is SYN_SENT:
	 *	if seg contains an ACK, but not for our SYN, drop the input.
	 *	if seg contains a RST, then drop the connection.
	 *	if seg does not contain SYN, then drop it.
	 * Otherwise this is an acceptable SYN segment
	 *	initialize tp->rcv_nxt and tp->irs
	 *	if seg contains ack then advance tp->snd_una
	 *	if seg contains an ECE and ECN support is enabled, the stream
	 *	    is ECN capable.
	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	 *	arrange for segment to be acked (eventually)
	 *	continue processing rest of data/controls, beginning with URG
	 */
	case TCPS_SYN_SENT:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->iss) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_UNLIMITED;
			goto dropwithreset;
		}
		if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST))
			tp = tcp_drop(tp, ECONNREFUSED);
		if (thflags & TH_RST)
			goto drop;
		if (!(thflags & TH_SYN))
			goto drop;

		tp->irs = th->th_seq;
		tcp_rcvseqinit(tp);
		if (thflags & TH_ACK) {
			V_tcpstat.tcps_connects++;
			soisconnected(so);
#ifdef MAC
			SOCK_LOCK(so);
			mac_socketpeer_set_from_mbuf(m, so);
			SOCK_UNLOCK(so);
#endif
			/* Do window scaling on this connection? */
			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
				tp->rcv_scale = tp->request_r_scale;
			}
			tp->rcv_adv += tp->rcv_wnd;
			tp->snd_una++;		/* SYN is acked */
			/*
			 * If there's data, delay ACK; if there's also a FIN
			 * ACKNOW will be turned on later.
			 */
			if (DELAY_ACK(tp) && tlen != 0)
				tcp_timer_activate(tp, TT_DELACK,
				    tcp_delacktime);
			else
				tp->t_flags |= TF_ACKNOW;

			if ((thflags & TH_ECE) && V_tcp_do_ecn) {
				tp->t_flags |= TF_ECN_PERMIT;
				V_tcpstat.tcps_ecn_shs++;
			}

			/*
			 * Received <SYN,ACK> in SYN_SENT[*] state.
			 * Transitions:
			 *	SYN_SENT  --> ESTABLISHED
			 *	SYN_SENT* --> FIN_WAIT_1
			 */
			tp->t_starttime = ticks;
			if (tp->t_flags & TF_NEEDFIN) {
				tp->t_state = TCPS_FIN_WAIT_1;
				tp->t_flags &= ~TF_NEEDFIN;
				thflags &= ~TH_SYN;
			} else {
				tp->t_state = TCPS_ESTABLISHED;
				tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
			}
		} else {
			/*
			 * Received initial SYN in SYN-SENT[*] state =>
			 * simultaneous open.  If segment contains CC option
			 * and there is a cached CC, apply TAO test.
			 * If it succeeds, connection is half-synchronized.
			 * Otherwise, do 3-way handshake:
			 *        SYN-SENT -> SYN-RECEIVED
			 *        SYN-SENT* -> SYN-RECEIVED*
			 * If there was no CC option, clear cached CC value.
			 */
			tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
			tcp_timer_activate(tp, TT_REXMT, 0);
			tp->t_state = TCPS_SYN_RECEIVED;
		}

		KASSERT(headlocked, ("%s: trimthenstep6: head not locked",
		    __func__));
		INP_WLOCK_ASSERT(tp->t_inpcb);

		/*
		 * Advance th->th_seq to correspond to first data byte.
		 * If data, trim to stay within window,
		 * dropping FIN if necessary.
		 */
		th->th_seq++;
		if (tlen > tp->rcv_wnd) {
			todrop = tlen - tp->rcv_wnd;
			m_adj(m, -todrop);
			tlen = tp->rcv_wnd;
			thflags &= ~TH_FIN;
			V_tcpstat.tcps_rcvpackafterwin++;
			V_tcpstat.tcps_rcvbyteafterwin += todrop;
		}
		tp->snd_wl1 = th->th_seq - 1;
		tp->rcv_up = th->th_seq;
		/*
		 * Client side of transaction: already sent SYN and data.
		 * If the remote host used T/TCP to validate the SYN,
		 * our data will be ACK'd; if so, enter normal data segment
		 * processing in the middle of step 5, ack processing.
		 * Otherwise, goto step 6.
		 */
		if (thflags & TH_ACK)
			goto process_ACK;

		goto step6;

	/*
	 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
	 *	do normal processing.
	 *
	 * NB: Leftover from RFC1644 T/TCP.  Cases to be reused later.
	 */
	case TCPS_LAST_ACK:
	case TCPS_CLOSING:
		break;  /* continue normal processing */
	}

	/*
	 * States other than LISTEN or SYN_SENT.
	 * First check the RST flag and sequence number since reset segments
	 * are exempt from the timestamp and connection count tests.  This
	 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
	 * below which allowed reset segments in half the sequence space
	 * to fall through and be processed (which gives forged reset
	 * segments with a random sequence number a 50 percent chance of
	 * killing a connection).
	 * Then check timestamp, if present.
	 * Then check the connection count, if present.
	 * Then check that at least some bytes of segment are within
	 * receive window.  If segment begins before rcv_nxt,
	 * drop leading data (and SYN); if nothing left, just ack.
	 *
	 *
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 * Note: this does not take into account delayed ACKs, so
	 *   we should test against last_ack_sent instead of rcv_nxt.
	 *   The sequence number in the reset segment is normally an
	 *   echo of our outgoing acknowledgement numbers, but some hosts
	 *   send a reset with the sequence number at the rightmost edge
	 *   of our receive window, and we have to handle this case.
	 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
	 *   that brute force RST attacks are possible.  To combat this,
	 *   we use a much stricter check while in the ESTABLISHED state,
	 *   only accepting RSTs where the sequence number is equal to
	 *   last_ack_sent.  In all other states (the states in which a
	 *   RST is more likely), the more permissive check is used.
	 *   If we have multiple segments in flight, the initial reset
	 *   segment sequence numbers will be to the left of last_ack_sent,
	 *   but they will eventually catch up.
	 *   In any case, it never made sense to trim reset segments to
	 *   fit the receive window since RFC 1122 says:
	 *   4.2.2.12  RST Segment: RFC-793 Section 3.4
	 *
	 *      A TCP SHOULD allow a received RST segment to include data.
	 *
	 *      DISCUSSION
	 *           It has been suggested that a RST segment could contain
	 *           ASCII text that encoded and explained the cause of the
	 *           RST.  No standard has yet been established for such
	 *           data.
	 *
	 * If the reset segment passes the sequence number test examine
	 * the state:
	 *    SYN_RECEIVED STATE:
	 *	If passive open, return to LISTEN state.
	 *	If active open, inform user that connection was refused.
	 *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
	 *	Inform user that connection was reset, and close tcb.
	 *    CLOSING, LAST_ACK STATES:
	 *	Close the tcb.
	 *    TIME_WAIT STATE:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
	 */
	if (thflags & TH_RST) {
		if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
			switch (tp->t_state) {

			case TCPS_SYN_RECEIVED:
				so->so_error = ECONNREFUSED;
				goto close;

			case TCPS_ESTABLISHED:
				if (V_tcp_insecure_rst == 0 &&
				    !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
				    SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
				    !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
				    SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
					V_tcpstat.tcps_badrst++;
					goto drop;
				}
				/* FALLTHROUGH */
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
				so->so_error = ECONNRESET;
			close:
				tp->t_state = TCPS_CLOSED;
				V_tcpstat.tcps_drops++;
				KASSERT(headlocked, ("%s: trimthenstep6: "
				    "tcp_close: head not locked", __func__));
				tp = tcp_close(tp);
				break;

			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				KASSERT(headlocked, ("%s: trimthenstep6: "
				    "tcp_close.2: head not locked", __func__));
				tp = tcp_close(tp);
				break;
			}
		}
		goto drop;
	}

	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {

		/* Check to see if ts_recent is over 24 days old.  */
		if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) {
			/*
			 * Invalidate ts_recent.  If this segment updates
			 * ts_recent, the age will be reset later and ts_recent
			 * will get a valid value.  If it does not, setting
			 * ts_recent to zero will at least satisfy the
			 * requirement that zero be placed in the timestamp
			 * echo reply when ts_recent isn't valid.  The
			 * age isn't reset until we get a valid ts_recent
			 * because we don't want out-of-order segments to be
			 * dropped when ts_recent is old.
			 */
			tp->ts_recent = 0;
		} else {
			V_tcpstat.tcps_rcvduppack++;
			V_tcpstat.tcps_rcvdupbyte += tlen;
			V_tcpstat.tcps_pawsdrop++;
			if (tlen)
				goto dropafterack;
			goto drop;
		}
	}

	/*
	 * In the SYN-RECEIVED state, validate that the packet belongs to
	 * this connection before trimming the data to fit the receive
	 * window.  Check the sequence number versus IRS since we know
	 * the sequence numbers haven't wrapped.  This is a partial fix
	 * for the "LAND" DoS attack.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
		rstreason = BANDLIM_RST_OPENPORT;
		goto dropwithreset;
	}

	todrop = tp->rcv_nxt - th->th_seq;
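	/*
	 * e.g., rcv_nxt = 1000, th_seq = 900, tlen = 300: todrop = 100, so
	 * the 100 already-received bytes are trimmed below, th_seq advances
	 * to 1000 and only the 200 new bytes are processed.
	 */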
	if (todrop > 0) {
		if (thflags & TH_SYN) {
			thflags &= ~TH_SYN;
			th->th_seq++;
			if (th->th_urp > 1)
				th->th_urp--;
			else
				thflags &= ~TH_URG;
			todrop--;
		}
		/*
		 * Following if statement from Stevens, vol. 2, p. 960.
		 */
		if (todrop > tlen
		    || (todrop == tlen && (thflags & TH_FIN) == 0)) {
			/*
			 * Any valid FIN must be to the left of the window.
			 * At this point the FIN must be a duplicate or out
			 * of sequence; drop it.
			 */
			thflags &= ~TH_FIN;

			/*
			 * Send an ACK to resynchronize and drop any data.
			 * But keep on processing for RST or ACK.
			 */
			tp->t_flags |= TF_ACKNOW;
			todrop = tlen;
			V_tcpstat.tcps_rcvduppack++;
			V_tcpstat.tcps_rcvdupbyte += todrop;
		} else {
			V_tcpstat.tcps_rcvpartduppack++;
			V_tcpstat.tcps_rcvpartdupbyte += todrop;
		}
		drop_hdrlen += todrop;	/* drop from the top afterwards */
		th->th_seq += todrop;
		tlen -= todrop;
		if (th->th_urp > todrop)
			th->th_urp -= todrop;
		else {
			thflags &= ~TH_URG;
			th->th_urp = 0;
		}
	}

	/*
	 * If new data are received on a connection after the
	 * user processes are gone, then RST the other end.
	 */
	if ((so->so_state & SS_NOFDREF) &&
	    tp->t_state > TCPS_CLOSE_WAIT && tlen) {
		char *s;

		KASSERT(headlocked, ("%s: trimthenstep6: tcp_close.3: head "
		    "not locked", __func__));
		if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) {
			log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data after socket "
			    "was closed, sending RST and removing tcpcb\n",
			    s, __func__, tcpstates[tp->t_state], tlen);
			free(s, M_TCPLOG);
		}
		tp = tcp_close(tp);
		V_tcpstat.tcps_rcvafterclose++;
		rstreason = BANDLIM_UNLIMITED;
		goto dropwithreset;
	}

	/*
	 * If segment ends after window, drop trailing data
	 * (and PUSH and FIN); if nothing left, just ACK.
	 */
	todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd);
	if (todrop > 0) {
		V_tcpstat.tcps_rcvpackafterwin++;
		if (todrop >= tlen) {
			V_tcpstat.tcps_rcvbyteafterwin += tlen;
			/*
			 * If window is closed can only take segments at
			 * window edge, and have to drop data and PUSH from
			 * incoming segments.  Continue processing, but
			 * remember to ack.  Otherwise, drop segment
			 * and ack.
			 */
			if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
				tp->t_flags |= TF_ACKNOW;
				V_tcpstat.tcps_rcvwinprobe++;
			} else
				goto dropafterack;
		} else
			V_tcpstat.tcps_rcvbyteafterwin += todrop;
		m_adj(m, -todrop);
		tlen -= todrop;
		thflags &= ~(TH_PUSH|TH_FIN);
	}

	/*
	 * If last ACK falls within this segment's sequence numbers,
	 * record its timestamp.
	 * NOTE:
	 * 1) That the test incorporates suggestions from the latest
	 *    proposal of the tcplw@cray.com list (Braden 1993/04/26).
	 * 2) That updating only on newer timestamps interferes with
	 *    our earlier PAWS tests, so this check should be solely
	 *    predicated on the sequence space of this segment.
	 * 3) That we modify the segment boundary check to be
	 *        Last.ACK.Sent <= SEG.SEQ + SEG.Len
	 *    instead of RFC1323's
	 *        Last.ACK.Sent < SEG.SEQ + SEG.Len,
	 *    This modified check allows us to overcome RFC1323's
	 *    limitations as described in Stevens TCP/IP Illustrated
	 *    Vol. 2 p.869.  In such cases, we can still calculate the
	 *    RTT correctly when RCV.NXT == Last.ACK.Sent.
	 */
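	/*
	 * e.g., a pure ACK with SEG.SEQ == Last.ACK.Sent and SEG.Len == 0
	 * passes the "<=" form of the test below and updates ts_recent,
	 * where RFC1323's strict "<" would have skipped it.
	 */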
	if ((to.to_flags & TOF_TS) != 0 &&
	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
		((thflags & (TH_SYN|TH_FIN)) != 0))) {
		tp->ts_recent_age = ticks;
		tp->ts_recent = to.to_tsval;
	}

	/*
	 * If a SYN is in the window, then this is an
	 * error and we send an RST and drop the connection.
	 */
	if (thflags & TH_SYN) {
		KASSERT(headlocked, ("%s: tcp_drop: trimthenstep6: "
		    "head not locked", __func__));
		tp = tcp_drop(tp, ECONNRESET);
		rstreason = BANDLIM_UNLIMITED;
		goto drop;
	}

	/*
	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN
	 * flag is on (half-synchronized state), then queue data for
	 * later processing; else drop segment and return.
	 */
	if ((thflags & TH_ACK) == 0) {
		if (tp->t_state == TCPS_SYN_RECEIVED ||
		    (tp->t_flags & TF_NEEDSYN))
			goto step6;
		else if (tp->t_flags & TF_ACKNOW)
			goto dropafterack;
		else
			goto drop;
	}

	/*
	 * Ack processing.
	 */
	switch (tp->t_state) {

	/*
	 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
	 * ESTABLISHED state and continue processing.
	 * The ACK was checked above.
	 */
	case TCPS_SYN_RECEIVED:

		V_tcpstat.tcps_connects++;
		soisconnected(so);
		/* Do window scaling? */
		if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
		    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
			tp->rcv_scale = tp->request_r_scale;
			tp->snd_wnd = tiwin;
		}
		/*
		 * Make transitions:
		 *      SYN-RECEIVED  -> ESTABLISHED
		 *      SYN-RECEIVED* -> FIN-WAIT-1
		 */
		tp->t_starttime = ticks;
		if (tp->t_flags & TF_NEEDFIN) {
			tp->t_state = TCPS_FIN_WAIT_1;
			tp->t_flags &= ~TF_NEEDFIN;
		} else {
			tp->t_state = TCPS_ESTABLISHED;
			tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
		}
		/*
		 * If segment contains data or ACK, will call tcp_reass()
		 * later; if not, do so now to pass queued data to user.
		 */
		if (tlen == 0 && (thflags & TH_FIN) == 0)
			(void) tcp_reass(tp, (struct tcphdr *)0, 0,
			    (struct mbuf *)0);
		tp->snd_wl1 = th->th_seq - 1;
		/* FALLTHROUGH */

	/*
	 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
	 * ACKs.  If the ack is in the range
	 *	tp->snd_una < th->th_ack <= tp->snd_max
	 * then advance tp->snd_una to th->th_ack and drop
	 * data from the retransmission queue.  If this ACK reflects
	 * more up to date window information we update our window information.
	 */
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_FIN_WAIT_2:
	case TCPS_CLOSE_WAIT:
	case TCPS_CLOSING:
	case TCPS_LAST_ACK:
		if (SEQ_GT(th->th_ack, tp->snd_max)) {
			V_tcpstat.tcps_rcvacktoomuch++;
			goto dropafterack;
		}
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    ((to.to_flags & TOF_SACK) ||
		     !TAILQ_EMPTY(&tp->snd_holes)))
			tcp_sack_doack(tp, &to, th->th_ack);
		if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
			if (tlen == 0 && tiwin == tp->snd_wnd) {
				V_tcpstat.tcps_rcvdupack++;
				/*
				 * If we have outstanding data (other than
				 * a window probe), this is a completely
				 * duplicate ack (ie, window info didn't
				 * change), the ack is the biggest we've
				 * seen and we've seen exactly our rexmt
				 * threshold of them, assume a packet
				 * has been dropped and retransmit it.
				 * Kludge snd_nxt & the congestion
				 * window so we send only this one
				 * packet.
				 *
				 * We know we're losing at the current
				 * window size so do congestion avoidance
				 * (set ssthresh to half the current window
				 * and pull our congestion window back to
				 * the new ssthresh).
				 *
				 * Dup acks mean that packets have left the
				 * network (they're now cached at the receiver)
				 * so bump cwnd by the amount in the receiver
				 * to keep a constant cwnd packets in the
				 * network.
				 *
				 * When using TCP ECN, notify the peer that
				 * we reduced the cwnd.
				 */
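				/*
				 * e.g., with t_maxseg = 1460 the third
				 * duplicate ACK halves ssthresh via
				 * tcp_congestion_exp(), retransmits the
				 * lost segment with cwnd = 1460, then
				 * reinflates cwnd by one t_maxseg for each
				 * further dup ACK until new data is ACKed.
				 */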
1852 * Kludge snd_nxt & the congestion 1853 * window so we send only this one 1854 * packet. 1855 * 1856 * We know we're losing at the current 1857 * window size so do congestion avoidance 1858 * (set ssthresh to half the current window 1859 * and pull our congestion window back to 1860 * the new ssthresh). 1861 * 1862 * Dup acks mean that packets have left the 1863 * network (they're now cached at the receiver) 1864 * so bump cwnd by the amount in the receiver 1865 * to keep a constant cwnd packets in the 1866 * network. 1867 * 1868 * When using TCP ECN, notify the peer that 1869 * we reduced the cwnd. 1870 */ 1871 if (!tcp_timer_active(tp, TT_REXMT) || 1872 th->th_ack != tp->snd_una) 1873 tp->t_dupacks = 0; 1874 else if (++tp->t_dupacks > tcprexmtthresh || 1875 ((V_tcp_do_newreno || 1876 (tp->t_flags & TF_SACK_PERMIT)) && 1877 IN_FASTRECOVERY(tp))) { 1878 if ((tp->t_flags & TF_SACK_PERMIT) && 1879 IN_FASTRECOVERY(tp)) { 1880 int awnd; 1881 1882 /* 1883 * Compute the amount of data in flight first. 1884 * We can inject new data into the pipe iff 1885 * we have less than 1/2 the original window's 1886 * worth of data in flight. 1887 */ 1888 awnd = (tp->snd_nxt - tp->snd_fack) + 1889 tp->sackhint.sack_bytes_rexmit; 1890 if (awnd < tp->snd_ssthresh) { 1891 tp->snd_cwnd += tp->t_maxseg; 1892 if (tp->snd_cwnd > tp->snd_ssthresh) 1893 tp->snd_cwnd = tp->snd_ssthresh; 1894 } 1895 } else 1896 tp->snd_cwnd += tp->t_maxseg; 1897 (void) tcp_output(tp); 1898 goto drop; 1899 } else if (tp->t_dupacks == tcprexmtthresh) { 1900 tcp_seq onxt = tp->snd_nxt; 1901 1902 /* 1903 * If we're doing sack, check to 1904 * see if we're already in sack 1905 * recovery. If we're not doing sack, 1906 * check to see if we're in newreno 1907 * recovery. 1908 */ 1909 if (tp->t_flags & TF_SACK_PERMIT) { 1910 if (IN_FASTRECOVERY(tp)) { 1911 tp->t_dupacks = 0; 1912 break; 1913 } 1914 } else if (V_tcp_do_newreno || 1915 V_tcp_do_ecn) { 1916 if (SEQ_LEQ(th->th_ack, 1917 tp->snd_recover)) { 1918 tp->t_dupacks = 0; 1919 break; 1920 } 1921 } 1922 tcp_congestion_exp(tp); 1923 tcp_timer_activate(tp, TT_REXMT, 0); 1924 tp->t_rtttime = 0; 1925 if (tp->t_flags & TF_SACK_PERMIT) { 1926 V_tcpstat.tcps_sack_recovery_episode++; 1927 tp->sack_newdata = tp->snd_nxt; 1928 tp->snd_cwnd = tp->t_maxseg; 1929 (void) tcp_output(tp); 1930 goto drop; 1931 } 1932 tp->snd_nxt = th->th_ack; 1933 tp->snd_cwnd = tp->t_maxseg; 1934 (void) tcp_output(tp); 1935 KASSERT(tp->snd_limited <= 2, 1936 ("%s: tp->snd_limited too big", 1937 __func__)); 1938 tp->snd_cwnd = tp->snd_ssthresh + 1939 tp->t_maxseg * 1940 (tp->t_dupacks - tp->snd_limited); 1941 if (SEQ_GT(onxt, tp->snd_nxt)) 1942 tp->snd_nxt = onxt; 1943 goto drop; 1944 } else if (V_tcp_do_rfc3042) { 1945 u_long oldcwnd = tp->snd_cwnd; 1946 tcp_seq oldsndmax = tp->snd_max; 1947 u_int sent; 1948 1949 KASSERT(tp->t_dupacks == 1 || 1950 tp->t_dupacks == 2, 1951 ("%s: dupacks not 1 or 2", 1952 __func__)); 1953 if (tp->t_dupacks == 1) 1954 tp->snd_limited = 0; 1955 tp->snd_cwnd = 1956 (tp->snd_nxt - tp->snd_una) + 1957 (tp->t_dupacks - tp->snd_limited) * 1958 tp->t_maxseg; 1959 (void) tcp_output(tp); 1960 sent = tp->snd_max - oldsndmax; 1961 if (sent > tp->t_maxseg) { 1962 KASSERT((tp->t_dupacks == 2 && 1963 tp->snd_limited == 0) || 1964 (sent == tp->t_maxseg + 1 && 1965 tp->t_flags & TF_SENTFIN), 1966 ("%s: sent too much", 1967 __func__)); 1968 tp->snd_limited = 2; 1969 } else if (sent > 0) 1970 ++tp->snd_limited; 1971 tp->snd_cwnd = oldcwnd; 1972 goto drop; 1973 } 1974 } else 1975 tp->t_dupacks = 0; 1976 
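			/*
			 * Nothing further to do for this old ACK
			 * (th_ack <= snd_una): it acknowledges nothing
			 * new, so skip the remainder of ACK processing.
			 */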
break; 1977 } 1978 1979 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 1980 ("%s: th_ack <= snd_una", __func__)); 1981 1982 /* 1983 * If the congestion window was inflated to account 1984 * for the other side's cached packets, retract it. 1985 */ 1986 if (V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) { 1987 if (IN_FASTRECOVERY(tp)) { 1988 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 1989 if (tp->t_flags & TF_SACK_PERMIT) 1990 tcp_sack_partialack(tp, th); 1991 else 1992 tcp_newreno_partial_ack(tp, th); 1993 } else { 1994 /* 1995 * Out of fast recovery. 1996 * Window inflation should have left us 1997 * with approximately snd_ssthresh 1998 * outstanding data. 1999 * But in case we would be inclined to 2000 * send a burst, better to do it via 2001 * the slow start mechanism. 2002 */ 2003 if (SEQ_GT(th->th_ack + 2004 tp->snd_ssthresh, 2005 tp->snd_max)) 2006 tp->snd_cwnd = tp->snd_max - 2007 th->th_ack + 2008 tp->t_maxseg; 2009 else 2010 tp->snd_cwnd = tp->snd_ssthresh; 2011 } 2012 } 2013 } else { 2014 if (tp->t_dupacks >= tcprexmtthresh && 2015 tp->snd_cwnd > tp->snd_ssthresh) 2016 tp->snd_cwnd = tp->snd_ssthresh; 2017 } 2018 tp->t_dupacks = 0; 2019 /* 2020 * If we reach this point, ACK is not a duplicate, 2021 * i.e., it ACKs something we sent. 2022 */ 2023 if (tp->t_flags & TF_NEEDSYN) { 2024 /* 2025 * T/TCP: Connection was half-synchronized, and our 2026 * SYN has been ACK'd (so connection is now fully 2027 * synchronized). Go to non-starred state, 2028 * increment snd_una for ACK of SYN, and check if 2029 * we can do window scaling. 2030 */ 2031 tp->t_flags &= ~TF_NEEDSYN; 2032 tp->snd_una++; 2033 /* Do window scaling? */ 2034 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2035 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2036 tp->rcv_scale = tp->request_r_scale; 2037 /* Send window already scaled. */ 2038 } 2039 } 2040 2041process_ACK: 2042 KASSERT(headlocked, ("%s: process_ACK: head not locked", 2043 __func__)); 2044 INP_WLOCK_ASSERT(tp->t_inpcb); 2045 2046 acked = th->th_ack - tp->snd_una; 2047 V_tcpstat.tcps_rcvackpack++; 2048 V_tcpstat.tcps_rcvackbyte += acked; 2049 2050 /* 2051 * If we just performed our first retransmit, and the ACK 2052 * arrives within our recovery window, then it was a mistake 2053 * to do the retransmit in the first place. Recover our 2054 * original cwnd and ssthresh, and proceed to transmit where 2055 * we left off. 2056 */ 2057 if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) { 2058 ++V_tcpstat.tcps_sndrexmitbad; 2059 tp->snd_cwnd = tp->snd_cwnd_prev; 2060 tp->snd_ssthresh = tp->snd_ssthresh_prev; 2061 tp->snd_recover = tp->snd_recover_prev; 2062 if (tp->t_flags & TF_WASFRECOVERY) 2063 ENTER_FASTRECOVERY(tp); 2064 tp->snd_nxt = tp->snd_max; 2065 tp->t_badrxtwin = 0; /* XXX probably not required */ 2066 } 2067 2068 /* 2069 * If we have a timestamp reply, update smoothed 2070 * round trip time. If no timestamp is present but 2071 * transmit timer is running and timed sequence 2072 * number was acked, update smoothed round trip time. 2073 * Since we now have an rtt measurement, cancel the 2074 * timer backoff (cf., Phil Karn's retransmit alg.). 2075 * Recompute the initial retransmit timer. 2076 * 2077 * Some boxes send broken timestamp replies 2078 * during the SYN+ACK phase, ignore 2079 * timestamps of 0 or we could calculate a 2080 * huge RTT and blow up the retransmit timer. 
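 *
 * E.g. a peer that echoes to_tsecr == 0 would make ticks - to.to_tsecr
 * look like an enormous RTT; the to_tsecr != 0 test below keeps such
 * replies out of tcp_xmit_timer().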
2081 */ 2082 if ((to.to_flags & TOF_TS) != 0 && 2083 to.to_tsecr) { 2084 if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr) 2085 tp->t_rttlow = ticks - to.to_tsecr; 2086 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1); 2087 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2088 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2089 tp->t_rttlow = ticks - tp->t_rtttime; 2090 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2091 } 2092 tcp_xmit_bandwidth_limit(tp, th->th_ack); 2093 2094 /* 2095 * If all outstanding data is acked, stop retransmit 2096 * timer and remember to restart (more output or persist). 2097 * If there is more data to be acked, restart retransmit 2098 * timer, using current (possibly backed-off) value. 2099 */ 2100 if (th->th_ack == tp->snd_max) { 2101 tcp_timer_activate(tp, TT_REXMT, 0); 2102 needoutput = 1; 2103 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2104 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2105 2106 /* 2107 * If no data (only SYN) was ACK'd, 2108 * skip rest of ACK processing. 2109 */ 2110 if (acked == 0) 2111 goto step6; 2112 2113 /* 2114 * When new data is acked, open the congestion window. 2115 * If the window gives us less than ssthresh packets 2116 * in flight, open exponentially (maxseg per packet). 2117 * Otherwise open linearly: maxseg per window 2118 * (maxseg^2 / cwnd per packet). 2119 * If cwnd > maxseg^2, fix the cwnd increment at 1 byte 2120 * to avoid capping cwnd (as suggested in RFC2581). 2121 */ 2122 if ((!V_tcp_do_newreno && !(tp->t_flags & TF_SACK_PERMIT)) || 2123 !IN_FASTRECOVERY(tp)) { 2124 u_int cw = tp->snd_cwnd; 2125 u_int incr = tp->t_maxseg; 2126 if (cw > tp->snd_ssthresh) 2127 incr = max((incr * incr / cw), 1); 2128 tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale); 2129 } 2130 SOCKBUF_LOCK(&so->so_snd); 2131 if (acked > so->so_snd.sb_cc) { 2132 tp->snd_wnd -= so->so_snd.sb_cc; 2133 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc); 2134 ourfinisacked = 1; 2135 } else { 2136 sbdrop_locked(&so->so_snd, acked); 2137 tp->snd_wnd -= acked; 2138 ourfinisacked = 0; 2139 } 2140 /* NB: sowwakeup_locked() does an implicit unlock. */ 2141 sowwakeup_locked(so); 2142 /* Detect una wraparound. */ 2143 if ((V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) && 2144 !IN_FASTRECOVERY(tp) && 2145 SEQ_GT(tp->snd_una, tp->snd_recover) && 2146 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2147 tp->snd_recover = th->th_ack - 1; 2148 if ((V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) && 2149 IN_FASTRECOVERY(tp) && 2150 SEQ_GEQ(th->th_ack, tp->snd_recover)) 2151 EXIT_FASTRECOVERY(tp); 2152 tp->snd_una = th->th_ack; 2153 if (tp->t_flags & TF_SACK_PERMIT) { 2154 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2155 tp->snd_recover = tp->snd_una; 2156 } 2157 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2158 tp->snd_nxt = tp->snd_una; 2159 2160 switch (tp->t_state) { 2161 2162 /* 2163 * In FIN_WAIT_1 STATE in addition to the processing 2164 * for the ESTABLISHED state if our FIN is now acknowledged 2165 * then enter FIN_WAIT_2. 2166 */ 2167 case TCPS_FIN_WAIT_1: 2168 if (ourfinisacked) { 2169 /* 2170 * If we can't receive any more 2171 * data, then closing user can proceed. 2172 * Starting the timer is contrary to the 2173 * specification, but if we don't get a FIN 2174 * we'll hang forever. 2175 * 2176 * XXXjl: 2177 * we should release the tp also, and use a 2178 * compressed state. 2179 */ 2180 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2181 int timeout; 2182 2183 soisdisconnected(so); 2184 timeout = (tcp_fast_finwait2_recycle) ? 
2185 tcp_finwait2_timeout : tcp_maxidle; 2186 tcp_timer_activate(tp, TT_2MSL, timeout); 2187 } 2188 tp->t_state = TCPS_FIN_WAIT_2; 2189 } 2190 break; 2191 2192 /* 2193 * In CLOSING STATE in addition to the processing for 2194 * the ESTABLISHED state if the ACK acknowledges our FIN 2195 * then enter the TIME-WAIT state, otherwise ignore 2196 * the segment. 2197 */ 2198 case TCPS_CLOSING: 2199 if (ourfinisacked) { 2200 KASSERT(headlocked, ("%s: process_ACK: " 2201 "head not locked", __func__)); 2202 tcp_twstart(tp); 2203 INP_INFO_WUNLOCK(&V_tcbinfo); 2204 headlocked = 0; 2205 m_freem(m); 2206 return; 2207 } 2208 break; 2209 2210 /* 2211 * In LAST_ACK, we may still be waiting for data to drain 2212 * and/or to be acked, as well as for the ack of our FIN. 2213 * If our FIN is now acknowledged, delete the TCB, 2214 * enter the closed state and return. 2215 */ 2216 case TCPS_LAST_ACK: 2217 if (ourfinisacked) { 2218 KASSERT(headlocked, ("%s: process_ACK: " 2219 "tcp_close: head not locked", __func__)); 2220 tp = tcp_close(tp); 2221 goto drop; 2222 } 2223 break; 2224 } 2225 } 2226 2227step6: 2228 KASSERT(headlocked, ("%s: step6: head not locked", __func__)); 2229 INP_WLOCK_ASSERT(tp->t_inpcb); 2230 2231 /* 2232 * Update window information. 2233 * Don't look at window if no ACK: TAC's send garbage on first SYN. 2234 */ 2235 if ((thflags & TH_ACK) && 2236 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2237 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2238 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2239 /* keep track of pure window updates */ 2240 if (tlen == 0 && 2241 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2242 V_tcpstat.tcps_rcvwinupd++; 2243 tp->snd_wnd = tiwin; 2244 tp->snd_wl1 = th->th_seq; 2245 tp->snd_wl2 = th->th_ack; 2246 if (tp->snd_wnd > tp->max_sndwnd) 2247 tp->max_sndwnd = tp->snd_wnd; 2248 needoutput = 1; 2249 } 2250 2251 /* 2252 * Process segments with URG. 2253 */ 2254 if ((thflags & TH_URG) && th->th_urp && 2255 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2256 /* 2257 * This is a kludge, but if we receive and accept 2258 * random urgent pointers, we'll crash in 2259 * soreceive. It's hard to imagine someone 2260 * actually wanting to send this much urgent data. 2261 */ 2262 SOCKBUF_LOCK(&so->so_rcv); 2263 if (th->th_urp + so->so_rcv.sb_cc > sb_max) { 2264 th->th_urp = 0; /* XXX */ 2265 thflags &= ~TH_URG; /* XXX */ 2266 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2267 goto dodata; /* XXX */ 2268 } 2269 /* 2270 * If this segment advances the known urgent pointer, 2271 * then mark the data stream. This should not happen 2272 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2273 * a FIN has been received from the remote side. 2274 * In these states we ignore the URG. 2275 * 2276 * According to RFC961 (Assigned Protocols), 2277 * the urgent pointer points to the last octet 2278 * of urgent data. We continue, however, 2279 * to consider it to indicate the first octet 2280 * of data past the urgent section as the original 2281 * spec states (in one of two places). 2282 */ 2283 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2284 tp->rcv_up = th->th_seq + th->th_urp; 2285 so->so_oobmark = so->so_rcv.sb_cc + 2286 (tp->rcv_up - tp->rcv_nxt) - 1; 2287 if (so->so_oobmark == 0) 2288 so->so_rcv.sb_state |= SBS_RCVATMARK; 2289 sohasoutofband(so); 2290 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2291 } 2292 SOCKBUF_UNLOCK(&so->so_rcv); 2293 /* 2294 * Remove out of band data so it doesn't get presented to the user.
2295 * This can happen independently of advancing the URG pointer, 2296 * but if two URG's are pending at once, some out-of-band 2297 * data may creep in... ick. 2298 */ 2299 if (th->th_urp <= (u_long)tlen && 2300 !(so->so_options & SO_OOBINLINE)) { 2301 /* hdr drop is delayed */ 2302 tcp_pulloutofband(so, th, m, drop_hdrlen); 2303 } 2304 } else { 2305 /* 2306 * If no out of band data is expected, 2307 * pull receive urgent pointer along 2308 * with the receive window. 2309 */ 2310 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2311 tp->rcv_up = tp->rcv_nxt; 2312 } 2313dodata: /* XXX */ 2314 KASSERT(headlocked, ("%s: dodata: head not locked", __func__)); 2315 INP_WLOCK_ASSERT(tp->t_inpcb); 2316 2317 /* 2318 * Process the segment text, merging it into the TCP sequencing queue, 2319 * and arranging for acknowledgment of receipt if necessary. 2320 * This process logically involves adjusting tp->rcv_wnd as data 2321 * is presented to the user (this happens in tcp_usrreq.c, 2322 * case PRU_RCVD). If a FIN has already been received on this 2323 * connection then we just ignore the text. 2324 */ 2325 if ((tlen || (thflags & TH_FIN)) && 2326 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2327 tcp_seq save_start = th->th_seq; 2328 m_adj(m, drop_hdrlen); /* delayed header drop */ 2329 /* 2330 * Insert segment which includes th into TCP reassembly queue 2331 * with control block tp. Set thflags to whether reassembly now 2332 * includes a segment with FIN. This handles the common case 2333 * inline (segment is the next to be received on an established 2334 * connection, and the queue is empty), avoiding linkage into 2335 * and removal from the queue and repetition of various 2336 * conversions. 2337 * Set DELACK for segments received in order, but ack 2338 * immediately when segments are out of order (so 2339 * fast retransmit can work). 2340 */ 2341 if (th->th_seq == tp->rcv_nxt && 2342 LIST_EMPTY(&tp->t_segq) && 2343 TCPS_HAVEESTABLISHED(tp->t_state)) { 2344 if (DELAY_ACK(tp)) 2345 tp->t_flags |= TF_DELACK; 2346 else 2347 tp->t_flags |= TF_ACKNOW; 2348 tp->rcv_nxt += tlen; 2349 thflags = th->th_flags & TH_FIN; 2350 V_tcpstat.tcps_rcvpack++; 2351 V_tcpstat.tcps_rcvbyte += tlen; 2352 ND6_HINT(tp); 2353 SOCKBUF_LOCK(&so->so_rcv); 2354 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 2355 m_freem(m); 2356 else 2357 sbappendstream_locked(&so->so_rcv, m); 2358 /* NB: sorwakeup_locked() does an implicit unlock. */ 2359 sorwakeup_locked(so); 2360 } else { 2361 /* 2362 * XXX: Due to the header drop above "th" is 2363 * theoretically invalid by now. Fortunately 2364 * m_adj() doesn't actually free any mbufs 2365 * when trimming from the head. 2366 */ 2367 thflags = tcp_reass(tp, th, &tlen, m); 2368 tp->t_flags |= TF_ACKNOW; 2369 } 2370 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT)) 2371 tcp_update_sack_list(tp, save_start, save_start + tlen); 2372#if 0 2373 /* 2374 * Note the amount of data that peer has sent into 2375 * our window, in order to estimate the sender's 2376 * buffer size. 2377 * XXX: Unused. 2378 */ 2379 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 2380#endif 2381 } else { 2382 m_freem(m); 2383 thflags &= ~TH_FIN; 2384 } 2385 2386 /* 2387 * If a FIN is received, ACK the FIN and let the user know 2388 * that the connection is closing. 2389 */ 2390 if (thflags & TH_FIN) { 2391 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2392 socantrcvmore(so); 2393 /* 2394 * If connection is half-synchronized 2395 * (ie NEEDSYN flag on) then delay ACK, 2396 * so it may be piggybacked when SYN is sent.
2397 * Otherwise, since we received a FIN then no 2398 * more input can be expected, send ACK now. 2399 */ 2400 if (tp->t_flags & TF_NEEDSYN) 2401 tp->t_flags |= TF_DELACK; 2402 else 2403 tp->t_flags |= TF_ACKNOW; 2404 tp->rcv_nxt++; 2405 } 2406 switch (tp->t_state) { 2407 2408 /* 2409 * In SYN_RECEIVED and ESTABLISHED STATES 2410 * enter the CLOSE_WAIT state. 2411 */ 2412 case TCPS_SYN_RECEIVED: 2413 tp->t_starttime = ticks; 2414 /* FALLTHROUGH */ 2415 case TCPS_ESTABLISHED: 2416 tp->t_state = TCPS_CLOSE_WAIT; 2417 break; 2418 2419 /* 2420 * If still in FIN_WAIT_1 STATE FIN has not been acked so 2421 * enter the CLOSING state. 2422 */ 2423 case TCPS_FIN_WAIT_1: 2424 tp->t_state = TCPS_CLOSING; 2425 break; 2426 2427 /* 2428 * In FIN_WAIT_2 state enter the TIME_WAIT state, 2429 * starting the time-wait timer, turning off the other 2430 * standard timers. 2431 */ 2432 case TCPS_FIN_WAIT_2: 2433 KASSERT(headlocked == 1, ("%s: dodata: " 2434 "TCP_FIN_WAIT_2: head not locked", __func__)); 2435 tcp_twstart(tp); 2436 INP_INFO_WUNLOCK(&V_tcbinfo); 2437 return; 2438 } 2439 } 2440 INP_INFO_WUNLOCK(&V_tcbinfo); 2441 headlocked = 0; 2442#ifdef TCPDEBUG 2443 if (so->so_options & SO_DEBUG) 2444 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 2445 &tcp_savetcp, 0); 2446#endif 2447 2448 /* 2449 * Return any desired output. 2450 */ 2451 if (needoutput || (tp->t_flags & TF_ACKNOW)) 2452 (void) tcp_output(tp); 2453 2454check_delack: 2455 KASSERT(headlocked == 0, ("%s: check_delack: head locked", 2456 __func__)); 2457 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 2458 INP_WLOCK_ASSERT(tp->t_inpcb); 2459 if (tp->t_flags & TF_DELACK) { 2460 tp->t_flags &= ~TF_DELACK; 2461 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 2462 } 2463 INP_WUNLOCK(tp->t_inpcb); 2464 return; 2465 2466dropafterack: 2467 KASSERT(headlocked, ("%s: dropafterack: head not locked", __func__)); 2468 /* 2469 * Generate an ACK dropping incoming segment if it occupies 2470 * sequence space, where the ACK reflects our state. 2471 * 2472 * We can now skip the test for the RST flag since all 2473 * paths to this code happen after packets containing 2474 * RST have been dropped. 2475 * 2476 * In the SYN-RECEIVED state, don't send an ACK unless the 2477 * segment we received passes the SYN-RECEIVED ACK test. 2478 * If it fails send a RST. This breaks the loop in the 2479 * "LAND" DoS attack, and also prevents an ACK storm 2480 * between two listening ports that have been sent forged 2481 * SYN segments, each with the source address of the other. 2482 */ 2483 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 2484 (SEQ_GT(tp->snd_una, th->th_ack) || 2485 SEQ_GT(th->th_ack, tp->snd_max)) ) { 2486 rstreason = BANDLIM_RST_OPENPORT; 2487 goto dropwithreset; 2488 } 2489#ifdef TCPDEBUG 2490 if (so->so_options & SO_DEBUG) 2491 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2492 &tcp_savetcp, 0); 2493#endif 2494 KASSERT(headlocked, ("%s: headlocked should be 1", __func__)); 2495 INP_INFO_WUNLOCK(&V_tcbinfo); 2496 tp->t_flags |= TF_ACKNOW; 2497 (void) tcp_output(tp); 2498 INP_WUNLOCK(tp->t_inpcb); 2499 m_freem(m); 2500 return; 2501 2502dropwithreset: 2503 KASSERT(headlocked, ("%s: dropwithreset: head not locked", __func__)); 2504 INP_INFO_WUNLOCK(&V_tcbinfo); 2505 2506 if (tp != NULL) { 2507 tcp_dropwithreset(m, th, tp, tlen, rstreason); 2508 INP_WUNLOCK(tp->t_inpcb); 2509 } else 2510 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 2511 return; 2512 2513drop: 2514 /* 2515 * Drop space held by incoming segment and return. 
2516 */ 2517#ifdef TCPDEBUG 2518 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2519 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2520 &tcp_savetcp, 0); 2521#endif 2522 if (tp != NULL) 2523 INP_WUNLOCK(tp->t_inpcb); 2524 if (headlocked) 2525 INP_INFO_WUNLOCK(&V_tcbinfo); 2526 m_freem(m); 2527} 2528 2529/* 2530 * Issue RST and make ACK acceptable to originator of segment. 2531 * The mbuf must still include the original packet header. 2532 * tp may be NULL. 2533 */ 2534static void 2535tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 2536 int tlen, int rstreason) 2537{ 2538 struct ip *ip; 2539#ifdef INET6 2540 struct ip6_hdr *ip6; 2541#endif 2542 2543 if (tp != NULL) { 2544 INP_WLOCK_ASSERT(tp->t_inpcb); 2545 } 2546 2547 /* Don't bother if destination was broadcast/multicast. */ 2548 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 2549 goto drop; 2550#ifdef INET6 2551 if (mtod(m, struct ip *)->ip_v == 6) { 2552 ip6 = mtod(m, struct ip6_hdr *); 2553 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 2554 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 2555 goto drop; 2556 /* IPv6 anycast check is done at tcp6_input() */ 2557 } else 2558#endif 2559 { 2560 ip = mtod(m, struct ip *); 2561 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 2562 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 2563 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 2564 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 2565 goto drop; 2566 } 2567 2568 /* Perform bandwidth limiting. */ 2569 if (badport_bandlim(rstreason) < 0) 2570 goto drop; 2571 2572 /* tcp_respond consumes the mbuf chain. */ 2573 if (th->th_flags & TH_ACK) { 2574 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 2575 th->th_ack, TH_RST); 2576 } else { 2577 if (th->th_flags & TH_SYN) 2578 tlen++; 2579 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 2580 (tcp_seq)0, TH_RST|TH_ACK); 2581 } 2582 return; 2583drop: 2584 m_freem(m); 2585} 2586 2587/* 2588 * Parse TCP options and place in tcpopt. 
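 *
 * Except for EOL and NOP, every option is a (kind, length, data)
 * triple; e.g. an MSS of 1460 arrives on the wire as the four bytes
 * 0x02 0x04 0x05 0xb4, which the TCPOPT_MAXSEG case copies out and
 * byte-swaps with ntohs().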
2589 */ 2590static void 2591tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 2592{ 2593 INIT_VNET_INET(curvnet); 2594 int opt, optlen; 2595 2596 to->to_flags = 0; 2597 for (; cnt > 0; cnt -= optlen, cp += optlen) { 2598 opt = cp[0]; 2599 if (opt == TCPOPT_EOL) 2600 break; 2601 if (opt == TCPOPT_NOP) 2602 optlen = 1; 2603 else { 2604 if (cnt < 2) 2605 break; 2606 optlen = cp[1]; 2607 if (optlen < 2 || optlen > cnt) 2608 break; 2609 } 2610 switch (opt) { 2611 case TCPOPT_MAXSEG: 2612 if (optlen != TCPOLEN_MAXSEG) 2613 continue; 2614 if (!(flags & TO_SYN)) 2615 continue; 2616 to->to_flags |= TOF_MSS; 2617 bcopy((char *)cp + 2, 2618 (char *)&to->to_mss, sizeof(to->to_mss)); 2619 to->to_mss = ntohs(to->to_mss); 2620 break; 2621 case TCPOPT_WINDOW: 2622 if (optlen != TCPOLEN_WINDOW) 2623 continue; 2624 if (!(flags & TO_SYN)) 2625 continue; 2626 to->to_flags |= TOF_SCALE; 2627 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 2628 break; 2629 case TCPOPT_TIMESTAMP: 2630 if (optlen != TCPOLEN_TIMESTAMP) 2631 continue; 2632 to->to_flags |= TOF_TS; 2633 bcopy((char *)cp + 2, 2634 (char *)&to->to_tsval, sizeof(to->to_tsval)); 2635 to->to_tsval = ntohl(to->to_tsval); 2636 bcopy((char *)cp + 6, 2637 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 2638 to->to_tsecr = ntohl(to->to_tsecr); 2639 break; 2640#ifdef TCP_SIGNATURE 2641 /* 2642 * XXX In order to reply to a host which has set the 2643 * TCP_SIGNATURE option in its initial SYN, we have to 2644 * record the fact that the option was observed here 2645 * for the syncache code to perform the correct response. 2646 */ 2647 case TCPOPT_SIGNATURE: 2648 if (optlen != TCPOLEN_SIGNATURE) 2649 continue; 2650 to->to_flags |= TOF_SIGNATURE; 2651 to->to_signature = cp + 2; 2652 break; 2653#endif 2654 case TCPOPT_SACK_PERMITTED: 2655 if (optlen != TCPOLEN_SACK_PERMITTED) 2656 continue; 2657 if (!(flags & TO_SYN)) 2658 continue; 2659 if (!V_tcp_do_sack) 2660 continue; 2661 to->to_flags |= TOF_SACKPERM; 2662 break; 2663 case TCPOPT_SACK: 2664 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 2665 continue; 2666 if (flags & TO_SYN) 2667 continue; 2668 to->to_flags |= TOF_SACK; 2669 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 2670 to->to_sacks = cp + 2; 2671 V_tcpstat.tcps_sack_rcv_blocks++; 2672 break; 2673 default: 2674 continue; 2675 } 2676 } 2677} 2678 2679/* 2680 * Pull out of band byte out of a segment so 2681 * it doesn't appear in the user's data queue. 2682 * It is still reflected in the segment length for 2683 * sequencing purposes. 2684 */ 2685static void 2686tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 2687 int off) 2688{ 2689 int cnt = off + th->th_urp - 1; 2690 2691 while (cnt >= 0) { 2692 if (m->m_len > cnt) { 2693 char *cp = mtod(m, caddr_t) + cnt; 2694 struct tcpcb *tp = sototcpcb(so); 2695 2696 INP_WLOCK_ASSERT(tp->t_inpcb); 2697 2698 tp->t_iobc = *cp; 2699 tp->t_oobflags |= TCPOOB_HAVEDATA; 2700 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 2701 m->m_len--; 2702 if (m->m_flags & M_PKTHDR) 2703 m->m_pkthdr.len--; 2704 return; 2705 } 2706 cnt -= m->m_len; 2707 m = m->m_next; 2708 if (m == NULL) 2709 break; 2710 } 2711 panic("tcp_pulloutofband"); 2712} 2713 2714/* 2715 * Collect new round-trip time estimate 2716 * and update averages and current timeout. 
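 *
 * In unscaled terms the code below implements
 *	srtt += (rtt - srtt) / 8;
 *	rttvar += (|rtt - srtt| - rttvar) / 4;
 *	rto = srtt + 4 * rttvar;
 * with srtt carrying 5 and rttvar 4 bits of fraction.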
2717 */ 2718static void 2719tcp_xmit_timer(struct tcpcb *tp, int rtt) 2720{ 2721 INIT_VNET_INET(tp->t_inpcb->inp_vnet); 2722 int delta; 2723 2724 INP_WLOCK_ASSERT(tp->t_inpcb); 2725 2726 V_tcpstat.tcps_rttupdated++; 2727 tp->t_rttupdated++; 2728 if (tp->t_srtt != 0) { 2729 /* 2730 * srtt is stored as fixed point with 5 bits after the 2731 * binary point (i.e., scaled by 32). The following magic 2732 * is equivalent to the smoothing algorithm in rfc793 with 2733 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 2734 * point). Adjust rtt to origin 0. 2735 */ 2736 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 2737 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 2738 2739 if ((tp->t_srtt += delta) <= 0) 2740 tp->t_srtt = 1; 2741 2742 /* 2743 * We accumulate a smoothed rtt variance (actually, a 2744 * smoothed mean difference), then set the retransmit 2745 * timer to smoothed rtt + 4 times the smoothed variance. 2746 * rttvar is stored as fixed point with 4 bits after the 2747 * binary point (scaled by 16). The following is 2748 * equivalent to rfc793 smoothing with an alpha of .75 2749 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 2750 * rfc793's wired-in beta. 2751 */ 2752 if (delta < 0) 2753 delta = -delta; 2754 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 2755 if ((tp->t_rttvar += delta) <= 0) 2756 tp->t_rttvar = 1; 2757 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 2758 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2759 } else { 2760 /* 2761 * No rtt measurement yet - use the unsmoothed rtt. 2762 * Set the variance to half the rtt (so our first 2763 * retransmit happens at 3*rtt). 2764 */ 2765 tp->t_srtt = rtt << TCP_RTT_SHIFT; 2766 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 2767 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2768 } 2769 tp->t_rtttime = 0; 2770 tp->t_rxtshift = 0; 2771 2772 /* 2773 * The retransmit should happen at rtt + 4 * rttvar. 2774 * Because of the way we do the smoothing, srtt and rttvar 2775 * will each average +1/2 tick of bias. When we compute 2776 * the retransmit timer, we want 1/2 tick of rounding and 2777 * 1 extra tick because of +-1/2 tick uncertainty in the 2778 * firing of the timer. The bias will give us exactly the 2779 * 1.5 tick we need. But, because the bias is 2780 * statistical, we have to test that we don't drop below 2781 * the minimum feasible timer (which is 2 ticks). 2782 */ 2783 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 2784 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 2785 2786 /* 2787 * We received an ack for a packet that wasn't retransmitted; 2788 * it is probably safe to discard any error indications we've 2789 * received recently. This isn't quite right, but close enough 2790 * for now (a route might have failed after we sent a segment, 2791 * and the return path might not be symmetrical). 2792 */ 2793 tp->t_softerror = 0; 2794} 2795 2796/* 2797 * Determine a reasonable value for maxseg size. 2798 * If the route is known, check route for mtu. 2799 * If none, use an mss that can be handled on the outgoing 2800 * interface without forcing IP to fragment; if bigger than 2801 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES 2802 * to utilize large mbufs. If no route is found, route has no mtu, 2803 * or the destination isn't local, use a default, hopefully conservative 2804 * size (usually 512 or the default IP max size, but no more than the mtu 2805 * of the interface), as we can't discover anything about intervening 2806 * gateways or networks.
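 * (For example, a directly attached 1500-byte Ethernet path gives the
 * IPv4 case maxmtu - min_protoh = 1500 - 40 = 1460 bytes of payload
 * per segment, before option space is deducted.)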
We also initialize the congestion/slow start 2807 * window to be a single segment if the destination isn't local. 2808 * While looking at the routing entry, we also initialize other path-dependent 2809 * parameters from pre-set or cached values in the routing entry. 2810 * 2811 * Also take into account the space needed for options that we 2812 * send regularly. Make maxseg shorter by that amount to assure 2813 * that we can send maxseg amount of data even when the options 2814 * are present. Store the upper limit of the length of options plus 2815 * data in maxopd. 2816 * 2817 * In case of T/TCP, we call this routine during implicit connection 2818 * setup as well (offer = -1), to initialize maxseg from the cached 2819 * MSS of our peer. 2820 * 2821 * NOTE that this routine is only called when we process an incoming 2822 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt(). 2823 */ 2824void 2825tcp_mss_update(struct tcpcb *tp, int offer, 2826 struct hc_metrics_lite *metricptr, int *mtuflags) 2827{ 2828 INIT_VNET_INET(tp->t_inpcb->inp_vnet); 2829 int mss; 2830 u_long maxmtu; 2831 struct inpcb *inp = tp->t_inpcb; 2832 struct hc_metrics_lite metrics; 2833 int origoffer = offer; 2834#ifdef INET6 2835 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 2836 size_t min_protoh = isipv6 ? 2837 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 2838 sizeof (struct tcpiphdr); 2839#else 2840 const size_t min_protoh = sizeof(struct tcpiphdr); 2841#endif 2842 2843 INP_WLOCK_ASSERT(tp->t_inpcb); 2844 2845 /* Initialize. */ 2846#ifdef INET6 2847 if (isipv6) { 2848 maxmtu = tcp_maxmtu6(&inp->inp_inc, mtuflags); 2849 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt; 2850 } else 2851#endif 2852 { 2853 maxmtu = tcp_maxmtu(&inp->inp_inc, mtuflags); 2854 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt; 2855 } 2856 2857 /* 2858 * No route to sender, stay with default mss and return. 2859 */ 2860 if (maxmtu == 0) { 2861 /* 2862 * In case we return early we need to initialize metrics 2863 * to a defined state as tcp_hc_get() would do for us 2864 * if there was no cache hit. 2865 */ 2866 if (metricptr != NULL) 2867 bzero(metricptr, sizeof(struct hc_metrics_lite)); 2868 return; 2869 } 2870 2871 /* What have we got? */ 2872 switch (offer) { 2873 case 0: 2874 /* 2875 * Offer == 0 means that there was no MSS on the SYN 2876 * segment; in this case we use tcp_mssdflt as 2877 * already assigned to t_maxopd above. 2878 */ 2879 offer = tp->t_maxopd; 2880 break; 2881 2882 case -1: 2883 /* 2884 * Offer == -1 means that we didn't receive a SYN yet. 2885 */ 2886 /* FALLTHROUGH */ 2887 2888 default: 2889 /* 2890 * Prevent DoS attack with too small MSS. Round up 2891 * to at least minmss. 2892 */ 2893 offer = max(offer, V_tcp_minmss); 2894 } 2895 2896 /* 2897 * rmx information is now retrieved from tcp_hostcache. 2898 */ 2899 tcp_hc_get(&inp->inp_inc, &metrics); 2900 if (metricptr != NULL) 2901 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite)); 2902 2903 /* 2904 * If there's a discovered mtu in the tcp hostcache, use it; 2905 * else, use the link mtu.
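 * E.g. a cached path MTU of 1400 on a 1500-byte link yields
 * mss = min(1400, 1500) - min_protoh, i.e. 1400 - 40 = 1360 for IPv4.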
2906 */ 2907 if (metrics.rmx_mtu) 2908 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 2909 else { 2910#ifdef INET6 2911 if (isipv6) { 2912 mss = maxmtu - min_protoh; 2913 if (!V_path_mtu_discovery && 2914 !in6_localaddr(&inp->in6p_faddr)) 2915 mss = min(mss, V_tcp_v6mssdflt); 2916 } else 2917#endif 2918 { 2919 mss = maxmtu - min_protoh; 2920 if (!V_path_mtu_discovery && 2921 !in_localaddr(inp->inp_faddr)) 2922 mss = min(mss, V_tcp_mssdflt); 2923 } 2924 /* 2925 * XXX - The above conditional (mss = maxmtu - min_protoh) 2926 * probably violates the TCP spec. 2927 * The problem is that, since we don't know the 2928 * other end's MSS, we are supposed to use a conservative 2929 * default. But, if we do that, then MTU discovery will 2930 * never actually take place, because the conservative 2931 * default is much less than the MTUs typically seen 2932 * on the Internet today. For the moment, we'll sweep 2933 * this under the carpet. 2934 * 2935 * The conservative default might not actually be a problem 2936 * if the only case this occurs is when sending an initial 2937 * SYN with options and data to a host we've never talked 2938 * to before. Then, they will reply with an MSS value which 2939 * will get recorded and the new parameters should get 2940 * recomputed. For Further Study. 2941 */ 2942 } 2943 mss = min(mss, offer); 2944 2945 /* 2946 * Sanity check: make sure that maxopd will be large 2947 * enough to allow some data on segments even if 2948 * all the option space is used (40 bytes). Otherwise 2949 * funny things may happen in tcp_output. 2950 */ 2951 mss = max(mss, 64); 2952 2953 /* 2954 * maxopd stores the maximum length of data AND options 2955 * in a segment; maxseg is the amount of data in a normal 2956 * segment. We need to store this value (maxopd) apart 2957 * from maxseg, because now every segment carries options 2958 * and thus we normally have somewhat less data in segments. 2959 */ 2960 tp->t_maxopd = mss; 2961 2962 /* 2963 * origoffer==-1 indicates that no segments were received yet. 2964 * In this case we just guess. 2965 */ 2966 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && 2967 (origoffer == -1 || 2968 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) 2969 mss -= TCPOLEN_TSTAMP_APPA; 2970 2971#if (MCLBYTES & (MCLBYTES - 1)) == 0 2972 if (mss > MCLBYTES) 2973 mss &= ~(MCLBYTES-1); 2974#else 2975 if (mss > MCLBYTES) 2976 mss = mss / MCLBYTES * MCLBYTES; 2977#endif 2978 tp->t_maxseg = mss; 2979} 2980 2981void 2982tcp_mss(struct tcpcb *tp, int offer) 2983{ 2984 int rtt, mss; 2985 u_long bufsize; 2986 struct inpcb *inp; 2987 struct socket *so; 2988 struct hc_metrics_lite metrics; 2989 int mtuflags = 0; 2990#ifdef INET6 2991 int isipv6; 2992#endif 2993 KASSERT(tp != NULL, ("%s: tp == NULL", __func__)); 2994 INIT_VNET_INET(tp->t_vnet); 2995 2996 tcp_mss_update(tp, offer, &metrics, &mtuflags); 2997 2998 mss = tp->t_maxseg; 2999 inp = tp->t_inpcb; 3000#ifdef INET6 3001 isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3002#endif 3003 3004 /* 3005 * If there's a pipesize, change the socket buffer to that size; 3006 * don't change it if sb_hiwat is different than the default (then it 3007 * has been changed on purpose with setsockopt). 3008 * Make the socket buffers an integral number of mss units; 3009 * if the mss is larger than the socket buffer, decrease the mss.
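 * E.g. with mss = 1460 and a 32768-byte send buffer, the buffer is
 * rounded up to 23 * 1460 = 33580 bytes (subject to the sb_max cap),
 * so it holds a whole number of segments.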
3010 */ 3011 so = inp->inp_socket; 3012 SOCKBUF_LOCK(&so->so_snd); 3013 if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe) 3014 bufsize = metrics.rmx_sendpipe; 3015 else 3016 bufsize = so->so_snd.sb_hiwat; 3017 if (bufsize < mss) 3018 mss = bufsize; 3019 else { 3020 bufsize = roundup(bufsize, mss); 3021 if (bufsize > sb_max) 3022 bufsize = sb_max; 3023 if (bufsize > so->so_snd.sb_hiwat) 3024 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL); 3025 } 3026 SOCKBUF_UNLOCK(&so->so_snd); 3027 tp->t_maxseg = mss; 3028 3029 SOCKBUF_LOCK(&so->so_rcv); 3030 if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe) 3031 bufsize = metrics.rmx_recvpipe; 3032 else 3033 bufsize = so->so_rcv.sb_hiwat; 3034 if (bufsize > mss) { 3035 bufsize = roundup(bufsize, mss); 3036 if (bufsize > sb_max) 3037 bufsize = sb_max; 3038 if (bufsize > so->so_rcv.sb_hiwat) 3039 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL); 3040 } 3041 SOCKBUF_UNLOCK(&so->so_rcv); 3042 /* 3043 * While we're here, check the others too. 3044 */ 3045 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) { 3046 tp->t_srtt = rtt; 3047 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE; 3048 V_tcpstat.tcps_usedrtt++; 3049 if (metrics.rmx_rttvar) { 3050 tp->t_rttvar = metrics.rmx_rttvar; 3051 V_tcpstat.tcps_usedrttvar++; 3052 } else { 3053 /* default variation is +- 1 rtt */ 3054 tp->t_rttvar = 3055 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; 3056 } 3057 TCPT_RANGESET(tp->t_rxtcur, 3058 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, 3059 tp->t_rttmin, TCPTV_REXMTMAX); 3060 } 3061 if (metrics.rmx_ssthresh) { 3062 /* 3063 * There's some sort of gateway or interface 3064 * buffer limit on the path. Use this to set 3065 * the slow start threshold, but set the 3066 * threshold to no less than 2*mss. 3067 */ 3068 tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh); 3069 V_tcpstat.tcps_usedssthresh++; 3070 } 3071 if (metrics.rmx_bandwidth) 3072 tp->snd_bandwidth = metrics.rmx_bandwidth; 3073 3074 /* 3075 * Set the slow-start flight size depending on whether this 3076 * is a local network or not. 3077 * 3078 * Extend this so we cache the cwnd too and retrieve it here. 3079 * Make cwnd even bigger than RFC3390 suggests but only if we 3080 * have previous experience with the remote host. Be careful 3081 * not to make cwnd bigger than the remote receive window or our own 3082 * send socket buffer. Maybe put some additional upper bound 3083 * on the retrieved cwnd. Should do incremental updates to 3084 * hostcache when cwnd collapses so the next connection doesn't 3085 * overload the path again. 3086 * 3087 * RFC3390 says only do this if SYN or SYN/ACK didn't get lost. 3088 * We currently check only in syncache_socket for that. 3089 */ 3090#define TCP_METRICS_CWND 3091#ifdef TCP_METRICS_CWND 3092 if (metrics.rmx_cwnd) 3093 tp->snd_cwnd = max(mss, 3094 min(metrics.rmx_cwnd / 2, 3095 min(tp->snd_wnd, so->so_snd.sb_hiwat))); 3096 else 3097#endif 3098 if (V_tcp_do_rfc3390) 3099 tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380)); 3100#ifdef INET6 3101 else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) || 3102 (!isipv6 && in_localaddr(inp->inp_faddr))) 3103#else 3104 else if (in_localaddr(inp->inp_faddr)) 3105#endif 3106 tp->snd_cwnd = mss * V_ss_fltsz_local; 3107 else 3108 tp->snd_cwnd = mss * V_ss_fltsz; 3109 3110 /* Check the interface for TSO capabilities. */ 3111 if (mtuflags & CSUM_TSO) 3112 tp->t_flags |= TF_TSO; 3113} 3114 3115/* 3116 * Determine the MSS option to send on an outgoing SYN.
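 *
 * E.g. with an interface MTU of 1500 and a cached hostcache MTU of
 * 1400, the SYN advertises min(1500, 1400) - min_protoh = 1360 (IPv4);
 * if only one of the two is known, that one is used instead.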
3117 */ 3118int 3119tcp_mssopt(struct in_conninfo *inc) 3120{ 3121 INIT_VNET_INET(curvnet); 3122 int mss = 0; 3123 u_long maxmtu = 0; 3124 u_long thcmtu = 0; 3125 size_t min_protoh; 3126#ifdef INET6 3127 int isipv6 = inc->inc_isipv6 ? 1 : 0; 3128#endif 3129 3130 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer")); 3131 3132#ifdef INET6 3133 if (isipv6) { 3134 mss = V_tcp_v6mssdflt; 3135 maxmtu = tcp_maxmtu6(inc, NULL); 3136 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3137 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 3138 } else 3139#endif 3140 { 3141 mss = V_tcp_mssdflt; 3142 maxmtu = tcp_maxmtu(inc, NULL); 3143 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3144 min_protoh = sizeof(struct tcpiphdr); 3145 } 3146 if (maxmtu && thcmtu) 3147 mss = min(maxmtu, thcmtu) - min_protoh; 3148 else if (maxmtu || thcmtu) 3149 mss = max(maxmtu, thcmtu) - min_protoh; 3150 3151 return (mss); 3152} 3153 3154 3155/* 3156 * When a partial ack arrives, force the retransmission of the 3157 * next unacknowledged segment. Do not clear tp->t_dupacks. 3158 * By setting snd_nxt to th_ack, this forces the retransmission timer to 3159 * be started again. 3160 */ 3161static void 3162tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) 3163{ 3164 tcp_seq onxt = tp->snd_nxt; 3165 u_long ocwnd = tp->snd_cwnd; 3166 3167 INP_WLOCK_ASSERT(tp->t_inpcb); 3168 3169 tcp_timer_activate(tp, TT_REXMT, 0); 3170 tp->t_rtttime = 0; 3171 tp->snd_nxt = th->th_ack; 3172 /* 3173 * Set snd_cwnd to one segment beyond acknowledged offset. 3174 * (tp->snd_una has not yet been updated when this function is called.) 3175 */ 3176 tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una); 3177 tp->t_flags |= TF_ACKNOW; 3178 (void) tcp_output(tp); 3179 tp->snd_cwnd = ocwnd; 3180 if (SEQ_GT(onxt, tp->snd_nxt)) 3181 tp->snd_nxt = onxt; 3182 /* 3183 * Partial window deflation. Relies on the fact that tp->snd_una 3184 * is not updated yet. 3185 */ 3186 if (tp->snd_cwnd > th->th_ack - tp->snd_una) 3187 tp->snd_cwnd -= th->th_ack - tp->snd_una; 3188 else 3189 tp->snd_cwnd = 0; 3190 tp->snd_cwnd += tp->t_maxseg; 3191} 3192
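/*
 * Illustrative sketch (not compiled into the kernel): the partial-ack
 * handling in tcp_newreno_partial_ack() traced with concrete numbers,
 * as a self-contained userland program. All values are made up for
 * the example.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	unsigned long snd_una = 1000;	/* oldest unacked byte */
	unsigned long th_ack = 3920;	/* partial ack: 2 * 1460 bytes */
	unsigned long snd_cwnd = 11680;	/* 8 segments of 1460 */
	unsigned long t_maxseg = 1460;

	/* Forced retransmit: one segment beyond the acked offset. */
	printf("retransmit cwnd = %lu\n",
	    t_maxseg + (th_ack - snd_una));	/* prints 4380 */

	/* Partial window deflation (snd_una not yet advanced). */
	if (snd_cwnd > th_ack - snd_una)
		snd_cwnd -= th_ack - snd_una;
	else
		snd_cwnd = 0;
	snd_cwnd += t_maxseg;
	printf("deflated cwnd = %lu\n", snd_cwnd);	/* prints 10220 */
	return (0);
}
#endif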