tcp_input.c revision 170516
1/*- 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 28 * 29 * @(#)tcp_input.c 8.12 (Berkeley) 5/24/95 30 * $FreeBSD: head/sys/netinet/tcp_input.c 170516 2007-06-10 21:07:21Z andre $ 31 */ 32 33#include "opt_ipfw.h" /* for ipfw_fwd */ 34#include "opt_inet.h" 35#include "opt_inet6.h" 36#include "opt_ipsec.h" 37#include "opt_mac.h" 38#include "opt_tcpdebug.h" 39 40#include <sys/param.h> 41#include <sys/kernel.h> 42#include <sys/malloc.h> 43#include <sys/mbuf.h> 44#include <sys/proc.h> /* for proc0 declaration */ 45#include <sys/protosw.h> 46#include <sys/signalvar.h> 47#include <sys/socket.h> 48#include <sys/socketvar.h> 49#include <sys/sysctl.h> 50#include <sys/syslog.h> 51#include <sys/systm.h> 52 53#include <machine/cpu.h> /* before tcp_seq.h, for tcp_random18() */ 54 55#include <vm/uma.h> 56 57#include <net/if.h> 58#include <net/route.h> 59 60#include <netinet/in.h> 61#include <netinet/in_pcb.h> 62#include <netinet/in_systm.h> 63#include <netinet/in_var.h> 64#include <netinet/ip.h> 65#include <netinet/ip_icmp.h> /* required for icmp_var.h */ 66#include <netinet/icmp_var.h> /* for ICMP_BANDLIM */ 67#include <netinet/ip_var.h> 68#include <netinet/ip_options.h> 69#include <netinet/ip6.h> 70#include <netinet/icmp6.h> 71#include <netinet6/in6_pcb.h> 72#include <netinet6/ip6_var.h> 73#include <netinet6/nd6.h> 74#include <netinet/tcp.h> 75#include <netinet/tcp_fsm.h> 76#include <netinet/tcp_seq.h> 77#include <netinet/tcp_timer.h> 78#include <netinet/tcp_var.h> 79#include <netinet6/tcp6_var.h> 80#include <netinet/tcpip.h> 81#ifdef TCPDEBUG 82#include <netinet/tcp_debug.h> 83#endif /* TCPDEBUG */ 84 85#ifdef FAST_IPSEC 86#include <netipsec/ipsec.h> 87#include <netipsec/ipsec6.h> 88#endif /*FAST_IPSEC*/ 89 90#ifdef IPSEC 91#include <netinet6/ipsec.h> 92#include <netinet6/ipsec6.h> 93#include <netkey/key.h> 94#endif /*IPSEC*/ 95 96#include <machine/in_cksum.h> 97 98#include <security/mac/mac_framework.h> 99 100static const int 
tcprexmtthresh = 3; 101 102struct tcpstat tcpstat; 103SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW, 104 &tcpstat , tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)"); 105 106static int tcp_log_in_vain = 0; 107SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW, 108 &tcp_log_in_vain, 0, "Log all incoming TCP segments to closed ports"); 109 110static int blackhole = 0; 111SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW, 112 &blackhole, 0, "Do not send RST on segments to closed ports"); 113 114int tcp_delack_enabled = 1; 115SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW, 116 &tcp_delack_enabled, 0, 117 "Delay ACK to try and piggyback it onto a data packet"); 118 119static int drop_synfin = 0; 120SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW, 121 &drop_synfin, 0, "Drop TCP packets with SYN+FIN set"); 122 123static int tcp_do_rfc3042 = 1; 124SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW, 125 &tcp_do_rfc3042, 0, "Enable RFC 3042 (Limited Transmit)"); 126 127static int tcp_do_rfc3390 = 1; 128SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW, 129 &tcp_do_rfc3390, 0, 130 "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)"); 131 132static int tcp_insecure_rst = 0; 133SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW, 134 &tcp_insecure_rst, 0, 135 "Follow the old (insecure) criteria for accepting RST packets"); 136 137int tcp_do_autorcvbuf = 1; 138SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW, 139 &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing"); 140 141int tcp_autorcvbuf_inc = 16*1024; 142SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW, 143 &tcp_autorcvbuf_inc, 0, 144 "Incrementor step size of automatic receive buffer"); 145 146int tcp_autorcvbuf_max = 256*1024; 147SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW, 148 &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer"); 149 150struct inpcbhead tcb; 151#define tcb6 tcb /* for KAME src sync over BSD*'s */ 152struct inpcbinfo tcbinfo; 153 154static void tcp_dooptions(struct tcpopt *, u_char *, int, int); 155static void tcp_do_segment(struct mbuf *, struct tcphdr *, 156 struct socket *, struct tcpcb *, int, int); 157static void tcp_dropwithreset(struct mbuf *, struct tcphdr *, 158 struct tcpcb *, int, int); 159static void tcp_pulloutofband(struct socket *, 160 struct tcphdr *, struct mbuf *, int); 161static void tcp_xmit_timer(struct tcpcb *, int); 162static void tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *); 163 164/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */ 165#ifdef INET6 166#define ND6_HINT(tp) \ 167do { \ 168 if ((tp) && (tp)->t_inpcb && \ 169 ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \ 170 nd6_nud_hint(NULL, NULL, 0); \ 171} while (0) 172#else 173#define ND6_HINT(tp) 174#endif 175 176/* 177 * Indicate whether this ack should be delayed. We can delay the ack if 178 * - there is no delayed ack timer in progress and 179 * - our last ack wasn't a 0-sized window. We never want to delay 180 * the ack that opens up a 0-sized window and 181 * - delayed acks are enabled or 182 * - this is a half-synchronized T/TCP connection. 
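 *
 * In effect the first condition limits delaying to every other
 * segment: once a delayed ACK is already pending (the TT_DELACK
 * timer is running), DELAY_ACK() evaluates false for the next
 * segment, TF_ACKNOW is set instead and the ACK goes out at once.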
183 */ 184#define DELAY_ACK(tp) \ 185 ((!tcp_timer_active(tp, TT_DELACK) && \ 186 (tp->t_flags & TF_RXWIN0SENT) == 0) && \ 187 (tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN))) 188 189 190/* 191 * TCP input handling is split into multiple parts: 192 * tcp6_input is a thin wrapper around tcp_input for the extended 193 * ip6_protox[] call format in ip6_input 194 * tcp_input handles primary segment validation, inpcb lookup and 195 * SYN processing on listen sockets 196 * tcp_do_segment processes the ACK and text of the segment for 197 * establishing, established and closing connections 198 */ 199#ifdef INET6 200int 201tcp6_input(struct mbuf **mp, int *offp, int proto) 202{ 203 struct mbuf *m = *mp; 204 struct in6_ifaddr *ia6; 205 206 IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE); 207 208 /* 209 * draft-itojun-ipv6-tcp-to-anycast 210 * better place to put this in? 211 */ 212 ia6 = ip6_getdstifaddr(m); 213 if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) { 214 struct ip6_hdr *ip6; 215 216 ip6 = mtod(m, struct ip6_hdr *); 217 icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR, 218 (caddr_t)&ip6->ip6_dst - (caddr_t)ip6); 219 return IPPROTO_DONE; 220 } 221 222 tcp_input(m, *offp); 223 return IPPROTO_DONE; 224} 225#endif 226 227void 228tcp_input(struct mbuf *m, int off0) 229{ 230 struct tcphdr *th; 231 struct ip *ip = NULL; 232 struct ipovly *ipov; 233 struct inpcb *inp = NULL; 234 struct tcpcb *tp = NULL; 235 struct socket *so = NULL; 236 u_char *optp = NULL; 237 int optlen = 0; 238 int len, tlen, off; 239 int drop_hdrlen; 240 int thflags; 241 int rstreason = 0; /* For badport_bandlim accounting purposes */ 242#ifdef IPFIREWALL_FORWARD 243 struct m_tag *fwd_tag; 244#endif 245#ifdef INET6 246 struct ip6_hdr *ip6 = NULL; 247 int isipv6; 248#else 249 const void *ip6 = NULL; 250 const int isipv6 = 0; 251#endif 252 struct tcpopt to; /* options in this segment */ 253 char *s = NULL; /* address and port logging */ 254 255#ifdef TCPDEBUG 256 /* 257 * The size of tcp_saveipgen must be the size of the max ip header, 258 * now IPv6. 259 */ 260 u_char tcp_saveipgen[IP6_HDR_LEN]; 261 struct tcphdr tcp_savetcp; 262 short ostate = 0; 263#endif 264 265#ifdef INET6 266 isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0; 267#endif 268 269 to.to_flags = 0; 270 tcpstat.tcps_rcvtotal++; 271 272 if (isipv6) { 273#ifdef INET6 274 /* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */ 275 ip6 = mtod(m, struct ip6_hdr *); 276 tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0; 277 if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) { 278 tcpstat.tcps_rcvbadsum++; 279 goto drop; 280 } 281 th = (struct tcphdr *)((caddr_t)ip6 + off0); 282 283 /* 284 * Be proactive about unspecified IPv6 address in source. 285 * As we use all-zero to indicate unbounded/unconnected pcb, 286 * unspecified IPv6 address can be used to confuse us. 287 * 288 * Note that packets with unspecified IPv6 destination is 289 * already dropped in ip6_input. 290 */ 291 if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) { 292 /* XXX stat */ 293 goto drop; 294 } 295#else 296 th = NULL; /* XXX: Avoid compiler warning. */ 297#endif 298 } else { 299 /* 300 * Get IP and TCP header together in first mbuf. 301 * Note: IP leaves IP header in first mbuf. 
302 */ 303 if (off0 > sizeof (struct ip)) { 304 ip_stripoptions(m, (struct mbuf *)0); 305 off0 = sizeof(struct ip); 306 } 307 if (m->m_len < sizeof (struct tcpiphdr)) { 308 if ((m = m_pullup(m, sizeof (struct tcpiphdr))) 309 == NULL) { 310 tcpstat.tcps_rcvshort++; 311 return; 312 } 313 } 314 ip = mtod(m, struct ip *); 315 ipov = (struct ipovly *)ip; 316 th = (struct tcphdr *)((caddr_t)ip + off0); 317 tlen = ip->ip_len; 318 319 if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) { 320 if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR) 321 th->th_sum = m->m_pkthdr.csum_data; 322 else 323 th->th_sum = in_pseudo(ip->ip_src.s_addr, 324 ip->ip_dst.s_addr, 325 htonl(m->m_pkthdr.csum_data + 326 ip->ip_len + 327 IPPROTO_TCP)); 328 th->th_sum ^= 0xffff; 329#ifdef TCPDEBUG 330 ipov->ih_len = (u_short)tlen; 331 ipov->ih_len = htons(ipov->ih_len); 332#endif 333 } else { 334 /* 335 * Checksum extended TCP header and data. 336 */ 337 len = sizeof (struct ip) + tlen; 338 bzero(ipov->ih_x1, sizeof(ipov->ih_x1)); 339 ipov->ih_len = (u_short)tlen; 340 ipov->ih_len = htons(ipov->ih_len); 341 th->th_sum = in_cksum(m, len); 342 } 343 if (th->th_sum) { 344 tcpstat.tcps_rcvbadsum++; 345 goto drop; 346 } 347 /* Re-initialization for later version check */ 348 ip->ip_v = IPVERSION; 349 } 350 351 /* 352 * Check that TCP offset makes sense, 353 * pull out TCP options and adjust length. XXX 354 */ 355 off = th->th_off << 2; 356 if (off < sizeof (struct tcphdr) || off > tlen) { 357 tcpstat.tcps_rcvbadoff++; 358 goto drop; 359 } 360 tlen -= off; /* tlen is used instead of ti->ti_len */ 361 if (off > sizeof (struct tcphdr)) { 362 if (isipv6) { 363#ifdef INET6 364 IP6_EXTHDR_CHECK(m, off0, off, ); 365 ip6 = mtod(m, struct ip6_hdr *); 366 th = (struct tcphdr *)((caddr_t)ip6 + off0); 367#endif 368 } else { 369 if (m->m_len < sizeof(struct ip) + off) { 370 if ((m = m_pullup(m, sizeof (struct ip) + off)) 371 == NULL) { 372 tcpstat.tcps_rcvshort++; 373 return; 374 } 375 ip = mtod(m, struct ip *); 376 ipov = (struct ipovly *)ip; 377 th = (struct tcphdr *)((caddr_t)ip + off0); 378 } 379 } 380 optlen = off - sizeof (struct tcphdr); 381 optp = (u_char *)(th + 1); 382 } 383 thflags = th->th_flags; 384 385 /* 386 * Convert TCP protocol specific fields to host format. 387 */ 388 th->th_seq = ntohl(th->th_seq); 389 th->th_ack = ntohl(th->th_ack); 390 th->th_win = ntohs(th->th_win); 391 th->th_urp = ntohs(th->th_urp); 392 393 /* 394 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options. 395 */ 396 drop_hdrlen = off0 + off; 397 398 /* 399 * Locate pcb for segment. 400 */ 401 INP_INFO_WLOCK(&tcbinfo); 402findpcb: 403 INP_INFO_WLOCK_ASSERT(&tcbinfo); 404#ifdef IPFIREWALL_FORWARD 405 /* 406 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. 407 */ 408 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL); 409 410 if (fwd_tag != NULL && isipv6 == 0) { /* IPv6 support is not yet */ 411 struct sockaddr_in *next_hop; 412 413 next_hop = (struct sockaddr_in *)(fwd_tag+1); 414 /* 415 * Transparently forwarded. Pretend to be the destination. 416 * already got one like this? 417 */ 418 inp = in_pcblookup_hash(&tcbinfo, 419 ip->ip_src, th->th_sport, 420 ip->ip_dst, th->th_dport, 421 0, m->m_pkthdr.rcvif); 422 if (!inp) { 423 /* It's new. Try to find the ambushing socket. */ 424 inp = in_pcblookup_hash(&tcbinfo, 425 ip->ip_src, th->th_sport, 426 next_hop->sin_addr, 427 next_hop->sin_port ? 428 ntohs(next_hop->sin_port) : 429 th->th_dport, 430 INPLOOKUP_WILDCARD, 431 m->m_pkthdr.rcvif); 432 } 433 /* Remove the tag from the packet. 
We don't need it anymore. */ 434 m_tag_delete(m, fwd_tag); 435 } else 436#endif /* IPFIREWALL_FORWARD */ 437 { 438 if (isipv6) { 439#ifdef INET6 440 inp = in6_pcblookup_hash(&tcbinfo, 441 &ip6->ip6_src, th->th_sport, 442 &ip6->ip6_dst, th->th_dport, 443 INPLOOKUP_WILDCARD, 444 m->m_pkthdr.rcvif); 445#endif 446 } else 447 inp = in_pcblookup_hash(&tcbinfo, 448 ip->ip_src, th->th_sport, 449 ip->ip_dst, th->th_dport, 450 INPLOOKUP_WILDCARD, 451 m->m_pkthdr.rcvif); 452 } 453 454#if defined(IPSEC) || defined(FAST_IPSEC) 455#ifdef INET6 456 if (isipv6 && inp != NULL && ipsec6_in_reject(m, inp)) { 457#ifdef IPSEC 458 ipsec6stat.in_polvio++; 459#endif 460 goto dropunlock; 461 } else 462#endif /* INET6 */ 463 if (inp != NULL && ipsec4_in_reject(m, inp)) { 464#ifdef IPSEC 465 ipsecstat.in_polvio++; 466#endif 467 goto dropunlock; 468 } 469#endif /*IPSEC || FAST_IPSEC*/ 470 471 /* 472 * If the INPCB does not exist then all data in the incoming 473 * segment is discarded and an appropriate RST is sent back. 474 */ 475 if (inp == NULL) { 476 /* 477 * Log communication attempts to ports that are not 478 * in use. 479 */ 480 if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) || 481 tcp_log_in_vain == 2) { 482 if ((s = tcp_log_addrs(NULL, th, (void *)ip, 483 (void *)ip6))) 484 log(LOG_INFO, "%s; %s: Connection attempt " 485 "to closed port\n", s, __func__); 486 } 487 /* 488 * When blackholing do not respond with a RST but 489 * completely ignore the segment and drop it. 490 */ 491 if ((blackhole == 1 && (thflags & TH_SYN)) || 492 blackhole == 2) 493 goto dropunlock; 494 495 rstreason = BANDLIM_RST_CLOSEDPORT; 496 goto dropwithreset; 497 } 498 INP_LOCK(inp); 499 500 /* 501 * Check the minimum TTL for socket. 502 */ 503 if (inp->inp_ip_minttl != 0) { 504#ifdef INET6 505 if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim) 506 goto dropunlock; 507 else 508#endif 509 if (inp->inp_ip_minttl > ip->ip_ttl) 510 goto dropunlock; 511 } 512 513 /* 514 * A previous connection in TIMEWAIT state is supposed to catch 515 * stray or duplicate segments arriving late. If this segment 516 * was a legitimate new connection attempt the old INPCB gets 517 * removed and we can try again to find a listening socket. 518 */ 519 if (inp->inp_vflag & INP_TIMEWAIT) { 520 if (thflags & TH_SYN) 521 tcp_dooptions(&to, optp, optlen, TO_SYN); 522 /* 523 * NB: tcp_twcheck unlocks the INP and frees the mbuf. 524 */ 525 if (tcp_twcheck(inp, &to, th, m, tlen)) 526 goto findpcb; 527 INP_INFO_WUNLOCK(&tcbinfo); 528 return; 529 } 530 /* 531 * The TCPCB may no longer exist if the connection is winding 532 * down or it is in the CLOSED state. Either way we drop the 533 * segment and send an appropriate response. 534 */ 535 tp = intotcpcb(inp); 536 if (tp == NULL || tp->t_state == TCPS_CLOSED) { 537 rstreason = BANDLIM_RST_CLOSEDPORT; 538 goto dropwithreset; 539 } 540 541#ifdef MAC 542 INP_LOCK_ASSERT(inp); 543 if (mac_check_inpcb_deliver(inp, m)) 544 goto dropunlock; 545#endif 546 so = inp->inp_socket; 547 KASSERT(so != NULL, ("%s: so == NULL", __func__)); 548#ifdef TCPDEBUG 549 if (so->so_options & SO_DEBUG) { 550 ostate = tp->t_state; 551 if (isipv6) { 552#ifdef INET6 553 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6)); 554#endif 555 } else 556 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip)); 557 tcp_savetcp = *th; 558 } 559#endif 560 /* 561 * When the socket is accepting connections (the INPCB is in LISTEN 562 * state) we look into the SYN cache if this is a new connection 563 * attempt or the completion of a previous one. 
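	 * Three outcomes are handled below: a segment with only ACK set
	 * is matched against the syncache by syncache_expand() to finish
	 * the 3-way handshake, an RST is handed to syncache_chkrst() to
	 * drop the pending entry, and a validated SYN is recorded with
	 * syncache_add().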
564 */ 565 if (so->so_options & SO_ACCEPTCONN) { 566 struct in_conninfo inc; 567 568 KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but " 569 "tp not listening", __func__)); 570 571 bzero(&inc, sizeof(inc)); 572 inc.inc_isipv6 = isipv6; 573#ifdef INET6 574 if (isipv6) { 575 inc.inc6_faddr = ip6->ip6_src; 576 inc.inc6_laddr = ip6->ip6_dst; 577 } else 578#endif 579 { 580 inc.inc_faddr = ip->ip_src; 581 inc.inc_laddr = ip->ip_dst; 582 } 583 inc.inc_fport = th->th_sport; 584 inc.inc_lport = th->th_dport; 585 586 /* 587 * Check for an existing connection attempt in syncache if 588 * the flag is only ACK. A successful lookup creates a new 589 * socket appended to the listen queue in SYN_RECEIVED state. 590 */ 591 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) { 592 /* 593 * Parse the TCP options here because 594 * syncookies need access to the reflected 595 * timestamp. 596 */ 597 tcp_dooptions(&to, optp, optlen, 0); 598 /* 599 * NB: syncache_expand() doesn't unlock 600 * inp and tcpinfo locks. 601 */ 602 if (!syncache_expand(&inc, &to, th, &so, m)) { 603 /* 604 * No syncache entry or ACK was not 605 * for our SYN/ACK. Send a RST. 606 * NB: syncache did its own logging 607 * of the failure cause. 608 */ 609 rstreason = BANDLIM_RST_OPENPORT; 610 goto dropwithreset; 611 } 612 if (so == NULL) { 613 /* 614 * We completed the 3-way handshake 615 * but could not allocate a socket 616 * either due to memory shortage, 617 * listen queue length limits or 618 * global socket limits. Send RST 619 * or wait and have the remote end 620 * retransmit the ACK for another 621 * try. 622 */ 623 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 624 log(LOG_DEBUG, "%s; %s: Listen socket: " 625 "Socket allocation failed due to " 626 "limits or memory shortage, %s\n", 627 s, __func__, (tcp_sc_rst_sock_fail ? 628 "sending RST" : "try again")); 629 if (tcp_sc_rst_sock_fail) { 630 rstreason = BANDLIM_UNLIMITED; 631 goto dropwithreset; 632 } else 633 goto dropunlock; 634 } 635 /* 636 * Socket is created in state SYN_RECEIVED. 637 * Unlock the listen socket, lock the newly 638 * created socket and update the tp variable. 639 */ 640 INP_UNLOCK(inp); /* listen socket */ 641 inp = sotoinpcb(so); 642 INP_LOCK(inp); /* new connection */ 643 tp = intotcpcb(inp); 644 KASSERT(tp->t_state == TCPS_SYN_RECEIVED, 645 ("%s: ", __func__)); 646 /* 647 * Process the segment and the data it 648 * contains. tcp_do_segment() consumes 649 * the mbuf chain and unlocks the inpcb. 650 */ 651 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen); 652 INP_INFO_UNLOCK_ASSERT(&tcbinfo); 653 return; 654 } 655 /* 656 * Segment flag validation for new connection attempts: 657 * 658 * Our (SYN|ACK) response was rejected. 659 * Check with syncache and remove entry to prevent 660 * retransmits. 661 */ 662 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) { 663 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 664 log(LOG_DEBUG, "%s; %s: Listen socket: " 665 "Our SYN|ACK was rejected, connection " 666 "attempt aborted by remote endpoint\n", 667 s, __func__); 668 syncache_chkrst(&inc, th); 669 goto dropunlock; 670 } 671 /* 672 * Spurious RST. Ignore. 673 */ 674 if (thflags & TH_RST) { 675 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 676 log(LOG_DEBUG, "%s; %s: Listen socket: " 677 "Spurious RST, segment rejected\n", 678 s, __func__); 679 goto dropunlock; 680 } 681 /* 682 * We can't do anything without SYN. 
683 */ 684 if ((thflags & TH_SYN) == 0) { 685 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 686 log(LOG_DEBUG, "%s; %s: Listen socket: " 687 "SYN is missing, segment rejected\n", 688 s, __func__); 689 tcpstat.tcps_badsyn++; 690 goto dropunlock; 691 } 692 /* 693 * (SYN|ACK) is bogus on a listen socket. 694 */ 695 if (thflags & TH_ACK) { 696 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 697 log(LOG_DEBUG, "%s; %s: Listen socket: " 698 "SYN|ACK invalid, segment rejected\n", 699 s, __func__); 700 syncache_badack(&inc); /* XXX: Not needed! */ 701 tcpstat.tcps_badsyn++; 702 rstreason = BANDLIM_RST_OPENPORT; 703 goto dropwithreset; 704 } 705 /* 706 * If the drop_synfin option is enabled, drop all 707 * segments with both the SYN and FIN bits set. 708 * This prevents e.g. nmap from identifying the 709 * TCP/IP stack. 710 * XXX: Poor reasoning. nmap has other methods 711 * and is constantly refining its stack detection 712 * strategies. 713 * XXX: This is a violation of the TCP specification 714 * and was used by RFC1644. 715 */ 716 if ((thflags & TH_FIN) && drop_synfin) { 717 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 718 log(LOG_DEBUG, "%s; %s: Listen socket: " 719 "SYN|FIN segment rejected (based on " 720 "sysctl setting)\n", s, __func__); 721 tcpstat.tcps_badsyn++; 722 goto dropunlock; 723 } 724 /* 725 * Segment's flags are (SYN) or (SYN|FIN). 726 * 727 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored 728 * as they do not affect the state of the TCP FSM. 729 * The data pointed to by TH_URG and th_urp is ignored. 730 */ 731 KASSERT((thflags & (TH_RST|TH_ACK)) == 0, 732 ("%s: Listen socket: TH_RST or TH_ACK set", __func__)); 733 KASSERT(thflags & (TH_SYN), 734 ("%s: Listen socket: TH_SYN not set", __func__)); 735#ifdef INET6 736 /* 737 * If deprecated address is forbidden, 738 * we do not accept SYN to deprecated interface 739 * address to prevent any new inbound connection from 740 * getting established. 741 * When we do not accept SYN, we send a TCP RST, 742 * with deprecated source address (instead of dropping 743 * it). We compromise it as it is much better for peer 744 * to send a RST, and RST will be the final packet 745 * for the exchange. 746 * 747 * If we do not forbid deprecated addresses, we accept 748 * the SYN packet. RFC2462 does not suggest dropping 749 * SYN in this case. 750 * If we decipher RFC2462 5.5.4, it says like this: 751 * 1. use of deprecated addr with existing 752 * communication is okay - "SHOULD continue to be 753 * used" 754 * 2. use of it with new communication: 755 * (2a) "SHOULD NOT be used if alternate address 756 * with sufficient scope is available" 757 * (2b) nothing mentioned otherwise. 758 * Here we fall into (2b) case as we have no choice in 759 * our source address selection - we must obey the peer. 760 * 761 * The wording in RFC2462 is confusing, and there are 762 * multiple description text for deprecated address 763 * handling - worse, they are not exactly the same. 764 * I believe 5.5.4 is the best one, so we follow 5.5.4. 
765 */ 766 if (isipv6 && !ip6_use_deprecated) { 767 struct in6_ifaddr *ia6; 768 769 if ((ia6 = ip6_getdstifaddr(m)) && 770 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) { 771 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 772 log(LOG_DEBUG, "%s; %s: Listen socket: " 773 "Connection attempt to deprecated " 774 "IPv6 address rejected\n", 775 s, __func__); 776 rstreason = BANDLIM_RST_OPENPORT; 777 goto dropwithreset; 778 } 779 } 780#endif 781 /* 782 * Basic sanity checks on incoming SYN requests: 783 * Don't respond if the destination is a link layer 784 * broadcast according to RFC1122 4.2.3.10, p. 104. 785 * If it is from this socket it must be forged. 786 * Don't respond if the source or destination is a 787 * global or subnet broad- or multicast address. 788 * Note that it is quite possible to receive unicast 789 * link-layer packets with a broadcast IP address. Use 790 * in_broadcast() to find them. 791 */ 792 if (m->m_flags & (M_BCAST|M_MCAST)) { 793 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 794 log(LOG_DEBUG, "%s; %s: Listen socket: " 795 "Connection attempt from broad- or multicast " 796 "link layer address rejected\n", s, __func__); 797 goto dropunlock; 798 } 799 if (isipv6) { 800#ifdef INET6 801 if (th->th_dport == th->th_sport && 802 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) { 803 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 804 log(LOG_DEBUG, "%s; %s: Listen socket: " 805 "Connection attempt to/from self " 806 "rejected\n", s, __func__); 807 goto dropunlock; 808 } 809 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 810 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) { 811 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 812 log(LOG_DEBUG, "%s; %s: Listen socket: " 813 "Connection attempt from/to multicast " 814 "address rejected\n", s, __func__); 815 goto dropunlock; 816 } 817#endif 818 } else { 819 if (th->th_dport == th->th_sport && 820 ip->ip_dst.s_addr == ip->ip_src.s_addr) { 821 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 822 log(LOG_DEBUG, "%s; %s: Listen socket: " 823 "Connection attempt from/to self " 824 "rejected\n", s, __func__); 825 goto dropunlock; 826 } 827 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 828 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 829 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 830 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) { 831 if ((s = tcp_log_addrs(&inc, th, NULL, NULL))) 832 log(LOG_DEBUG, "%s; %s: Listen socket: " 833 "Connection attempt from/to broad- " 834 "or multicast address rejected\n", 835 s, __func__); 836 goto dropunlock; 837 } 838 } 839 /* 840 * SYN appears to be valid. Create compressed TCP state 841 * for syncache. 842 */ 843#ifdef TCPDEBUG 844 if (so->so_options & SO_DEBUG) 845 tcp_trace(TA_INPUT, ostate, tp, 846 (void *)tcp_saveipgen, &tcp_savetcp, 0); 847#endif 848 tcp_dooptions(&to, optp, optlen, TO_SYN); 849 syncache_add(&inc, &to, th, inp, &so, m); 850 /* 851 * Entry added to syncache and mbuf consumed. 852 * Everything already unlocked by syncache_add(). 853 */ 854 INP_INFO_UNLOCK_ASSERT(&tcbinfo); 855 return; 856 } 857 858 /* 859 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later 860 * state. tcp_do_segment() always consumes the mbuf chain, unlocks 861 * the inpcb, and unlocks pcbinfo. 862 */ 863 tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen); 864 INP_INFO_UNLOCK_ASSERT(&tcbinfo); 865 return; 866 867dropwithreset: 868 INP_INFO_WLOCK_ASSERT(&tcbinfo); 869 tcp_dropwithreset(m, th, tp, tlen, rstreason); 870 m = NULL; /* mbuf chain got consumed. 
*/ 871dropunlock: 872 INP_INFO_WLOCK_ASSERT(&tcbinfo); 873 if (inp != NULL) 874 INP_UNLOCK(inp); 875 INP_INFO_WUNLOCK(&tcbinfo); 876drop: 877 INP_INFO_UNLOCK_ASSERT(&tcbinfo); 878 if (s != NULL) 879 free(s, M_TCPLOG); 880 if (m != NULL) 881 m_freem(m); 882 return; 883} 884 885static void 886tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 887 struct tcpcb *tp, int drop_hdrlen, int tlen) 888{ 889 int thflags, acked, ourfinisacked, needoutput = 0; 890 int headlocked = 1; 891 int rstreason, todrop, win; 892 u_long tiwin; 893 struct tcpopt to; 894 895#ifdef TCPDEBUG 896 /* 897 * The size of tcp_saveipgen must be the size of the max ip header, 898 * now IPv6. 899 */ 900 u_char tcp_saveipgen[IP6_HDR_LEN]; 901 struct tcphdr tcp_savetcp; 902 short ostate = 0; 903#endif 904 thflags = th->th_flags; 905 906 INP_INFO_WLOCK_ASSERT(&tcbinfo); 907 INP_LOCK_ASSERT(tp->t_inpcb); 908 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", 909 __func__)); 910 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT", 911 __func__)); 912 913 /* 914 * Segment received on connection. 915 * Reset idle time and keep-alive timer. 916 * XXX: This should be done after segment 917 * validation to ignore broken/spoofed segs. 918 */ 919 tp->t_rcvtime = ticks; 920 if (TCPS_HAVEESTABLISHED(tp->t_state)) 921 tcp_timer_activate(tp, TT_KEEP, tcp_keepidle); 922 923 /* 924 * Unscale the window into a 32-bit value. 925 * For the SYN_SENT state the scale is zero. 926 */ 927 tiwin = th->th_win << tp->snd_scale; 928 929 /* 930 * Parse options on any incoming segment. 931 */ 932 tcp_dooptions(&to, (u_char *)(th + 1), 933 (th->th_off << 2) - sizeof(struct tcphdr), 934 (thflags & TH_SYN) ? TO_SYN : 0); 935 936 /* 937 * If echoed timestamp is later than the current time, 938 * fall back to non RFC1323 RTT calculation. Normalize 939 * timestamp if syncookies were used when this connection 940 * was established. 941 */ 942 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 943 to.to_tsecr -= tp->ts_offset; 944 if (TSTMP_GT(to.to_tsecr, ticks)) 945 to.to_tsecr = 0; 946 } 947 948 /* 949 * Process options only when we get SYN/ACK back. The SYN case 950 * for incoming connections is handled in tcp_syncache. 951 * According to RFC1323 the window field in a SYN (i.e., a <SYN> 952 * or <SYN,ACK>) segment itself is never scaled. 953 * XXX this is traditional behavior, may need to be cleaned up. 954 */ 955 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 956 if ((to.to_flags & TOF_SCALE) && 957 (tp->t_flags & TF_REQ_SCALE)) { 958 tp->t_flags |= TF_RCVD_SCALE; 959 tp->snd_scale = to.to_wscale; 960 } 961 /* 962 * Initial send window. It will be updated with 963 * the next incoming segment to the scaled value. 964 */ 965 tp->snd_wnd = th->th_win; 966 if (to.to_flags & TOF_TS) { 967 tp->t_flags |= TF_RCVD_TSTMP; 968 tp->ts_recent = to.to_tsval; 969 tp->ts_recent_age = ticks; 970 } 971 if (to.to_flags & TOF_MSS) 972 tcp_mss(tp, to.to_mss); 973 if ((tp->t_flags & TF_SACK_PERMIT) && 974 (to.to_flags & TOF_SACKPERM) == 0) 975 tp->t_flags &= ~TF_SACK_PERMIT; 976 } 977 978 /* 979 * Header prediction: check for the two common cases 980 * of a uni-directional data xfer. If the packet has 981 * no control flags, is in-sequence, the window didn't 982 * change and we're not retransmitting, it's a 983 * candidate. If the length is zero and the ack moved 984 * forward, we're the sender side of the xfer. Just 985 * free the data acked & wake any higher level process 986 * that was blocked waiting for space. 
If the length 987 * is non-zero and the ack didn't move, we're the 988 * receiver side. If we're getting packets in-order 989 * (the reassembly queue is empty), add the data to 990 * the socket buffer and note that we need a delayed ack. 991 * Make sure that the hidden state-flags are also off. 992 * Since we check for TCPS_ESTABLISHED first, it can only 993 * be TH_NEEDSYN. 994 */ 995 if (tp->t_state == TCPS_ESTABLISHED && 996 th->th_seq == tp->rcv_nxt && 997 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 998 tp->snd_nxt == tp->snd_max && 999 tiwin && tiwin == tp->snd_wnd && 1000 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) && 1001 LIST_EMPTY(&tp->t_segq) && 1002 ((to.to_flags & TOF_TS) == 0 || 1003 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) { 1004 1005 /* 1006 * If last ACK falls within this segment's sequence numbers, 1007 * record the timestamp. 1008 * NOTE that the test is modified according to the latest 1009 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1010 */ 1011 if ((to.to_flags & TOF_TS) != 0 && 1012 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 1013 tp->ts_recent_age = ticks; 1014 tp->ts_recent = to.to_tsval; 1015 } 1016 1017 if (tlen == 0) { 1018 if (SEQ_GT(th->th_ack, tp->snd_una) && 1019 SEQ_LEQ(th->th_ack, tp->snd_max) && 1020 tp->snd_cwnd >= tp->snd_wnd && 1021 ((!tcp_do_newreno && 1022 !(tp->t_flags & TF_SACK_PERMIT) && 1023 tp->t_dupacks < tcprexmtthresh) || 1024 ((tcp_do_newreno || 1025 (tp->t_flags & TF_SACK_PERMIT)) && 1026 !IN_FASTRECOVERY(tp) && 1027 (to.to_flags & TOF_SACK) == 0 && 1028 TAILQ_EMPTY(&tp->snd_holes)))) { 1029 KASSERT(headlocked, 1030 ("%s: headlocked", __func__)); 1031 INP_INFO_WUNLOCK(&tcbinfo); 1032 headlocked = 0; 1033 /* 1034 * This is a pure ack for outstanding data. 1035 */ 1036 ++tcpstat.tcps_predack; 1037 /* 1038 * "bad retransmit" recovery. 1039 */ 1040 if (tp->t_rxtshift == 1 && 1041 ticks < tp->t_badrxtwin) { 1042 ++tcpstat.tcps_sndrexmitbad; 1043 tp->snd_cwnd = tp->snd_cwnd_prev; 1044 tp->snd_ssthresh = 1045 tp->snd_ssthresh_prev; 1046 tp->snd_recover = tp->snd_recover_prev; 1047 if (tp->t_flags & TF_WASFRECOVERY) 1048 ENTER_FASTRECOVERY(tp); 1049 tp->snd_nxt = tp->snd_max; 1050 tp->t_badrxtwin = 0; 1051 } 1052 1053 /* 1054 * Recalculate the transmit timer / rtt. 1055 * 1056 * Some boxes send broken timestamp replies 1057 * during the SYN+ACK phase, ignore 1058 * timestamps of 0 or we could calculate a 1059 * huge RTT and blow up the retransmit timer. 1060 */ 1061 if ((to.to_flags & TOF_TS) != 0 && 1062 to.to_tsecr) { 1063 if (!tp->t_rttlow || 1064 tp->t_rttlow > ticks - to.to_tsecr) 1065 tp->t_rttlow = ticks - to.to_tsecr; 1066 tcp_xmit_timer(tp, 1067 ticks - to.to_tsecr + 1); 1068 } else if (tp->t_rtttime && 1069 SEQ_GT(th->th_ack, tp->t_rtseq)) { 1070 if (!tp->t_rttlow || 1071 tp->t_rttlow > ticks - tp->t_rtttime) 1072 tp->t_rttlow = ticks - tp->t_rtttime; 1073 tcp_xmit_timer(tp, 1074 ticks - tp->t_rtttime); 1075 } 1076 tcp_xmit_bandwidth_limit(tp, th->th_ack); 1077 acked = th->th_ack - tp->snd_una; 1078 tcpstat.tcps_rcvackpack++; 1079 tcpstat.tcps_rcvackbyte += acked; 1080 sbdrop(&so->so_snd, acked); 1081 if (SEQ_GT(tp->snd_una, tp->snd_recover) && 1082 SEQ_LEQ(th->th_ack, tp->snd_recover)) 1083 tp->snd_recover = th->th_ack - 1; 1084 tp->snd_una = th->th_ack; 1085 /* 1086 * Pull snd_wl2 up to prevent seq wrap relative 1087 * to th_ack. 1088 */ 1089 tp->snd_wl2 = th->th_ack; 1090 tp->t_dupacks = 0; 1091 m_freem(m); 1092 ND6_HINT(tp); /* Some progress has been made. 
 */

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);
				/*
				 * NB: sowwakeup_locked() does an
				 * implicit unlock.
				 */
				sowwakeup(so);
				if (so->so_snd.sb_cc)
					(void) tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			KASSERT(headlocked, ("%s: headlocked", __func__));
			INP_INFO_WUNLOCK(&tcbinfo);
			headlocked = 0;
			/*
			 * This is a pure, in-sequence data packet
			 * with nothing on the reassembly queue and
			 * we have enough buffer space to take it.
			 */
			/* Clean receiver SACK report if present */
			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			++tcpstat.tcps_preddat;
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			tcpstat.tcps_rcvpack++;
			tcpstat.tcps_rcvbyte += tlen;
			ND6_HINT(tp);	/* Some progress has been made */
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
			/*
			 * Automatic sizing of receive socket buffer.  Often the send
			 * buffer size is not optimally adjusted to the actual network
			 * conditions at hand (delay bandwidth product).  Setting the
			 * buffer size too small limits throughput on links with high
			 * bandwidth and high delay (e.g. trans-continental/oceanic links).
			 *
			 * On the receive side the socket buffer memory is only rarely
			 * used to any significant extent.  This allows us to be much
			 * more aggressive in scaling the receive socket buffer.  For
			 * the case that the buffer space is actually used to a large
			 * extent and we run out of kernel memory we can simply drop
			 * the new segments; TCP on the sender will just retransmit them
			 * later.  Setting the buffer size too big may only consume too
			 * much kernel memory if the application doesn't read() from
			 * the socket or packet loss or reordering makes use of the
			 * reassembly queue.
			 *
			 * The criteria to step up the receive buffer one notch are:
			 *  1. the number of bytes received during the time it takes
			 *     one timestamp to be reflected back to us (the RTT);
			 *  2. received bytes per RTT is within seven eighths of the
			 *     current socket buffer size;
			 *  3. receive buffer size has not hit maximal automatic size;
			 *
			 * This algorithm does one step per RTT at most and only if
			 * we receive a bulk stream w/o packet losses or reorderings.
			 * Shrinking the buffer during idle times is not necessary as
			 * it doesn't consume any memory when idle.
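			 *
			 * For example, with the default sysctl values above
			 * (recvbuf_inc = 16 kB, recvbuf_max = 256 kB), a flow
			 * that delivers more than 7/8 of the current sb_hiwat
			 * within one RTT grows the buffer by 16 kB per step
			 * until the 256 kB ceiling is reached.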
1187 * 1188 * TODO: Only step up if the application is actually serving 1189 * the buffer to better manage the socket buffer resources. 1190 */ 1191 if (tcp_do_autorcvbuf && 1192 to.to_tsecr && 1193 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 1194 if (to.to_tsecr > tp->rfbuf_ts && 1195 to.to_tsecr - tp->rfbuf_ts < hz) { 1196 if (tp->rfbuf_cnt > 1197 (so->so_rcv.sb_hiwat / 8 * 7) && 1198 so->so_rcv.sb_hiwat < 1199 tcp_autorcvbuf_max) { 1200 newsize = 1201 min(so->so_rcv.sb_hiwat + 1202 tcp_autorcvbuf_inc, 1203 tcp_autorcvbuf_max); 1204 } 1205 /* Start over with next RTT. */ 1206 tp->rfbuf_ts = 0; 1207 tp->rfbuf_cnt = 0; 1208 } else 1209 tp->rfbuf_cnt += tlen; /* add up */ 1210 } 1211 1212 /* Add data to socket buffer. */ 1213 SOCKBUF_LOCK(&so->so_rcv); 1214 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1215 m_freem(m); 1216 } else { 1217 /* 1218 * Set new socket buffer size. 1219 * Give up when limit is reached. 1220 */ 1221 if (newsize) 1222 if (!sbreserve_locked(&so->so_rcv, 1223 newsize, so, curthread)) 1224 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1225 m_adj(m, drop_hdrlen); /* delayed header drop */ 1226 sbappendstream_locked(&so->so_rcv, m); 1227 } 1228 /* NB: sorwakeup_locked() does an implicit unlock. */ 1229 sorwakeup_locked(so); 1230 if (DELAY_ACK(tp)) { 1231 tp->t_flags |= TF_DELACK; 1232 } else { 1233 tp->t_flags |= TF_ACKNOW; 1234 tcp_output(tp); 1235 } 1236 goto check_delack; 1237 } 1238 } 1239 1240 /* 1241 * Calculate amount of space in receive window, 1242 * and then do TCP input processing. 1243 * Receive window is amount of space in rcv queue, 1244 * but not less than advertised window. 1245 */ 1246 win = sbspace(&so->so_rcv); 1247 if (win < 0) 1248 win = 0; 1249 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1250 1251 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 1252 tp->rfbuf_ts = 0; 1253 tp->rfbuf_cnt = 0; 1254 1255 switch (tp->t_state) { 1256 1257 /* 1258 * If the state is SYN_RECEIVED: 1259 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1260 */ 1261 case TCPS_SYN_RECEIVED: 1262 if ((thflags & TH_ACK) && 1263 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1264 SEQ_GT(th->th_ack, tp->snd_max))) { 1265 rstreason = BANDLIM_RST_OPENPORT; 1266 goto dropwithreset; 1267 } 1268 break; 1269 1270 /* 1271 * If the state is SYN_SENT: 1272 * if seg contains an ACK, but not for our SYN, drop the input. 1273 * if seg contains a RST, then drop the connection. 1274 * if seg does not contain SYN, then drop it. 1275 * Otherwise this is an acceptable SYN segment 1276 * initialize tp->rcv_nxt and tp->irs 1277 * if seg contains ack then advance tp->snd_una 1278 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1279 * arrange for segment to be acked (eventually) 1280 * continue processing rest of data/controls, beginning with URG 1281 */ 1282 case TCPS_SYN_SENT: 1283 if ((thflags & TH_ACK) && 1284 (SEQ_LEQ(th->th_ack, tp->iss) || 1285 SEQ_GT(th->th_ack, tp->snd_max))) { 1286 rstreason = BANDLIM_UNLIMITED; 1287 goto dropwithreset; 1288 } 1289 if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST)) 1290 tp = tcp_drop(tp, ECONNREFUSED); 1291 if (thflags & TH_RST) 1292 goto drop; 1293 if (!(thflags & TH_SYN)) 1294 goto drop; 1295 1296 tp->irs = th->th_seq; 1297 tcp_rcvseqinit(tp); 1298 if (thflags & TH_ACK) { 1299 tcpstat.tcps_connects++; 1300 soisconnected(so); 1301#ifdef MAC 1302 SOCK_LOCK(so); 1303 mac_set_socket_peer_from_mbuf(m, so); 1304 SOCK_UNLOCK(so); 1305#endif 1306 /* Do window scaling on this connection? 
*/ 1307 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1308 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1309 tp->rcv_scale = tp->request_r_scale; 1310 } 1311 tp->rcv_adv += tp->rcv_wnd; 1312 tp->snd_una++; /* SYN is acked */ 1313 /* 1314 * If there's data, delay ACK; if there's also a FIN 1315 * ACKNOW will be turned on later. 1316 */ 1317 if (DELAY_ACK(tp) && tlen != 0) 1318 tcp_timer_activate(tp, TT_DELACK, 1319 tcp_delacktime); 1320 else 1321 tp->t_flags |= TF_ACKNOW; 1322 /* 1323 * Received <SYN,ACK> in SYN_SENT[*] state. 1324 * Transitions: 1325 * SYN_SENT --> ESTABLISHED 1326 * SYN_SENT* --> FIN_WAIT_1 1327 */ 1328 tp->t_starttime = ticks; 1329 if (tp->t_flags & TF_NEEDFIN) { 1330 tp->t_state = TCPS_FIN_WAIT_1; 1331 tp->t_flags &= ~TF_NEEDFIN; 1332 thflags &= ~TH_SYN; 1333 } else { 1334 tp->t_state = TCPS_ESTABLISHED; 1335 tcp_timer_activate(tp, TT_KEEP, tcp_keepidle); 1336 } 1337 } else { 1338 /* 1339 * Received initial SYN in SYN-SENT[*] state => 1340 * simultaneous open. If segment contains CC option 1341 * and there is a cached CC, apply TAO test. 1342 * If it succeeds, connection is * half-synchronized. 1343 * Otherwise, do 3-way handshake: 1344 * SYN-SENT -> SYN-RECEIVED 1345 * SYN-SENT* -> SYN-RECEIVED* 1346 * If there was no CC option, clear cached CC value. 1347 */ 1348 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 1349 tcp_timer_activate(tp, TT_REXMT, 0); 1350 tp->t_state = TCPS_SYN_RECEIVED; 1351 } 1352 1353 KASSERT(headlocked, ("%s: trimthenstep6: head not locked", 1354 __func__)); 1355 INP_LOCK_ASSERT(tp->t_inpcb); 1356 1357 /* 1358 * Advance th->th_seq to correspond to first data byte. 1359 * If data, trim to stay within window, 1360 * dropping FIN if necessary. 1361 */ 1362 th->th_seq++; 1363 if (tlen > tp->rcv_wnd) { 1364 todrop = tlen - tp->rcv_wnd; 1365 m_adj(m, -todrop); 1366 tlen = tp->rcv_wnd; 1367 thflags &= ~TH_FIN; 1368 tcpstat.tcps_rcvpackafterwin++; 1369 tcpstat.tcps_rcvbyteafterwin += todrop; 1370 } 1371 tp->snd_wl1 = th->th_seq - 1; 1372 tp->rcv_up = th->th_seq; 1373 /* 1374 * Client side of transaction: already sent SYN and data. 1375 * If the remote host used T/TCP to validate the SYN, 1376 * our data will be ACK'd; if so, enter normal data segment 1377 * processing in the middle of step 5, ack processing. 1378 * Otherwise, goto step 6. 1379 */ 1380 if (thflags & TH_ACK) 1381 goto process_ACK; 1382 1383 goto step6; 1384 1385 /* 1386 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 1387 * do normal processing. 1388 * 1389 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later. 1390 */ 1391 case TCPS_LAST_ACK: 1392 case TCPS_CLOSING: 1393 break; /* continue normal processing */ 1394 } 1395 1396 /* 1397 * States other than LISTEN or SYN_SENT. 1398 * First check the RST flag and sequence number since reset segments 1399 * are exempt from the timestamp and connection count tests. This 1400 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 1401 * below which allowed reset segments in half the sequence space 1402 * to fall though and be processed (which gives forged reset 1403 * segments with a random sequence number a 50 percent chance of 1404 * killing a connection). 1405 * Then check timestamp, if present. 1406 * Then check the connection count, if present. 1407 * Then check that at least some bytes of segment are within 1408 * receive window. If segment begins before rcv_nxt, 1409 * drop leading data (and SYN); if nothing left, just ack. 
	 *
	 *
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 * Note: this does not take into account delayed ACKs, so
	 *   we should test against last_ack_sent instead of rcv_nxt.
	 *   The sequence number in the reset segment is normally an
	 *   echo of our outgoing acknowledgement numbers, but some hosts
	 *   send a reset with the sequence number at the rightmost edge
	 *   of our receive window, and we have to handle this case.
	 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
	 *   that brute force RST attacks are possible.  To combat this,
	 *   we use a much stricter check while in the ESTABLISHED state,
	 *   only accepting RSTs where the sequence number is equal to
	 *   last_ack_sent.  In all other states (the states in which a
	 *   RST is more likely), the more permissive check is used.
	 * If we have multiple segments in flight, the initial reset
	 * segment sequence numbers will be to the left of last_ack_sent,
	 * but they will eventually catch up.
	 * In any case, it never made sense to trim reset segments to
	 * fit the receive window since RFC 1122 says:
	 *   4.2.2.12  RST Segment: RFC-793 Section 3.4
	 *
	 *     A TCP SHOULD allow a received RST segment to include data.
	 *
	 *     DISCUSSION
	 *          It has been suggested that a RST segment could contain
	 *          ASCII text that encoded and explained the cause of the
	 *          RST.  No standard has yet been established for such
	 *          data.
	 *
	 * If the reset segment passes the sequence number test, examine
	 * the state:
	 *    SYN_RECEIVED STATE:
	 *	If passive open, return to LISTEN state.
	 *	If active open, inform user that connection was refused.
	 *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
	 *	Inform user that connection was reset, and close tcb.
	 *    CLOSING, LAST_ACK STATES:
	 *	Close the tcb.
	 *    TIME_WAIT STATE:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
	 */
	if (thflags & TH_RST) {
		if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
			switch (tp->t_state) {

			case TCPS_SYN_RECEIVED:
				so->so_error = ECONNREFUSED;
				goto close;

			case TCPS_ESTABLISHED:
				if (tcp_insecure_rst == 0 &&
				    !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
				    SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
				    !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
				    SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
					tcpstat.tcps_badrst++;
					goto drop;
				}
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
				so->so_error = ECONNRESET;
			close:
				tp->t_state = TCPS_CLOSED;
				tcpstat.tcps_drops++;
				KASSERT(headlocked, ("%s: trimthenstep6: "
				    "tcp_close: head not locked", __func__));
				tp = tcp_close(tp);
				break;

			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				KASSERT(headlocked, ("%s: trimthenstep6: "
				    "tcp_close.2: head not locked", __func__));
				tp = tcp_close(tp);
				break;
			}
		}
		goto drop;
	}

	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
1502 */ 1503 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 1504 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 1505 1506 /* Check to see if ts_recent is over 24 days old. */ 1507 if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) { 1508 /* 1509 * Invalidate ts_recent. If this segment updates 1510 * ts_recent, the age will be reset later and ts_recent 1511 * will get a valid value. If it does not, setting 1512 * ts_recent to zero will at least satisfy the 1513 * requirement that zero be placed in the timestamp 1514 * echo reply when ts_recent isn't valid. The 1515 * age isn't reset until we get a valid ts_recent 1516 * because we don't want out-of-order segments to be 1517 * dropped when ts_recent is old. 1518 */ 1519 tp->ts_recent = 0; 1520 } else { 1521 tcpstat.tcps_rcvduppack++; 1522 tcpstat.tcps_rcvdupbyte += tlen; 1523 tcpstat.tcps_pawsdrop++; 1524 if (tlen) 1525 goto dropafterack; 1526 goto drop; 1527 } 1528 } 1529 1530 /* 1531 * In the SYN-RECEIVED state, validate that the packet belongs to 1532 * this connection before trimming the data to fit the receive 1533 * window. Check the sequence number versus IRS since we know 1534 * the sequence numbers haven't wrapped. This is a partial fix 1535 * for the "LAND" DoS attack. 1536 */ 1537 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 1538 rstreason = BANDLIM_RST_OPENPORT; 1539 goto dropwithreset; 1540 } 1541 1542 todrop = tp->rcv_nxt - th->th_seq; 1543 if (todrop > 0) { 1544 if (thflags & TH_SYN) { 1545 thflags &= ~TH_SYN; 1546 th->th_seq++; 1547 if (th->th_urp > 1) 1548 th->th_urp--; 1549 else 1550 thflags &= ~TH_URG; 1551 todrop--; 1552 } 1553 /* 1554 * Following if statement from Stevens, vol. 2, p. 960. 1555 */ 1556 if (todrop > tlen 1557 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 1558 /* 1559 * Any valid FIN must be to the left of the window. 1560 * At this point the FIN must be a duplicate or out 1561 * of sequence; drop it. 1562 */ 1563 thflags &= ~TH_FIN; 1564 1565 /* 1566 * Send an ACK to resynchronize and drop any data. 1567 * But keep on processing for RST or ACK. 1568 */ 1569 tp->t_flags |= TF_ACKNOW; 1570 todrop = tlen; 1571 tcpstat.tcps_rcvduppack++; 1572 tcpstat.tcps_rcvdupbyte += todrop; 1573 } else { 1574 tcpstat.tcps_rcvpartduppack++; 1575 tcpstat.tcps_rcvpartdupbyte += todrop; 1576 } 1577 drop_hdrlen += todrop; /* drop from the top afterwards */ 1578 th->th_seq += todrop; 1579 tlen -= todrop; 1580 if (th->th_urp > todrop) 1581 th->th_urp -= todrop; 1582 else { 1583 thflags &= ~TH_URG; 1584 th->th_urp = 0; 1585 } 1586 } 1587 1588 /* 1589 * If new data are received on a connection after the 1590 * user processes are gone, then RST the other end. 1591 */ 1592 if ((so->so_state & SS_NOFDREF) && 1593 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 1594 KASSERT(headlocked, ("%s: trimthenstep6: tcp_close.3: head " 1595 "not locked", __func__)); 1596 tp = tcp_close(tp); 1597 tcpstat.tcps_rcvafterclose++; 1598 rstreason = BANDLIM_UNLIMITED; 1599 goto dropwithreset; 1600 } 1601 1602 /* 1603 * If segment ends after window, drop trailing data 1604 * (and PUSH and FIN); if nothing left, just ACK. 1605 */ 1606 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 1607 if (todrop > 0) { 1608 tcpstat.tcps_rcvpackafterwin++; 1609 if (todrop >= tlen) { 1610 tcpstat.tcps_rcvbyteafterwin += tlen; 1611 /* 1612 * If window is closed can only take segments at 1613 * window edge, and have to drop data and PUSH from 1614 * incoming segments. Continue processing, but 1615 * remember to ack. 
Otherwise, drop segment 1616 * and ack. 1617 */ 1618 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 1619 tp->t_flags |= TF_ACKNOW; 1620 tcpstat.tcps_rcvwinprobe++; 1621 } else 1622 goto dropafterack; 1623 } else 1624 tcpstat.tcps_rcvbyteafterwin += todrop; 1625 m_adj(m, -todrop); 1626 tlen -= todrop; 1627 thflags &= ~(TH_PUSH|TH_FIN); 1628 } 1629 1630 /* 1631 * If last ACK falls within this segment's sequence numbers, 1632 * record its timestamp. 1633 * NOTE: 1634 * 1) That the test incorporates suggestions from the latest 1635 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1636 * 2) That updating only on newer timestamps interferes with 1637 * our earlier PAWS tests, so this check should be solely 1638 * predicated on the sequence space of this segment. 1639 * 3) That we modify the segment boundary check to be 1640 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 1641 * instead of RFC1323's 1642 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 1643 * This modified check allows us to overcome RFC1323's 1644 * limitations as described in Stevens TCP/IP Illustrated 1645 * Vol. 2 p.869. In such cases, we can still calculate the 1646 * RTT correctly when RCV.NXT == Last.ACK.Sent. 1647 */ 1648 if ((to.to_flags & TOF_TS) != 0 && 1649 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 1650 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 1651 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 1652 tp->ts_recent_age = ticks; 1653 tp->ts_recent = to.to_tsval; 1654 } 1655 1656 /* 1657 * If a SYN is in the window, then this is an 1658 * error and we send an RST and drop the connection. 1659 */ 1660 if (thflags & TH_SYN) { 1661 KASSERT(headlocked, ("%s: tcp_drop: trimthenstep6: " 1662 "head not locked", __func__)); 1663 tp = tcp_drop(tp, ECONNRESET); 1664 rstreason = BANDLIM_UNLIMITED; 1665 goto drop; 1666 } 1667 1668 /* 1669 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 1670 * flag is on (half-synchronized state), then queue data for 1671 * later processing; else drop segment and return. 1672 */ 1673 if ((thflags & TH_ACK) == 0) { 1674 if (tp->t_state == TCPS_SYN_RECEIVED || 1675 (tp->t_flags & TF_NEEDSYN)) 1676 goto step6; 1677 else if (tp->t_flags & TF_ACKNOW) 1678 goto dropafterack; 1679 else 1680 goto drop; 1681 } 1682 1683 /* 1684 * Ack processing. 1685 */ 1686 switch (tp->t_state) { 1687 1688 /* 1689 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 1690 * ESTABLISHED state and continue processing. 1691 * The ACK was checked above. 1692 */ 1693 case TCPS_SYN_RECEIVED: 1694 1695 tcpstat.tcps_connects++; 1696 soisconnected(so); 1697 /* Do window scaling? */ 1698 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1699 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1700 tp->rcv_scale = tp->request_r_scale; 1701 tp->snd_wnd = tiwin; 1702 } 1703 /* 1704 * Make transitions: 1705 * SYN-RECEIVED -> ESTABLISHED 1706 * SYN-RECEIVED* -> FIN-WAIT-1 1707 */ 1708 tp->t_starttime = ticks; 1709 if (tp->t_flags & TF_NEEDFIN) { 1710 tp->t_state = TCPS_FIN_WAIT_1; 1711 tp->t_flags &= ~TF_NEEDFIN; 1712 } else { 1713 tp->t_state = TCPS_ESTABLISHED; 1714 tcp_timer_activate(tp, TT_KEEP, tcp_keepidle); 1715 } 1716 /* 1717 * If segment contains data or ACK, will call tcp_reass() 1718 * later; if not, do so now to pass queued data to user. 1719 */ 1720 if (tlen == 0 && (thflags & TH_FIN) == 0) 1721 (void) tcp_reass(tp, (struct tcphdr *)0, 0, 1722 (struct mbuf *)0); 1723 tp->snd_wl1 = th->th_seq - 1; 1724 /* FALLTHROUGH */ 1725 1726 /* 1727 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 1728 * ACKs. 
	 * If the ack is in the range
	 *	tp->snd_una < th->th_ack <= tp->snd_max
	 * then advance tp->snd_una to th->th_ack and drop
	 * data from the retransmission queue.  If this ACK reflects
	 * more up-to-date window information, we update our window information.
	 */
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_FIN_WAIT_2:
	case TCPS_CLOSE_WAIT:
	case TCPS_CLOSING:
	case TCPS_LAST_ACK:
		if (SEQ_GT(th->th_ack, tp->snd_max)) {
			tcpstat.tcps_rcvacktoomuch++;
			goto dropafterack;
		}
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    ((to.to_flags & TOF_SACK) ||
		    !TAILQ_EMPTY(&tp->snd_holes)))
			tcp_sack_doack(tp, &to, th->th_ack);
		if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
			if (tlen == 0 && tiwin == tp->snd_wnd) {
				tcpstat.tcps_rcvdupack++;
				/*
				 * If we have outstanding data (other than
				 * a window probe), this is a completely
				 * duplicate ack (i.e., window info didn't
				 * change), the ack is the biggest we've
				 * seen and we've seen exactly our rexmt
				 * threshold of them, assume a packet
				 * has been dropped and retransmit it.
				 * Kludge snd_nxt & the congestion
				 * window so we send only this one
				 * packet.
				 *
				 * We know we're losing at the current
				 * window size so do congestion avoidance
				 * (set ssthresh to half the current window
				 * and pull our congestion window back to
				 * the new ssthresh).
				 *
				 * Dup acks mean that packets have left the
				 * network (they're now cached at the receiver)
				 * so bump cwnd by the amount in the receiver
				 * to keep a constant cwnd packets in the
				 * network.
				 */
				if (!tcp_timer_active(tp, TT_REXMT) ||
				    th->th_ack != tp->snd_una)
					tp->t_dupacks = 0;
				else if (++tp->t_dupacks > tcprexmtthresh ||
				    ((tcp_do_newreno ||
				    (tp->t_flags & TF_SACK_PERMIT)) &&
				    IN_FASTRECOVERY(tp))) {
					if ((tp->t_flags & TF_SACK_PERMIT) &&
					    IN_FASTRECOVERY(tp)) {
						int awnd;

						/*
						 * Compute the amount of data in flight first.
						 * We can inject new data into the pipe iff
						 * we have less than 1/2 the original window's
						 * worth of data in flight.
						 */
						awnd = (tp->snd_nxt - tp->snd_fack) +
						    tp->sackhint.sack_bytes_rexmit;
						if (awnd < tp->snd_ssthresh) {
							tp->snd_cwnd += tp->t_maxseg;
							if (tp->snd_cwnd > tp->snd_ssthresh)
								tp->snd_cwnd = tp->snd_ssthresh;
						}
					} else
						tp->snd_cwnd += tp->t_maxseg;
					(void) tcp_output(tp);
					goto drop;
				} else if (tp->t_dupacks == tcprexmtthresh) {
					tcp_seq onxt = tp->snd_nxt;
					u_int win;

					/*
					 * If we're doing sack, check to
					 * see if we're already in sack
					 * recovery. If we're not doing sack,
					 * check to see if we're in newreno
					 * recovery.
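					 *
					 * On the third duplicate ACK
					 * (tcprexmtthresh) ssthresh is set
					 * below to half of min(snd_wnd,
					 * snd_cwnd), rounded down to whole
					 * segments with a floor of two;
					 * e.g. for t_maxseg = 1460 and a
					 * 64 kB window that is 22 segments.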
1813 */ 1814 if (tp->t_flags & TF_SACK_PERMIT) { 1815 if (IN_FASTRECOVERY(tp)) { 1816 tp->t_dupacks = 0; 1817 break; 1818 } 1819 } else if (tcp_do_newreno) { 1820 if (SEQ_LEQ(th->th_ack, 1821 tp->snd_recover)) { 1822 tp->t_dupacks = 0; 1823 break; 1824 } 1825 } 1826 win = min(tp->snd_wnd, tp->snd_cwnd) / 1827 2 / tp->t_maxseg; 1828 if (win < 2) 1829 win = 2; 1830 tp->snd_ssthresh = win * tp->t_maxseg; 1831 ENTER_FASTRECOVERY(tp); 1832 tp->snd_recover = tp->snd_max; 1833 tcp_timer_activate(tp, TT_REXMT, 0); 1834 tp->t_rtttime = 0; 1835 if (tp->t_flags & TF_SACK_PERMIT) { 1836 tcpstat.tcps_sack_recovery_episode++; 1837 tp->sack_newdata = tp->snd_nxt; 1838 tp->snd_cwnd = tp->t_maxseg; 1839 (void) tcp_output(tp); 1840 goto drop; 1841 } 1842 tp->snd_nxt = th->th_ack; 1843 tp->snd_cwnd = tp->t_maxseg; 1844 (void) tcp_output(tp); 1845 KASSERT(tp->snd_limited <= 2, 1846 ("%s: tp->snd_limited too big", 1847 __func__)); 1848 tp->snd_cwnd = tp->snd_ssthresh + 1849 tp->t_maxseg * 1850 (tp->t_dupacks - tp->snd_limited); 1851 if (SEQ_GT(onxt, tp->snd_nxt)) 1852 tp->snd_nxt = onxt; 1853 goto drop; 1854 } else if (tcp_do_rfc3042) { 1855 u_long oldcwnd = tp->snd_cwnd; 1856 tcp_seq oldsndmax = tp->snd_max; 1857 u_int sent; 1858 1859 KASSERT(tp->t_dupacks == 1 || 1860 tp->t_dupacks == 2, 1861 ("%s: dupacks not 1 or 2", 1862 __func__)); 1863 if (tp->t_dupacks == 1) 1864 tp->snd_limited = 0; 1865 tp->snd_cwnd = 1866 (tp->snd_nxt - tp->snd_una) + 1867 (tp->t_dupacks - tp->snd_limited) * 1868 tp->t_maxseg; 1869 (void) tcp_output(tp); 1870 sent = tp->snd_max - oldsndmax; 1871 if (sent > tp->t_maxseg) { 1872 KASSERT((tp->t_dupacks == 2 && 1873 tp->snd_limited == 0) || 1874 (sent == tp->t_maxseg + 1 && 1875 tp->t_flags & TF_SENTFIN), 1876 ("%s: sent too much", 1877 __func__)); 1878 tp->snd_limited = 2; 1879 } else if (sent > 0) 1880 ++tp->snd_limited; 1881 tp->snd_cwnd = oldcwnd; 1882 goto drop; 1883 } 1884 } else 1885 tp->t_dupacks = 0; 1886 break; 1887 } 1888 1889 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 1890 ("%s: th_ack <= snd_una", __func__)); 1891 1892 /* 1893 * If the congestion window was inflated to account 1894 * for the other side's cached packets, retract it. 1895 */ 1896 if (tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) { 1897 if (IN_FASTRECOVERY(tp)) { 1898 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 1899 if (tp->t_flags & TF_SACK_PERMIT) 1900 tcp_sack_partialack(tp, th); 1901 else 1902 tcp_newreno_partial_ack(tp, th); 1903 } else { 1904 /* 1905 * Out of fast recovery. 1906 * Window inflation should have left us 1907 * with approximately snd_ssthresh 1908 * outstanding data. 1909 * But in case we would be inclined to 1910 * send a burst, better to do it via 1911 * the slow start mechanism. 1912 */ 1913 if (SEQ_GT(th->th_ack + 1914 tp->snd_ssthresh, 1915 tp->snd_max)) 1916 tp->snd_cwnd = tp->snd_max - 1917 th->th_ack + 1918 tp->t_maxseg; 1919 else 1920 tp->snd_cwnd = tp->snd_ssthresh; 1921 } 1922 } 1923 } else { 1924 if (tp->t_dupacks >= tcprexmtthresh && 1925 tp->snd_cwnd > tp->snd_ssthresh) 1926 tp->snd_cwnd = tp->snd_ssthresh; 1927 } 1928 tp->t_dupacks = 0; 1929 /* 1930 * If we reach this point, ACK is not a duplicate, 1931 * i.e., it ACKs something we sent. 1932 */ 1933 if (tp->t_flags & TF_NEEDSYN) { 1934 /* 1935 * T/TCP: Connection was half-synchronized, and our 1936 * SYN has been ACK'd (so connection is now fully 1937 * synchronized). Go to non-starred state, 1938 * increment snd_una for ACK of SYN, and check if 1939 * we can do window scaling. 
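 * (The SYN itself consumes one unit of sequence space, which is why
 * snd_una is advanced by exactly one below.)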
1940 */ 1941 tp->t_flags &= ~TF_NEEDSYN; 1942 tp->snd_una++; 1943 /* Do window scaling? */ 1944 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1945 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1946 tp->rcv_scale = tp->request_r_scale; 1947 /* Send window already scaled. */ 1948 } 1949 } 1950 1951process_ACK: 1952 KASSERT(headlocked, ("%s: process_ACK: head not locked", 1953 __func__)); 1954 INP_LOCK_ASSERT(tp->t_inpcb); 1955 1956 acked = th->th_ack - tp->snd_una; 1957 tcpstat.tcps_rcvackpack++; 1958 tcpstat.tcps_rcvackbyte += acked; 1959 1960 /* 1961 * If we just performed our first retransmit, and the ACK 1962 * arrives within our recovery window, then it was a mistake 1963 * to do the retransmit in the first place. Recover our 1964 * original cwnd and ssthresh, and proceed to transmit where 1965 * we left off. 1966 */ 1967 if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) { 1968 ++tcpstat.tcps_sndrexmitbad; 1969 tp->snd_cwnd = tp->snd_cwnd_prev; 1970 tp->snd_ssthresh = tp->snd_ssthresh_prev; 1971 tp->snd_recover = tp->snd_recover_prev; 1972 if (tp->t_flags & TF_WASFRECOVERY) 1973 ENTER_FASTRECOVERY(tp); 1974 tp->snd_nxt = tp->snd_max; 1975 tp->t_badrxtwin = 0; /* XXX probably not required */ 1976 } 1977 1978 /* 1979 * If we have a timestamp reply, update smoothed 1980 * round trip time. If no timestamp is present but 1981 * transmit timer is running and timed sequence 1982 * number was acked, update smoothed round trip time. 1983 * Since we now have an rtt measurement, cancel the 1984 * timer backoff (cf., Phil Karn's retransmit alg.). 1985 * Recompute the initial retransmit timer. 1986 * 1987 * Some boxes send broken timestamp replies 1988 * during the SYN+ACK phase, ignore 1989 * timestamps of 0 or we could calculate a 1990 * huge RTT and blow up the retransmit timer. 1991 */ 1992 if ((to.to_flags & TOF_TS) != 0 && 1993 to.to_tsecr) { 1994 if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr) 1995 tp->t_rttlow = ticks - to.to_tsecr; 1996 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1); 1997 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 1998 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 1999 tp->t_rttlow = ticks - tp->t_rtttime; 2000 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2001 } 2002 tcp_xmit_bandwidth_limit(tp, th->th_ack); 2003 2004 /* 2005 * If all outstanding data is acked, stop retransmit 2006 * timer and remember to restart (more output or persist). 2007 * If there is more data to be acked, restart retransmit 2008 * timer, using current (possibly backed-off) value. 2009 */ 2010 if (th->th_ack == tp->snd_max) { 2011 tcp_timer_activate(tp, TT_REXMT, 0); 2012 needoutput = 1; 2013 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2014 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2015 2016 /* 2017 * If no data (only SYN) was ACK'd, 2018 * skip rest of ACK processing. 2019 */ 2020 if (acked == 0) 2021 goto step6; 2022 2023 /* 2024 * When new data is acked, open the congestion window. 2025 * If the window gives us less than ssthresh packets 2026 * in flight, open exponentially (maxseg per packet). 2027 * Otherwise open linearly: maxseg per window 2028 * (maxseg^2 / cwnd per packet). 
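 * A worked example (illustrative values): with t_maxseg = 1460 and
 * snd_cwnd = 14600, i.e. above ssthresh, each ACK grows the window by
 * 1460 * 1460 / 14600 = 146 bytes, which works out to about one
 * segment per window of ACKs; below ssthresh each ACK would grow it
 * by a full 1460.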
2029 */ 2030 if ((!tcp_do_newreno && !(tp->t_flags & TF_SACK_PERMIT)) || 2031 !IN_FASTRECOVERY(tp)) { 2032 u_int cw = tp->snd_cwnd; 2033 u_int incr = tp->t_maxseg; 2034 if (cw > tp->snd_ssthresh) 2035 incr = incr * incr / cw; 2036 tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale); 2037 } 2038 SOCKBUF_LOCK(&so->so_snd); 2039 if (acked > so->so_snd.sb_cc) { 2040 tp->snd_wnd -= so->so_snd.sb_cc; 2041 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc); 2042 ourfinisacked = 1; 2043 } else { 2044 sbdrop_locked(&so->so_snd, acked); 2045 tp->snd_wnd -= acked; 2046 ourfinisacked = 0; 2047 } 2048 sowwakeup_locked(so); 2049 /* Detect una wraparound. */ 2050 if ((tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) && 2051 !IN_FASTRECOVERY(tp) && 2052 SEQ_GT(tp->snd_una, tp->snd_recover) && 2053 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2054 tp->snd_recover = th->th_ack - 1; 2055 if ((tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) && 2056 IN_FASTRECOVERY(tp) && 2057 SEQ_GEQ(th->th_ack, tp->snd_recover)) 2058 EXIT_FASTRECOVERY(tp); 2059 tp->snd_una = th->th_ack; 2060 if (tp->t_flags & TF_SACK_PERMIT) { 2061 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2062 tp->snd_recover = tp->snd_una; 2063 } 2064 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2065 tp->snd_nxt = tp->snd_una; 2066 2067 switch (tp->t_state) { 2068 2069 /* 2070 * In FIN_WAIT_1 STATE in addition to the processing 2071 * for the ESTABLISHED state if our FIN is now acknowledged 2072 * then enter FIN_WAIT_2. 2073 */ 2074 case TCPS_FIN_WAIT_1: 2075 if (ourfinisacked) { 2076 /* 2077 * If we can't receive any more 2078 * data, then closing user can proceed. 2079 * Starting the timer is contrary to the 2080 * specification, but if we don't get a FIN 2081 * we'll hang forever. 2082 * 2083 * XXXjl: 2084 * we should release the tp also, and use a 2085 * compressed state. 2086 */ 2087 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2088 int timeout; 2089 2090 soisdisconnected(so); 2091 timeout = (tcp_fast_finwait2_recycle) ? 2092 tcp_finwait2_timeout : tcp_maxidle; 2093 tcp_timer_activate(tp, TT_2MSL, timeout); 2094 } 2095 tp->t_state = TCPS_FIN_WAIT_2; 2096 } 2097 break; 2098 2099 /* 2100 * In CLOSING STATE in addition to the processing for 2101 * the ESTABLISHED state if the ACK acknowledges our FIN 2102 * then enter the TIME-WAIT state, otherwise ignore 2103 * the segment. 2104 */ 2105 case TCPS_CLOSING: 2106 if (ourfinisacked) { 2107 KASSERT(headlocked, ("%s: process_ACK: " 2108 "head not locked", __func__)); 2109 tcp_twstart(tp); 2110 INP_INFO_WUNLOCK(&tcbinfo); 2111 headlocked = 0; 2112 m_freem(m); 2113 return; 2114 } 2115 break; 2116 2117 /* 2118 * In LAST_ACK, we may still be waiting for data to drain 2119 * and/or to be acked, as well as for the ack of our FIN. 2120 * If our FIN is now acknowledged, delete the TCB, 2121 * enter the closed state and return. 2122 */ 2123 case TCPS_LAST_ACK: 2124 if (ourfinisacked) { 2125 KASSERT(headlocked, ("%s: process_ACK: " 2126 "tcp_close: head not locked", __func__)); 2127 tp = tcp_close(tp); 2128 goto drop; 2129 } 2130 break; 2131 } 2132 } 2133 2134step6: 2135 KASSERT(headlocked, ("%s: step6: head not locked", __func__)); 2136 INP_LOCK_ASSERT(tp->t_inpcb); 2137 2138 /* 2139 * Update window information. 2140 * Don't look at window if no ACK: TAC's send garbage on first SYN. 
2141 */ 2142 if ((thflags & TH_ACK) && 2143 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2144 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2145 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2146 /* keep track of pure window updates */ 2147 if (tlen == 0 && 2148 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2149 tcpstat.tcps_rcvwinupd++; 2150 tp->snd_wnd = tiwin; 2151 tp->snd_wl1 = th->th_seq; 2152 tp->snd_wl2 = th->th_ack; 2153 if (tp->snd_wnd > tp->max_sndwnd) 2154 tp->max_sndwnd = tp->snd_wnd; 2155 needoutput = 1; 2156 } 2157 2158 /* 2159 * Process segments with URG. 2160 */ 2161 if ((thflags & TH_URG) && th->th_urp && 2162 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2163 /* 2164 * This is a kludge, but if we receive and accept 2165 * random urgent pointers, we'll crash in 2166 * soreceive. It's hard to imagine someone 2167 * actually wanting to send this much urgent data. 2168 */ 2169 SOCKBUF_LOCK(&so->so_rcv); 2170 if (th->th_urp + so->so_rcv.sb_cc > sb_max) { 2171 th->th_urp = 0; /* XXX */ 2172 thflags &= ~TH_URG; /* XXX */ 2173 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2174 goto dodata; /* XXX */ 2175 } 2176 /* 2177 * If this segment advances the known urgent pointer, 2178 * then mark the data stream. This should not happen 2179 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2180 * a FIN has been received from the remote side. 2181 * In these states we ignore the URG. 2182 * 2183 * According to RFC961 (Assigned Protocols), 2184 * the urgent pointer points to the last octet 2185 * of urgent data. We continue, however, 2186 * to consider it to indicate the first octet 2187 * of data past the urgent section as the original 2188 * spec states (in one of two places). 2189 */ 2190 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2191 tp->rcv_up = th->th_seq + th->th_urp; 2192 so->so_oobmark = so->so_rcv.sb_cc + 2193 (tp->rcv_up - tp->rcv_nxt) - 1; 2194 if (so->so_oobmark == 0) 2195 so->so_rcv.sb_state |= SBS_RCVATMARK; 2196 sohasoutofband(so); 2197 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2198 } 2199 SOCKBUF_UNLOCK(&so->so_rcv); 2200 /* 2201 * Remove out of band data so doesn't get presented to user. 2202 * This can happen independent of advancing the URG pointer, 2203 * but if two URG's are pending at once, some out-of-band 2204 * data may creep in... ick. 2205 */ 2206 if (th->th_urp <= (u_long)tlen && 2207 !(so->so_options & SO_OOBINLINE)) { 2208 /* hdr drop is delayed */ 2209 tcp_pulloutofband(so, th, m, drop_hdrlen); 2210 } 2211 } else { 2212 /* 2213 * If no out of band data is expected, 2214 * pull receive urgent pointer along 2215 * with the receive window. 2216 */ 2217 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2218 tp->rcv_up = tp->rcv_nxt; 2219 } 2220dodata: /* XXX */ 2221 KASSERT(headlocked, ("%s: dodata: head not locked", __func__)); 2222 INP_LOCK_ASSERT(tp->t_inpcb); 2223 2224 /* 2225 * Process the segment text, merging it into the TCP sequencing queue, 2226 * and arranging for acknowledgment of receipt if necessary. 2227 * This process logically involves adjusting tp->rcv_wnd as data 2228 * is presented to the user (this happens in tcp_usrreq.c, 2229 * case PRU_RCVD). If a FIN has already been received on this 2230 * connection then we just ignore the text. 
2231 */ 2232 if ((tlen || (thflags & TH_FIN)) && 2233 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2234 tcp_seq save_start = th->th_seq; 2235 m_adj(m, drop_hdrlen); /* delayed header drop */ 2236 /* 2237 * Insert segment which includes th into TCP reassembly queue 2238 * with control block tp. Set thflags to whether reassembly now 2239 * includes a segment with FIN. This handles the common case 2240 * inline (segment is the next to be received on an established 2241 * connection, and the queue is empty), avoiding linkage into 2242 * and removal from the queue and repetition of various 2243 * conversions. 2244 * Set DELACK for segments received in order, but ack 2245 * immediately when segments are out of order (so 2246 * fast retransmit can work). 2247 */ 2248 if (th->th_seq == tp->rcv_nxt && 2249 LIST_EMPTY(&tp->t_segq) && 2250 TCPS_HAVEESTABLISHED(tp->t_state)) { 2251 if (DELAY_ACK(tp)) 2252 tp->t_flags |= TF_DELACK; 2253 else 2254 tp->t_flags |= TF_ACKNOW; 2255 tp->rcv_nxt += tlen; 2256 thflags = th->th_flags & TH_FIN; 2257 tcpstat.tcps_rcvpack++; 2258 tcpstat.tcps_rcvbyte += tlen; 2259 ND6_HINT(tp); 2260 SOCKBUF_LOCK(&so->so_rcv); 2261 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 2262 m_freem(m); 2263 else 2264 sbappendstream_locked(&so->so_rcv, m); 2265 /* NB: sorwakeup_locked() does an implicit unlock. */ 2266 sorwakeup_locked(so); 2267 } else { 2268 /* 2269 * XXX: Due to the header drop above "th" is 2270 * theoretically invalid by now. Fortunately 2271 * m_adj() doesn't actually frees any mbufs 2272 * when trimming from the head. 2273 */ 2274 thflags = tcp_reass(tp, th, &tlen, m); 2275 tp->t_flags |= TF_ACKNOW; 2276 } 2277 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT)) 2278 tcp_update_sack_list(tp, save_start, save_start + tlen); 2279#if 0 2280 /* 2281 * Note the amount of data that peer has sent into 2282 * our window, in order to estimate the sender's 2283 * buffer size. 2284 * XXX: Unused. 2285 */ 2286 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 2287#endif 2288 } else { 2289 m_freem(m); 2290 thflags &= ~TH_FIN; 2291 } 2292 2293 /* 2294 * If FIN is received ACK the FIN and let the user know 2295 * that the connection is closing. 2296 */ 2297 if (thflags & TH_FIN) { 2298 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2299 socantrcvmore(so); 2300 /* 2301 * If connection is half-synchronized 2302 * (ie NEEDSYN flag on) then delay ACK, 2303 * so it may be piggybacked when SYN is sent. 2304 * Otherwise, since we received a FIN then no 2305 * more input can be expected, send ACK now. 2306 */ 2307 if (tp->t_flags & TF_NEEDSYN) 2308 tp->t_flags |= TF_DELACK; 2309 else 2310 tp->t_flags |= TF_ACKNOW; 2311 tp->rcv_nxt++; 2312 } 2313 switch (tp->t_state) { 2314 2315 /* 2316 * In SYN_RECEIVED and ESTABLISHED STATES 2317 * enter the CLOSE_WAIT state. 2318 */ 2319 case TCPS_SYN_RECEIVED: 2320 tp->t_starttime = ticks; 2321 /*FALLTHROUGH*/ 2322 case TCPS_ESTABLISHED: 2323 tp->t_state = TCPS_CLOSE_WAIT; 2324 break; 2325 2326 /* 2327 * If still in FIN_WAIT_1 STATE FIN has not been acked so 2328 * enter the CLOSING state. 2329 */ 2330 case TCPS_FIN_WAIT_1: 2331 tp->t_state = TCPS_CLOSING; 2332 break; 2333 2334 /* 2335 * In FIN_WAIT_2 state enter the TIME_WAIT state, 2336 * starting the time-wait timer, turning off the other 2337 * standard timers. 
2338 */ 2339 case TCPS_FIN_WAIT_2: 2340 KASSERT(headlocked == 1, ("%s: dodata: " 2341 "TCP_FIN_WAIT_2: head not locked", __func__)); 2342 tcp_twstart(tp); 2343 INP_INFO_WUNLOCK(&tcbinfo); 2344 return; 2345 } 2346 } 2347 INP_INFO_WUNLOCK(&tcbinfo); 2348 headlocked = 0; 2349#ifdef TCPDEBUG 2350 if (so->so_options & SO_DEBUG) 2351 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 2352 &tcp_savetcp, 0); 2353#endif 2354 2355 /* 2356 * Return any desired output. 2357 */ 2358 if (needoutput || (tp->t_flags & TF_ACKNOW)) 2359 (void) tcp_output(tp); 2360 2361check_delack: 2362 KASSERT(headlocked == 0, ("%s: check_delack: head locked", 2363 __func__)); 2364 INP_INFO_UNLOCK_ASSERT(&tcbinfo); 2365 INP_LOCK_ASSERT(tp->t_inpcb); 2366 if (tp->t_flags & TF_DELACK) { 2367 tp->t_flags &= ~TF_DELACK; 2368 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 2369 } 2370 INP_UNLOCK(tp->t_inpcb); 2371 return; 2372 2373dropafterack: 2374 KASSERT(headlocked, ("%s: dropafterack: head not locked", __func__)); 2375 /* 2376 * Generate an ACK dropping incoming segment if it occupies 2377 * sequence space, where the ACK reflects our state. 2378 * 2379 * We can now skip the test for the RST flag since all 2380 * paths to this code happen after packets containing 2381 * RST have been dropped. 2382 * 2383 * In the SYN-RECEIVED state, don't send an ACK unless the 2384 * segment we received passes the SYN-RECEIVED ACK test. 2385 * If it fails send a RST. This breaks the loop in the 2386 * "LAND" DoS attack, and also prevents an ACK storm 2387 * between two listening ports that have been sent forged 2388 * SYN segments, each with the source address of the other. 2389 */ 2390 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 2391 (SEQ_GT(tp->snd_una, th->th_ack) || 2392 SEQ_GT(th->th_ack, tp->snd_max)) ) { 2393 rstreason = BANDLIM_RST_OPENPORT; 2394 goto dropwithreset; 2395 } 2396#ifdef TCPDEBUG 2397 if (so->so_options & SO_DEBUG) 2398 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2399 &tcp_savetcp, 0); 2400#endif 2401 KASSERT(headlocked, ("%s: headlocked should be 1", __func__)); 2402 INP_INFO_WUNLOCK(&tcbinfo); 2403 tp->t_flags |= TF_ACKNOW; 2404 (void) tcp_output(tp); 2405 INP_UNLOCK(tp->t_inpcb); 2406 m_freem(m); 2407 return; 2408 2409dropwithreset: 2410 KASSERT(headlocked, ("%s: dropwithreset: head not locked", __func__)); 2411 2412 tcp_dropwithreset(m, th, tp, tlen, rstreason); 2413 2414 if (tp != NULL) 2415 INP_UNLOCK(tp->t_inpcb); 2416 if (headlocked) 2417 INP_INFO_WUNLOCK(&tcbinfo); 2418 return; 2419 2420drop: 2421 /* 2422 * Drop space held by incoming segment and return. 2423 */ 2424#ifdef TCPDEBUG 2425 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2426 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2427 &tcp_savetcp, 0); 2428#endif 2429 if (tp != NULL) 2430 INP_UNLOCK(tp->t_inpcb); 2431 if (headlocked) 2432 INP_INFO_WUNLOCK(&tcbinfo); 2433 m_freem(m); 2434 return; 2435} 2436 2437/* 2438 * Issue RST and make ACK acceptable to originator of segment. 2439 * The mbuf must still include the original packet header. 2440 * tp may be NULL. 2441 */ 2442static void 2443tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 2444 int tlen, int rstreason) 2445{ 2446 struct ip *ip; 2447#ifdef INET6 2448 struct ip6_hdr *ip6; 2449#endif 2450 /* Don't bother if destination was broadcast/multicast. 
*/ 2451 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 2452 goto drop; 2453#ifdef INET6 2454 if (mtod(m, struct ip *)->ip_v == 6) { 2455 ip6 = mtod(m, struct ip6_hdr *); 2456 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 2457 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 2458 goto drop; 2459 /* IPv6 anycast check is done at tcp6_input() */ 2460 } else 2461#endif 2462 { 2463 ip = mtod(m, struct ip *); 2464 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 2465 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 2466 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 2467 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 2468 goto drop; 2469 } 2470 2471 /* Perform bandwidth limiting. */ 2472 if (badport_bandlim(rstreason) < 0) 2473 goto drop; 2474 2475 /* tcp_respond consumes the mbuf chain. */ 2476 if (th->th_flags & TH_ACK) { 2477 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 2478 th->th_ack, TH_RST); 2479 } else { 2480 if (th->th_flags & TH_SYN) 2481 tlen++; 2482 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 2483 (tcp_seq)0, TH_RST|TH_ACK); 2484 } 2485 return; 2486drop: 2487 m_freem(m); 2488 return; 2489} 2490 2491/* 2492 * Parse TCP options and place in tcpopt. 2493 */ 2494static void 2495tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 2496{ 2497 int opt, optlen; 2498 2499 to->to_flags = 0; 2500 for (; cnt > 0; cnt -= optlen, cp += optlen) { 2501 opt = cp[0]; 2502 if (opt == TCPOPT_EOL) 2503 break; 2504 if (opt == TCPOPT_NOP) 2505 optlen = 1; 2506 else { 2507 if (cnt < 2) 2508 break; 2509 optlen = cp[1]; 2510 if (optlen < 2 || optlen > cnt) 2511 break; 2512 } 2513 switch (opt) { 2514 case TCPOPT_MAXSEG: 2515 if (optlen != TCPOLEN_MAXSEG) 2516 continue; 2517 if (!(flags & TO_SYN)) 2518 continue; 2519 to->to_flags |= TOF_MSS; 2520 bcopy((char *)cp + 2, 2521 (char *)&to->to_mss, sizeof(to->to_mss)); 2522 to->to_mss = ntohs(to->to_mss); 2523 break; 2524 case TCPOPT_WINDOW: 2525 if (optlen != TCPOLEN_WINDOW) 2526 continue; 2527 if (!(flags & TO_SYN)) 2528 continue; 2529 to->to_flags |= TOF_SCALE; 2530 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 2531 break; 2532 case TCPOPT_TIMESTAMP: 2533 if (optlen != TCPOLEN_TIMESTAMP) 2534 continue; 2535 to->to_flags |= TOF_TS; 2536 bcopy((char *)cp + 2, 2537 (char *)&to->to_tsval, sizeof(to->to_tsval)); 2538 to->to_tsval = ntohl(to->to_tsval); 2539 bcopy((char *)cp + 6, 2540 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 2541 to->to_tsecr = ntohl(to->to_tsecr); 2542 break; 2543#ifdef TCP_SIGNATURE 2544 /* 2545 * XXX In order to reply to a host which has set the 2546 * TCP_SIGNATURE option in its initial SYN, we have to 2547 * record the fact that the option was observed here 2548 * for the syncache code to perform the correct response. 
2549 */ 2550 case TCPOPT_SIGNATURE: 2551 if (optlen != TCPOLEN_SIGNATURE) 2552 continue; 2553 to->to_flags |= TOF_SIGNATURE; 2554 to->to_signature = cp + 2; 2555 break; 2556#endif 2557 case TCPOPT_SACK_PERMITTED: 2558 if (optlen != TCPOLEN_SACK_PERMITTED) 2559 continue; 2560 if (!(flags & TO_SYN)) 2561 continue; 2562 if (!tcp_do_sack) 2563 continue; 2564 to->to_flags |= TOF_SACKPERM; 2565 break; 2566 case TCPOPT_SACK: 2567 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 2568 continue; 2569 if (flags & TO_SYN) 2570 continue; 2571 to->to_flags |= TOF_SACK; 2572 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 2573 to->to_sacks = cp + 2; 2574 tcpstat.tcps_sack_rcv_blocks++; 2575 break; 2576 default: 2577 continue; 2578 } 2579 } 2580} 2581 2582/* 2583 * Pull out of band byte out of a segment so 2584 * it doesn't appear in the user's data queue. 2585 * It is still reflected in the segment length for 2586 * sequencing purposes. 2587 */ 2588static void 2589tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 2590 int off) 2591{ 2592 int cnt = off + th->th_urp - 1; 2593 2594 while (cnt >= 0) { 2595 if (m->m_len > cnt) { 2596 char *cp = mtod(m, caddr_t) + cnt; 2597 struct tcpcb *tp = sototcpcb(so); 2598 2599 tp->t_iobc = *cp; 2600 tp->t_oobflags |= TCPOOB_HAVEDATA; 2601 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 2602 m->m_len--; 2603 if (m->m_flags & M_PKTHDR) 2604 m->m_pkthdr.len--; 2605 return; 2606 } 2607 cnt -= m->m_len; 2608 m = m->m_next; 2609 if (m == NULL) 2610 break; 2611 } 2612 panic("tcp_pulloutofband"); 2613} 2614 2615/* 2616 * Collect new round-trip time estimate 2617 * and update averages and current timeout. 2618 */ 2619static void 2620tcp_xmit_timer(struct tcpcb *tp, int rtt) 2621{ 2622 int delta; 2623 2624 INP_LOCK_ASSERT(tp->t_inpcb); 2625 2626 tcpstat.tcps_rttupdated++; 2627 tp->t_rttupdated++; 2628 if (tp->t_srtt != 0) { 2629 /* 2630 * srtt is stored as fixed point with 5 bits after the 2631 * binary point (i.e., scaled by 8). The following magic 2632 * is equivalent to the smoothing algorithm in rfc793 with 2633 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 2634 * point). Adjust rtt to origin 0. 2635 */ 2636 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 2637 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 2638 2639 if ((tp->t_srtt += delta) <= 0) 2640 tp->t_srtt = 1; 2641 2642 /* 2643 * We accumulate a smoothed rtt variance (actually, a 2644 * smoothed mean difference), then set the retransmit 2645 * timer to smoothed rtt + 4 times the smoothed variance. 2646 * rttvar is stored as fixed point with 4 bits after the 2647 * binary point (scaled by 16). The following is 2648 * equivalent to rfc793 smoothing with an alpha of .75 2649 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 2650 * rfc793's wired-in beta. 2651 */ 2652 if (delta < 0) 2653 delta = -delta; 2654 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 2655 if ((tp->t_rttvar += delta) <= 0) 2656 tp->t_rttvar = 1; 2657 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 2658 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2659 } else { 2660 /* 2661 * No rtt measurement yet - use the unsmoothed rtt. 2662 * Set the variance to half the rtt (so our first 2663 * retransmit happens at 3*rtt). 2664 */ 2665 tp->t_srtt = rtt << TCP_RTT_SHIFT; 2666 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 2667 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2668 } 2669 tp->t_rtttime = 0; 2670 tp->t_rxtshift = 0; 2671 2672 /* 2673 * the retransmit should happen at rtt + 4 * rttvar. 
2674 * Because of the way we do the smoothing, srtt and rttvar 2675 * will each average +1/2 tick of bias. When we compute 2676 * the retransmit timer, we want 1/2 tick of rounding and 2677 * 1 extra tick because of +-1/2 tick uncertainty in the 2678 * firing of the timer. The bias will give us exactly the 2679 * 1.5 tick we need. But, because the bias is 2680 * statistical, we have to test that we don't drop below 2681 * the minimum feasible timer (which is 2 ticks). 2682 */ 2683 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 2684 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 2685 2686 /* 2687 * We received an ack for a packet that wasn't retransmitted; 2688 * it is probably safe to discard any error indications we've 2689 * received recently. This isn't quite right, but close enough 2690 * for now (a route might have failed after we sent a segment, 2691 * and the return path might not be symmetrical). 2692 */ 2693 tp->t_softerror = 0; 2694} 2695 2696/* 2697 * Determine a reasonable value for maxseg size. 2698 * If the route is known, check route for mtu. 2699 * If none, use an mss that can be handled on the outgoing 2700 * interface without forcing IP to fragment; if bigger than 2701 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES 2702 * to utilize large mbufs. If no route is found, route has no mtu, 2703 * or the destination isn't local, use a default, hopefully conservative 2704 * size (usually 512 or the default IP max size, but no more than the mtu 2705 * of the interface), as we can't discover anything about intervening 2706 * gateways or networks. We also initialize the congestion/slow start 2707 * window to be a single segment if the destination isn't local. 2708 * While looking at the routing entry, we also initialize other path-dependent 2709 * parameters from pre-set or cached values in the routing entry. 2710 * 2711 * Also take into account the space needed for options that we 2712 * send regularly. Make maxseg shorter by that amount to assure 2713 * that we can send maxseg amount of data even when the options 2714 * are present. Store the upper limit of the length of options plus 2715 * data in maxopd. 2716 * 2717 * In case of T/TCP, we call this routine during implicit connection 2718 * setup as well (offer = -1), to initialize maxseg from the cached 2719 * MSS of our peer. 2720 * 2721 * NOTE that this routine is only called when we process an incoming 2722 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt(). 2723 */ 2724void 2725tcp_mss(struct tcpcb *tp, int offer) 2726{ 2727 int rtt, mss; 2728 u_long bufsize; 2729 u_long maxmtu; 2730 struct inpcb *inp = tp->t_inpcb; 2731 struct socket *so; 2732 struct hc_metrics_lite metrics; 2733 int origoffer = offer; 2734 int mtuflags = 0; 2735#ifdef INET6 2736 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 2737 size_t min_protoh = isipv6 ? 2738 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 2739 sizeof (struct tcpiphdr); 2740#else 2741 const size_t min_protoh = sizeof(struct tcpiphdr); 2742#endif 2743 2744 /* Initialize. */ 2745#ifdef INET6 2746 if (isipv6) { 2747 maxmtu = tcp_maxmtu6(&inp->inp_inc, &mtuflags); 2748 tp->t_maxopd = tp->t_maxseg = tcp_v6mssdflt; 2749 } else 2750#endif 2751 { 2752 maxmtu = tcp_maxmtu(&inp->inp_inc, &mtuflags); 2753 tp->t_maxopd = tp->t_maxseg = tcp_mssdflt; 2754 } 2755 so = inp->inp_socket; 2756 2757 /* 2758 * No route to sender, stay with default mss and return. 2759 */ 2760 if (maxmtu == 0) 2761 return; 2762 2763 /* What have we got? 
*/ 2764 switch (offer) { 2765 case 0: 2766 /* 2767 * Offer == 0 means that there was no MSS on the SYN 2768 * segment; in this case we use tcp_mssdflt. 2769 */ 2770 offer = 2771#ifdef INET6 2772 isipv6 ? tcp_v6mssdflt : 2773#endif 2774 tcp_mssdflt; 2775 break; 2776 2777 case -1: 2778 /* 2779 * Offer == -1 means that we didn't receive SYN yet. 2780 */ 2781 /* FALLTHROUGH */ 2782 2783 default: 2784 /* 2785 * Prevent DoS attack with too small MSS. Round up 2786 * to at least minmss. 2787 */ 2788 offer = max(offer, tcp_minmss); 2789 /* 2790 * Sanity check: make sure that maxopd will be large 2791 * enough to allow some data on segments even if all 2792 * the option space is used (40 bytes). Otherwise 2793 * funny things may happen in tcp_output. 2794 */ 2795 offer = max(offer, 64); 2796 } 2797 2798 /* 2799 * rmx information is now retrieved from tcp_hostcache. 2800 */ 2801 tcp_hc_get(&inp->inp_inc, &metrics); 2802 2803 /* 2804 * If there's a discovered mtu in the tcp hostcache, use it; 2805 * otherwise, use the link mtu. 2806 */ 2807 if (metrics.rmx_mtu) 2808 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 2809 else { 2810#ifdef INET6 2811 if (isipv6) { 2812 mss = maxmtu - min_protoh; 2813 if (!path_mtu_discovery && 2814 !in6_localaddr(&inp->in6p_faddr)) 2815 mss = min(mss, tcp_v6mssdflt); 2816 } else 2817#endif 2818 { 2819 mss = maxmtu - min_protoh; 2820 if (!path_mtu_discovery && 2821 !in_localaddr(inp->inp_faddr)) 2822 mss = min(mss, tcp_mssdflt); 2823 } 2824 } 2825 mss = min(mss, offer); 2826 2827 /* 2828 * maxopd stores the maximum length of data AND options 2829 * in a segment; maxseg is the amount of data in a normal 2830 * segment. We need to store this value (maxopd) apart 2831 * from maxseg, because now every segment carries options 2832 * and thus we normally have somewhat less data in segments. 2833 */ 2834 tp->t_maxopd = mss; 2835 2836 /* 2837 * origoffer==-1 indicates that no segments were received yet. 2838 * In this case we just guess. 2839 */ 2840 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && 2841 (origoffer == -1 || 2842 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) 2843 mss -= TCPOLEN_TSTAMP_APPA; 2844 tp->t_maxseg = mss; 2845 2846#if (MCLBYTES & (MCLBYTES - 1)) == 0 2847 if (mss > MCLBYTES) 2848 mss &= ~(MCLBYTES-1); 2849#else 2850 if (mss > MCLBYTES) 2851 mss = mss / MCLBYTES * MCLBYTES; 2852#endif 2853 tp->t_maxseg = mss; 2854 2855 /* 2856 * If there's a pipesize, change the socket buffer to that size; 2857 * don't change it if sb_hiwat differs from the default (then it 2858 * has been changed on purpose with setsockopt). 2859 * Make the socket buffers an integral number of mss units; 2860 * if the mss is larger than the socket buffer, decrease the mss.
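 * For example (assuming the default 32k send buffer): with an mss of
 * 1460 the buffer is rounded up to 23 * 1460 = 33580 bytes, subject to
 * the global sb_max limit; a hostcache pipesize, when present, is used
 * as the starting point instead.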
2861 */ 2862 SOCKBUF_LOCK(&so->so_snd); 2863 if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe) 2864 bufsize = metrics.rmx_sendpipe; 2865 else 2866 bufsize = so->so_snd.sb_hiwat; 2867 if (bufsize < mss) 2868 mss = bufsize; 2869 else { 2870 bufsize = roundup(bufsize, mss); 2871 if (bufsize > sb_max) 2872 bufsize = sb_max; 2873 if (bufsize > so->so_snd.sb_hiwat) 2874 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL); 2875 } 2876 SOCKBUF_UNLOCK(&so->so_snd); 2877 tp->t_maxseg = mss; 2878 2879 SOCKBUF_LOCK(&so->so_rcv); 2880 if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe) 2881 bufsize = metrics.rmx_recvpipe; 2882 else 2883 bufsize = so->so_rcv.sb_hiwat; 2884 if (bufsize > mss) { 2885 bufsize = roundup(bufsize, mss); 2886 if (bufsize > sb_max) 2887 bufsize = sb_max; 2888 if (bufsize > so->so_rcv.sb_hiwat) 2889 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL); 2890 } 2891 SOCKBUF_UNLOCK(&so->so_rcv); 2892 /* 2893 * While we're here, check the others too. 2894 */ 2895 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) { 2896 tp->t_srtt = rtt; 2897 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE; 2898 tcpstat.tcps_usedrtt++; 2899 if (metrics.rmx_rttvar) { 2900 tp->t_rttvar = metrics.rmx_rttvar; 2901 tcpstat.tcps_usedrttvar++; 2902 } else { 2903 /* default variation is +- 1 rtt */ 2904 tp->t_rttvar = 2905 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; 2906 } 2907 TCPT_RANGESET(tp->t_rxtcur, 2908 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, 2909 tp->t_rttmin, TCPTV_REXMTMAX); 2910 } 2911 if (metrics.rmx_ssthresh) { 2912 /* 2913 * There's some sort of gateway or interface 2914 * buffer limit on the path. Use this to set 2915 * the slow start threshold, but set the 2916 * threshold to no less than 2*mss. 2917 */ 2918 tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh); 2919 tcpstat.tcps_usedssthresh++; 2920 } 2921 if (metrics.rmx_bandwidth) 2922 tp->snd_bandwidth = metrics.rmx_bandwidth; 2923 2924 /* 2925 * Set the slow-start flight size depending on whether this 2926 * is a local network or not. 2927 * 2928 * Extend this so we cache the cwnd too and retrieve it here. 2929 * Make cwnd even bigger than RFC3390 suggests but only if we 2930 * have previous experience with the remote host. Be careful 2931 * not to make cwnd bigger than the remote receive window or our own 2932 * send socket buffer. Maybe put some additional upper bound 2933 * on the retrieved cwnd. Should do incremental updates to 2934 * hostcache when cwnd collapses so the next connection doesn't 2935 * overload the path again. 2936 * 2937 * RFC3390 says to only do this if the SYN or SYN/ACK didn't get lost. 2938 * We currently check only in syncache_socket for that. 2939 */ 2940#define TCP_METRICS_CWND 2941#ifdef TCP_METRICS_CWND 2942 if (metrics.rmx_cwnd) 2943 tp->snd_cwnd = max(mss, 2944 min(metrics.rmx_cwnd / 2, 2945 min(tp->snd_wnd, so->so_snd.sb_hiwat))); 2946 else 2947#endif 2948 if (tcp_do_rfc3390) 2949 tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380)); 2950#ifdef INET6 2951 else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) || 2952 (!isipv6 && in_localaddr(inp->inp_faddr))) 2953#else 2954 else if (in_localaddr(inp->inp_faddr)) 2955#endif 2956 tp->snd_cwnd = mss * ss_fltsz_local; 2957 else 2958 tp->snd_cwnd = mss * ss_fltsz; 2959 2960 /* Check the interface for TSO capabilities. */ 2961 if (mtuflags & CSUM_TSO) 2962 tp->t_flags |= TF_TSO; 2963} 2964 2965/* 2966 * Determine the MSS option to send on an outgoing SYN.
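 * For example, for an IPv4 connection over a path with a 1500 byte MTU
 * and no smaller MTU recorded in the hostcache, this advertises
 * 1500 - 40 = 1460 (min_protoh being the 20 byte IP header plus the
 * 20 byte TCP header).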
2967 */ 2968int 2969tcp_mssopt(struct in_conninfo *inc) 2970{ 2971 int mss = 0; 2972 u_long maxmtu = 0; 2973 u_long thcmtu = 0; 2974 size_t min_protoh; 2975#ifdef INET6 2976 int isipv6 = inc->inc_isipv6 ? 1 : 0; 2977#endif 2978 2979 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer")); 2980 2981#ifdef INET6 2982 if (isipv6) { 2983 mss = tcp_v6mssdflt; 2984 maxmtu = tcp_maxmtu6(inc, NULL); 2985 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 2986 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 2987 } else 2988#endif 2989 { 2990 mss = tcp_mssdflt; 2991 maxmtu = tcp_maxmtu(inc, NULL); 2992 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 2993 min_protoh = sizeof(struct tcpiphdr); 2994 } 2995 if (maxmtu && thcmtu) 2996 mss = min(maxmtu, thcmtu) - min_protoh; 2997 else if (maxmtu || thcmtu) 2998 mss = max(maxmtu, thcmtu) - min_protoh; 2999 3000 return (mss); 3001} 3002 3003 3004/* 3005 * When a partial ack arrives, force the retransmission of the 3006 * next unacknowledged segment. Do not clear tp->t_dupacks. 3007 * By setting snd_nxt to th_ack, this forces the retransmission timer to 3008 * be started again. 3009 */ 3010static void 3011tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) 3012{ 3013 tcp_seq onxt = tp->snd_nxt; 3014 u_long ocwnd = tp->snd_cwnd; 3015 3016 tcp_timer_activate(tp, TT_REXMT, 0); 3017 tp->t_rtttime = 0; 3018 tp->snd_nxt = th->th_ack; 3019 /* 3020 * Set snd_cwnd to one segment beyond acknowledged offset. 3021 * (tp->snd_una has not yet been updated when this function is called.) 3022 */ 3023 tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una); 3024 tp->t_flags |= TF_ACKNOW; 3025 (void) tcp_output(tp); 3026 tp->snd_cwnd = ocwnd; 3027 if (SEQ_GT(onxt, tp->snd_nxt)) 3028 tp->snd_nxt = onxt; 3029 /* 3030 * Partial window deflation. Relies on the fact that tp->snd_una 3031 * is not updated yet. 3032 */ 3033 if (tp->snd_cwnd > th->th_ack - tp->snd_una) 3034 tp->snd_cwnd -= th->th_ack - tp->snd_una; 3035 else 3036 tp->snd_cwnd = 0; 3037 tp->snd_cwnd += tp->t_maxseg; 3038} 3039
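/*
 * Illustration of the partial window deflation above (example numbers):
 * if snd_cwnd was 10 * t_maxseg and the partial ACK covers 3 * t_maxseg
 * of new data, the window ends up at 10 - 3 + 1 = 8 segments, so after
 * the forced retransmission roughly one new segment may follow, as
 * described for NewReno partial acknowledgements in RFC 3782.
 */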