tcp_input.c revision 193511
/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/tcp_input.c 193511 2009-06-05 14:55:22Z rwatson $");

#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>
#include <sys/vimage.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */
#include <netinet/vinet.h>

#ifdef INET6
#include <netinet6/vinet6.h>
#endif

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /*IPSEC*/

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>
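
/*
 * Three duplicate ACKs are treated as a loss indication and trigger
 * fast retransmit (the classic Reno threshold, cf. RFC 2581).
 */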
static const int tcprexmtthresh = 3;

#ifdef VIMAGE_GLOBALS
struct tcpstat tcpstat;
int blackhole;
int tcp_delack_enabled;
int drop_synfin;
int tcp_do_rfc3042;
int tcp_do_rfc3390;
int tcp_do_ecn;
int tcp_ecn_maxretries;
int tcp_insecure_rst;
int tcp_do_autorcvbuf;
int tcp_autorcvbuf_inc;
int tcp_autorcvbuf_max;
int tcp_do_rfc3465;
int tcp_abc_l_var;
#endif

SYSCTL_V_STRUCT(V_NET, vnet_inet, _net_inet_tcp, TCPCTL_STATS, stats,
    CTLFLAG_RW, tcpstat, tcpstat,
    "TCP statistics (struct tcpstat, netinet/tcp_var.h)");

int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0, "Log all incoming TCP segments to closed ports");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    blackhole, 0, "Do not send RST on segments to closed ports");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, delayed_ack,
    CTLFLAG_RW, tcp_delack_enabled, 0,
    "Delay ACK to try and piggyback it onto a data packet");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, drop_synfin,
    CTLFLAG_RW, drop_synfin, 0, "Drop TCP packets with SYN+FIN set");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
    tcp_do_rfc3042, 0, "Enable RFC 3042 (Limited Transmit)");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, rfc3465, CTLFLAG_RW,
    tcp_do_rfc3465, 0,
    "Enable RFC 3465 (Appropriate Byte Counting)");
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, abc_l_var, CTLFLAG_RW,
    tcp_abc_l_var, 2,
    "Cap the max cwnd increment during slow-start to this number of segments");

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, ecn, CTLFLAG_RW, 0, "TCP ECN");
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp_ecn, OID_AUTO, enable,
    CTLFLAG_RW, tcp_do_ecn, 0, "TCP ECN support");
SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp_ecn, OID_AUTO, maxretries,
    CTLFLAG_RW, tcp_ecn_maxretries, 0, "Max retries before giving up on ECN");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, insecure_rst,
    CTLFLAG_RW, tcp_insecure_rst, 0,
    "Follow the old (insecure) criteria for accepting RST packets");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, recvbuf_auto,
    CTLFLAG_RW, tcp_do_autorcvbuf, 0,
    "Enable automatic receive buffer sizing");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, recvbuf_inc,
    CTLFLAG_RW, tcp_autorcvbuf_inc, 0,
    "Incrementor step size of automatic receive buffer");

SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_tcp, OID_AUTO, recvbuf_max,
    CTLFLAG_RW, tcp_autorcvbuf_max, 0,
    "Max size of automatic receive buffer");

int	tcp_read_locking = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, read_locking, CTLFLAG_RW,
    &tcp_read_locking, 0, "Enable read locking strategy");

int	tcp_rlock_atfirst;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rlock_atfirst, CTLFLAG_RD,
    &tcp_rlock_atfirst, 0, "");

int	tcp_wlock_atfirst;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcp_wlock_atfirst, CTLFLAG_RD,
    &tcp_wlock_atfirst, 0, "");

int	tcp_wlock_upgraded;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, wlock_upgraded, CTLFLAG_RD,
    &tcp_wlock_upgraded, 0, "");

int	tcp_wlock_relocked;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, wlock_relocked, CTLFLAG_RD,
    &tcp_wlock_relocked, 0, "");

int	tcp_wlock_looped;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, wlock_looped, CTLFLAG_RD,
    &tcp_wlock_looped, 0, "");

#ifdef VIMAGE_GLOBALS
struct inpcbhead tcb;
struct inpcbinfo tcbinfo;
#endif
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */

static void	 tcp_dooptions(struct tcpopt *, u_char *, int, int);
static void	 tcp_do_segment(struct mbuf *, struct tcphdr *,
		     struct socket *, struct tcpcb *, int, int, uint8_t,
		     int);
static void	 tcp_dropwithreset(struct mbuf *, struct tcphdr *,
		     struct tcpcb *, int, int);
static void	 tcp_pulloutofband(struct socket *,
		     struct tcphdr *, struct mbuf *, int);
static void	 tcp_xmit_timer(struct tcpcb *, int);
static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
static void inline
		 tcp_congestion_exp(struct tcpcb *);

static void inline
tcp_congestion_exp(struct tcpcb *tp)
{
	u_int win;

	win = min(tp->snd_wnd, tp->snd_cwnd) /
	    2 / tp->t_maxseg;
	if (win < 2)
		win = 2;
	tp->snd_ssthresh = win * tp->t_maxseg;
	ENTER_FASTRECOVERY(tp);
	tp->snd_recover = tp->snd_max;
	if (tp->t_flags & TF_ECN_PERMIT)
		tp->t_flags |= TF_ECN_SND_CWR;
}

/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
		nd6_nud_hint(NULL, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 * - there is no delayed ack timer in progress and
 * - our last ack wasn't a 0-sized window.  We never want to delay
 *   the ack that opens up a 0-sized window and
 *     - delayed acks are enabled or
 *     - this is a half-synchronized T/TCP connection.
 */
#define DELAY_ACK(tp)							\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (V_tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	INIT_VNET_INET6(curvnet);
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);
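
	/*
	 * At this point at least sizeof(struct tcphdr) bytes past *offp
	 * are contiguous in the first mbuf; on failure IP6_EXTHDR_CHECK
	 * has already disposed of the mbuf chain and returned
	 * IPPROTO_DONE on our behalf.
	 */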

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return IPPROTO_DONE;
	}

	tcp_input(m, *offp);
	return IPPROTO_DONE;
}
#endif

void
tcp_input(struct mbuf *m, int off0)
{
	INIT_VNET_INET(curvnet);
#ifdef INET6
	INIT_VNET_INET6(curvnet);
#endif
#ifdef IPSEC
	INIT_VNET_IPSEC(curvnet);
#endif
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ipovly *ipov;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int optlen = 0;
	int len, tlen, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
	uint8_t iptos;
#ifdef IPFIREWALL_FORWARD
	struct m_tag *fwd_tag;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
	const int isipv6 = 0;
#endif
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */
	int ti_locked;
#define	TI_UNLOCKED	1
#define	TI_RLOCKED	2
#define	TI_WLOCKED	3

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	to.to_flags = 0;
	TCPSTAT_INC(tcps_rcvtotal);

	if (isipv6) {
#ifdef INET6
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
		ip6 = mtod(m, struct ip6_hdr *);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
		th = (struct tcphdr *)((caddr_t)ip6 + off0);

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
#else
		th = NULL;		/* XXX: Avoid compiler warning. */
#endif
	} else {
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m, (struct mbuf *)0);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				TCPSTAT_INC(tcps_rcvshort);
				return;
			}
		}
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ip->ip_len;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data +
				    ip->ip_len +
				    IPPROTO_TCP));
			th->th_sum ^= 0xffff;
#ifdef TCPDEBUG
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
#endif
		} else {
			/*
			 * Checksum extended TCP header and data.
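			 *
			 * The IP header is overlaid with the pseudo-header
			 * (struct ipovly: zeroed padding plus length and
			 * address fields) so that the single in_cksum()
			 * pass below covers pseudo-header, TCP header and
			 * payload at once.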
			 */
			len = sizeof (struct ip) + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
			th->th_sum = in_cksum(m, len);
		}
		if (th->th_sum) {
			TCPSTAT_INC(tcps_rcvbadsum);
			goto drop;
		}
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
	}

#ifdef INET6
	if (isipv6)
		iptos = (ntohl(ip6->ip6_flow) >> 20) & 0xff;
	else
#endif
		iptos = ip->ip_tos;

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		TCPSTAT_INC(tcps_rcvbadoff);
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
		if (isipv6) {
#ifdef INET6
			IP6_EXTHDR_CHECK(m, off0, off, );
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
#endif
		} else {
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					TCPSTAT_INC(tcps_rcvshort);
					return;
				}
				ip = mtod(m, struct ip *);
				ipov = (struct ipovly *)ip;
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	th->th_seq = ntohl(th->th_seq);
	th->th_ack = ntohl(th->th_ack);
	th->th_win = ntohs(th->th_win);
	th->th_urp = ntohs(th->th_urp);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Locate pcb for segment, which requires a lock on tcbinfo.
	 * Optimistically acquire a global read lock rather than a write lock
	 * unless header flags necessarily imply a state change.  There are
	 * two cases where we might discover later we need a write lock
	 * despite the flags: ACKs moving a connection out of the syncache,
	 * and ACKs for a connection in TIMEWAIT.
	 */
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
	    tcp_read_locking == 0) {
		INP_INFO_WLOCK(&V_tcbinfo);
		ti_locked = TI_WLOCKED;
		tcp_wlock_atfirst++;
	} else {
		INP_INFO_RLOCK(&V_tcbinfo);
		ti_locked = TI_RLOCKED;
		tcp_rlock_atfirst++;
	}

findpcb:
#ifdef INVARIANTS
	if (ti_locked == TI_RLOCKED)
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	else if (ti_locked == TI_WLOCKED)
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	else
		panic("%s: findpcb ti_locked %d\n", __func__, ti_locked);
#endif

#ifdef IPFIREWALL_FORWARD
	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

	if (fwd_tag != NULL && isipv6 == 0) {	/* IPv6 support is not there yet. */
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded.  Pretend to be the destination.
		 * Already got one like this?
		 */
		inp = in_pcblookup_hash(&V_tcbinfo,
		    ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport,
		    0, m->m_pkthdr.rcvif);
		if (!inp) {
			/* It's new.  Try to find the ambushing socket. */
			inp = in_pcblookup_hash(&V_tcbinfo,
			    ip->ip_src, th->th_sport,
			    next_hop->sin_addr,
			    next_hop->sin_port ?
			    ntohs(next_hop->sin_port) :
			    th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
		}
		/* Remove the tag from the packet.  We don't need it anymore. */
		m_tag_delete(m, fwd_tag);
	} else
#endif /* IPFIREWALL_FORWARD */
	{
		if (isipv6) {
#ifdef INET6
			inp = in6_pcblookup_hash(&V_tcbinfo,
			    &ip6->ip6_src, th->th_sport,
			    &ip6->ip6_dst, th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
#endif
		} else
			inp = in_pcblookup_hash(&V_tcbinfo,
			    ip->ip_src, th->th_sport,
			    ip->ip_dst, th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
	}

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
			if ((s = tcp_log_addrs(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((V_blackhole == 1 && (thflags & TH_SYN)) ||
		    V_blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_WLOCK(inp);
	if (!(inp->inp_flags & INP_HW_FLOWID)
	    && (m->m_flags & M_FLOWID)
	    && ((inp->inp_socket == NULL)
		|| !(inp->inp_socket->so_options & SO_ACCEPTCONN))) {
		inp->inp_flags |= INP_HW_FLOWID;
		inp->inp_flags &= ~INP_SW_FLOWID;
		inp->inp_flowid = m->m_pkthdr.flowid;
	}
#ifdef IPSEC
#ifdef INET6
	if (isipv6 && ipsec6_in_reject(m, inp)) {
		V_ipsec6stat.in_polvio++;
		goto dropunlock;
	} else
#endif /* INET6 */
	if (ipsec4_in_reject(m, inp) != 0) {
		V_ipsec4stat.in_polvio++;
		goto dropunlock;
	}
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
			goto dropunlock;
		else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch stray
	 * or duplicate segments arriving late.  If this segment was a
	 * legitimate new connection attempt the old INPCB gets removed and
	 * we can try again to find a listening socket.
	 *
	 * At this point, due to earlier optimism, we may hold a read lock on
	 * the inpcbinfo, rather than a write lock.  If so, we need to
	 * upgrade, or if that fails, acquire a reference on the inpcb, drop
	 * all locks, acquire a global write lock, and then re-acquire the
	 * inpcb lock.  We may at that point discover that another thread has
	 * tried to free the inpcb, in which case we need to loop back and
	 * try to find a new inpcb to deliver to.
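	 *
	 * (The read-locking sysctl counters above - tcp_wlock_upgraded,
	 * tcp_wlock_relocked and tcp_wlock_looped - record how often each
	 * of these paths is taken.)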
	 */
	if (inp->inp_flags & INP_TIMEWAIT) {
		KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED,
		    ("%s: INP_TIMEWAIT ti_locked %d", __func__, ti_locked));

		if (ti_locked == TI_RLOCKED) {
			if (rw_try_upgrade(&V_tcbinfo.ipi_lock) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_RUNLOCK(&V_tcbinfo);
				INP_INFO_WLOCK(&V_tcbinfo);
				ti_locked = TI_WLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele(inp)) {
					tcp_wlock_looped++;
					inp = NULL;
					goto findpcb;
				}
				tcp_wlock_relocked++;
			} else {
				ti_locked = TI_WLOCKED;
				tcp_wlock_upgraded++;
			}
		}
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);

		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		INP_INFO_WUNLOCK(&V_tcbinfo);
		return;
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

	/*
	 * We've identified a valid inpcb, but it could be that we need an
	 * inpcbinfo write lock and have only a read lock.  In this case,
	 * attempt to upgrade/relock using the same strategy as the TIMEWAIT
	 * case above.
	 */
	if (tp->t_state != TCPS_ESTABLISHED ||
	    (thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
	    tcp_read_locking == 0) {
		KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED,
		    ("%s: upgrade check ti_locked %d", __func__, ti_locked));

		if (ti_locked == TI_RLOCKED) {
			if (rw_try_upgrade(&V_tcbinfo.ipi_lock) == 0) {
				in_pcbref(inp);
				INP_WUNLOCK(inp);
				INP_INFO_RUNLOCK(&V_tcbinfo);
				INP_INFO_WLOCK(&V_tcbinfo);
				ti_locked = TI_WLOCKED;
				INP_WLOCK(inp);
				if (in_pcbrele(inp)) {
					tcp_wlock_looped++;
					inp = NULL;
					goto findpcb;
				}
				tcp_wlock_relocked++;
			} else {
				ti_locked = TI_WLOCKED;
				tcp_wlock_upgraded++;
			}
		}
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	}

#ifdef MAC
	INP_WLOCK_ASSERT(inp);
	if (mac_inpcb_check_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
		if (isipv6) {
#ifdef INET6
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
#endif
		} else
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

		KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
		    "tp not listening", __func__));

		bzero(&inc, sizeof(inc));
#ifdef INET6
		if (isipv6) {
			inc.inc_flags |= INC_ISIPV6;
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;

		/*
		 * Check for an existing connection attempt in syncache if
		 * the flag is only ACK.
		 * A successful lookup creates a new
		 * socket appended to the listen queue in SYN_RECEIVED state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * inp and tcpinfo locks.
			 */
			if (!syncache_expand(&inc, &to, th, &so, m)) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__,
					    V_tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again");
				if (V_tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_WUNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			INP_WLOCK(inp);		/* new connection */
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: ", __func__));
			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen,
			    iptos, ti_locked);
			INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
			return;
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			TCPSTAT_INC(tcps_badsyn);
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && V_drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			TCPSTAT_INC(tcps_badsyn);
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated addresses are forbidden, we do not accept
		 * a SYN to a deprecated interface address, to prevent any
		 * new inbound connection from getting established.
		 * When we do not accept the SYN, we send a TCP RST with
		 * the deprecated source address (instead of dropping the
		 * segment).  We compromise here because it is much better
		 * for the peer to get a RST, and the RST will be the final
		 * packet of the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping a
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says the following:
		 * 1. use of a deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into the (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple descriptions of deprecated address handling -
		 * worse, they are not exactly the same.  I believe 5.5.4
		 * is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !V_ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			if ((ia6 = ip6_getdstifaddr(m)) &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to deprecated "
					"IPv6 address rejected\n",
					s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
		}
#endif
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address.  Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
			    log(LOG_DEBUG, "%s; %s: Listen socket: "
				"Connection attempt from broad- or multicast "
				"link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
		if (isipv6) {
#ifdef INET6
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to/from self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to multicast "
					"address ignored\n", s, __func__);
				goto dropunlock;
			}
#endif
		} else {
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to broad- "
					"or multicast address ignored\n",
					s, __func__);
				goto dropunlock;
			}
		}
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		tcp_dooptions(&to, optp, optlen, TO_SYN);
		syncache_add(&inc, &to, th, inp, &so, m);
		/*
		 * Entry added to syncache and mbuf consumed.
		 * Everything already unlocked by syncache_add().
		 */
		INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
		return;
	}

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen, iptos, ti_locked);
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	return;
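
/*
 * Common exit paths: "dropwithreset" answers the segment with a
 * (rate-limited) RST before dropping it, "dropunlock" just releases
 * the pcbinfo and inpcb locks, and "drop" frees the mbuf chain and
 * any pending log string.
 */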
dropwithreset:
	if (ti_locked == TI_RLOCKED)
		INP_INFO_RUNLOCK(&V_tcbinfo);
	else if (ti_locked == TI_WLOCKED)
		INP_INFO_WUNLOCK(&V_tcbinfo);
	else
		panic("%s: dropwithreset ti_locked %d", __func__, ti_locked);
	ti_locked = TI_UNLOCKED;

	if (inp != NULL) {
		tcp_dropwithreset(m, th, tp, tlen, rstreason);
		INP_WUNLOCK(inp);
	} else
		tcp_dropwithreset(m, th, NULL, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
	goto drop;

dropunlock:
	if (ti_locked == TI_RLOCKED)
		INP_INFO_RUNLOCK(&V_tcbinfo);
	else if (ti_locked == TI_WLOCKED)
		INP_INFO_WUNLOCK(&V_tcbinfo);
	else
		panic("%s: dropunlock ti_locked %d", __func__, ti_locked);
	ti_locked = TI_UNLOCKED;

	if (inp != NULL)
		INP_WUNLOCK(inp);

drop:
	INP_INFO_UNLOCK_ASSERT(&V_tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
}

static void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen, uint8_t iptos,
    int ti_locked)
{
	INIT_VNET_INET(tp->t_vnet);
	int thflags, acked, ourfinisacked, needoutput = 0;
	int rstreason, todrop, win;
	u_long tiwin;
	struct tcpopt to;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;

	/*
	 * Require a global write lock on tcbinfo for SYN/FIN/RST segments
	 * or whenever the connection is not yet established, since those
	 * may change connection state.  Otherwise accept either a read or
	 * a write lock: tcp_input() may have conservatively acquired a
	 * write lock.  We never enter with no lock held, and for the
	 * common pure-ack/pure-data cases we try to drop the lock quickly.
	 */
	if ((thflags & (TH_SYN | TH_FIN | TH_RST)) != 0 ||
	    tp->t_state != TCPS_ESTABLISHED) {
		KASSERT(ti_locked == TI_WLOCKED, ("%s ti_locked %d for "
		    "SYN/FIN/RST/!EST", __func__, ti_locked));
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	} else {
#ifdef INVARIANTS
		if (ti_locked == TI_RLOCKED)
			INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
		else if (ti_locked == TI_WLOCKED)
			INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
		else
			panic("%s: ti_locked %d for EST", __func__,
			    ti_locked);
#endif
	}
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);

	/*
	 * Unscale the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;

	/*
	 * TCP ECN processing.
	 */
	if (tp->t_flags & TF_ECN_PERMIT) {
		switch (iptos & IPTOS_ECN_MASK) {
		case IPTOS_ECN_CE:
			tp->t_flags |= TF_ECN_SND_ECE;
			TCPSTAT_INC(tcps_ecn_ce);
			break;
		case IPTOS_ECN_ECT0:
			TCPSTAT_INC(tcps_ecn_ect0);
			break;
		case IPTOS_ECN_ECT1:
			TCPSTAT_INC(tcps_ecn_ect1);
			break;
		}

		if (thflags & TH_CWR)
			tp->t_flags &= ~TF_ECN_SND_ECE;

		/*
		 * Congestion experienced.
		 * Ignore if we are already trying to recover.
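		 *
		 * Example: with snd_wnd = 48k, snd_cwnd = 64k and a 1460
		 * byte t_maxseg, tcp_congestion_exp() below sets ssthresh
		 * to (48k / 2 / 1460) * 1460 bytes, i.e. half the current
		 * flight rounded down to a segment boundary (never less
		 * than two segments), and enters fast recovery.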
		 */
		if ((thflags & TH_ECE) &&
		    SEQ_LEQ(th->th_ack, tp->snd_recover)) {
			TCPSTAT_INC(tcps_ecn_rcwnd);
			tcp_congestion_exp(tp);
		}
	}

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, ticks))
			to.to_tsecr = 0;
	}

	/*
	 * Process options only when we get SYN/ACK back. The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		}
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = ticks;
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    LIST_EMPTY(&tp->t_segq) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = ticks;
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    tp->snd_cwnd >= tp->snd_wnd &&
			    ((!V_tcp_do_newreno &&
			      !(tp->t_flags & TF_SACK_PERMIT) &&
			      tp->t_dupacks < tcprexmtthresh) ||
			     ((V_tcp_do_newreno ||
			       (tp->t_flags & TF_SACK_PERMIT)) &&
			      !IN_FASTRECOVERY(tp) &&
			      (to.to_flags & TOF_SACK) == 0 &&
			      TAILQ_EMPTY(&tp->snd_holes)))) {
				/*
				 * This is a pure ack for outstanding data.
				 */
				if (ti_locked == TI_RLOCKED)
					INP_INFO_RUNLOCK(&V_tcbinfo);
				else if (ti_locked == TI_WLOCKED)
					INP_INFO_WUNLOCK(&V_tcbinfo);
				else
					panic("%s: ti_locked %d on pure ACK",
					    __func__, ti_locked);
				ti_locked = TI_UNLOCKED;

				TCPSTAT_INC(tcps_predack);

				/*
				 * "bad retransmit" recovery.
				 */
				if (tp->t_rxtshift == 1 &&
				    ticks < tp->t_badrxtwin) {
					TCPSTAT_INC(tcps_sndrexmitbad);
					tp->snd_cwnd = tp->snd_cwnd_prev;
					tp->snd_ssthresh =
					    tp->snd_ssthresh_prev;
					tp->snd_recover = tp->snd_recover_prev;
					if (tp->t_flags & TF_WASFRECOVERY)
						ENTER_FASTRECOVERY(tp);
					tp->snd_nxt = tp->snd_max;
					tp->t_badrxtwin = 0;
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase; ignore
				 * timestamps of 0, or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - to.to_tsecr)
						tp->t_rttlow = ticks - to.to_tsecr;
					tcp_xmit_timer(tp,
					    ticks - to.to_tsecr + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
				tcp_xmit_bandwidth_limit(tp, th->th_ack);
				acked = th->th_ack - tp->snd_una;
				TCPSTAT_INC(tcps_rcvackpack);
				TCPSTAT_ADD(tcps_rcvackbyte, acked);
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;
				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);
				ND6_HINT(tp); /* Some progress has been made. */

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);
				sowwakeup(so);
				if (so->so_snd.sb_cc)
					(void) tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			/*
			 * This is a pure, in-sequence data packet with
			 * nothing on the reassembly queue and we have enough
			 * buffer space to take it.
			 */
			if (ti_locked == TI_RLOCKED)
				INP_INFO_RUNLOCK(&V_tcbinfo);
			else if (ti_locked == TI_WLOCKED)
				INP_INFO_WUNLOCK(&V_tcbinfo);
			else
				panic("%s: ti_locked %d on pure data "
				    "segment", __func__, ti_locked);
			ti_locked = TI_UNLOCKED;

			/* Clean receiver SACK report if present */
			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			TCPSTAT_INC(tcps_preddat);
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			TCPSTAT_INC(tcps_rcvpack);
			TCPSTAT_ADD(tcps_rcvbyte, tlen);
			ND6_HINT(tp);	/* Some progress has been made */
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		/*
		 * Automatic sizing of receive socket buffer.  Often the send
		 * buffer size is not optimally adjusted to the actual network
		 * conditions at hand (delay bandwidth product).  Setting the
		 * buffer size too small limits throughput on links with high
		 * bandwidth and high delay (e.g. trans-continental/oceanic links).
		 *
		 * On the receive side the socket buffer memory is only rarely
		 * used to any significant extent.  This allows us to be much
		 * more aggressive in scaling the receive socket buffer.  For
		 * the case that the buffer space is actually used to a large
		 * extent and we run out of kernel memory we can simply drop
		 * the new segments; TCP on the sender will just retransmit them
		 * later.  Setting the buffer size too big may only consume too
		 * much kernel memory if the application doesn't read() from
		 * the socket or packet loss or reordering makes use of the
		 * reassembly queue.
		 *
		 * The criteria to step up the receive buffer one notch are:
		 *  1. the number of bytes received during the time it takes
		 *     one timestamp to be reflected back to us (the RTT);
		 *  2. received bytes per RTT is within seven eighths of the
		 *     current socket buffer size;
		 *  3. receive buffer size has not hit maximal automatic size;
		 *
		 * This algorithm does one step per RTT at most and only if
		 * we receive a bulk stream w/o packet losses or reorderings.
		 * Shrinking the buffer during idle times is not necessary as
		 * it doesn't consume any memory when idle.
		 *
		 * TODO: Only step up if the application is actually serving
		 * the buffer to better manage the socket buffer resources.
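		 *
		 * Example (assuming the default 16kB recvbuf_inc): with
		 * sb_hiwat at 64kB, a step is taken once more than 56kB
		 * (7/8 of 64kB) arrives within one RTT, growing the buffer
		 * to 80kB, and so on until tcp_autorcvbuf_max is reached.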
		 */
			if (V_tcp_do_autorcvbuf &&
			    to.to_tsecr &&
			    (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
				if (to.to_tsecr > tp->rfbuf_ts &&
				    to.to_tsecr - tp->rfbuf_ts < hz) {
					if (tp->rfbuf_cnt >
					    (so->so_rcv.sb_hiwat / 8 * 7) &&
					    so->so_rcv.sb_hiwat <
					    V_tcp_autorcvbuf_max) {
						newsize =
						    min(so->so_rcv.sb_hiwat +
						    V_tcp_autorcvbuf_inc,
						    V_tcp_autorcvbuf_max);
					}
					/* Start over with next RTT. */
					tp->rfbuf_ts = 0;
					tp->rfbuf_cnt = 0;
				} else
					tp->rfbuf_cnt += tlen;	/* add up */
			}

			/* Add data to socket buffer. */
			SOCKBUF_LOCK(&so->so_rcv);
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
				m_freem(m);
			} else {
				/*
				 * Set new socket buffer size.
				 * Give up when limit is reached.
				 */
				if (newsize)
					if (!sbreserve_locked(&so->so_rcv,
					    newsize, so, NULL))
						so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
				m_adj(m, drop_hdrlen);	/* delayed header drop */
				sbappendstream_locked(&so->so_rcv, m);
			}
			/* NB: sorwakeup_locked() does an implicit unlock. */
			sorwakeup_locked(so);
			if (DELAY_ACK(tp)) {
				tp->t_flags |= TF_DELACK;
			} else {
				tp->t_flags |= TF_ACKNOW;
				tcp_output(tp);
			}
			goto check_delack;
		}
	}

	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	win = sbspace(&so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));

	/* Reset receive buffer auto scaling when not in bulk receive mode. */
	tp->rfbuf_ts = 0;
	tp->rfbuf_cnt = 0;

	switch (tp->t_state) {

	/*
	 * If the state is SYN_RECEIVED:
	 *	if seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	case TCPS_SYN_RECEIVED:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		break;

	/*
	 * If the state is SYN_SENT:
	 *	if seg contains an ACK, but not for our SYN, drop the input.
	 *	if seg contains a RST, then drop the connection.
	 *	if seg does not contain SYN, then drop it.
	 * Otherwise this is an acceptable SYN segment
	 *	initialize tp->rcv_nxt and tp->irs
	 *	if seg contains ack then advance tp->snd_una
	 *	if seg contains an ECE and ECN support is enabled, the stream
	 *	    is ECN capable.
	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	 *	arrange for segment to be acked (eventually)
	 *	continue processing rest of data/controls, beginning with URG
	 */
	case TCPS_SYN_SENT:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->iss) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_UNLIMITED;
			goto dropwithreset;
		}
		if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST))
			tp = tcp_drop(tp, ECONNREFUSED);
		if (thflags & TH_RST)
			goto drop;
		if (!(thflags & TH_SYN))
			goto drop;

		tp->irs = th->th_seq;
		tcp_rcvseqinit(tp);
		if (thflags & TH_ACK) {
			TCPSTAT_INC(tcps_connects);
			soisconnected(so);
#ifdef MAC
			mac_socketpeer_set_from_mbuf(m, so);
#endif
			/* Do window scaling on this connection? */
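			/*
			 * Example: with a negotiated shift of 6, a raw
			 * 16-bit window of 1024 advertises 1024 << 6 =
			 * 65536 bytes; the shift takes effect only if
			 * both sides requested scaling in their SYNs.
			 */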
			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
				tp->rcv_scale = tp->request_r_scale;
			}
			tp->rcv_adv += tp->rcv_wnd;
			tp->snd_una++;		/* SYN is acked */
			/*
			 * If there's data, delay ACK; if there's also a FIN
			 * ACKNOW will be turned on later.
			 */
			if (DELAY_ACK(tp) && tlen != 0)
				tcp_timer_activate(tp, TT_DELACK,
				    tcp_delacktime);
			else
				tp->t_flags |= TF_ACKNOW;

			if ((thflags & TH_ECE) && V_tcp_do_ecn) {
				tp->t_flags |= TF_ECN_PERMIT;
				TCPSTAT_INC(tcps_ecn_shs);
			}

			/*
			 * Received <SYN,ACK> in SYN_SENT[*] state.
			 * Transitions:
			 *	SYN_SENT  --> ESTABLISHED
			 *	SYN_SENT* --> FIN_WAIT_1
			 */
			tp->t_starttime = ticks;
			if (tp->t_flags & TF_NEEDFIN) {
				tp->t_state = TCPS_FIN_WAIT_1;
				tp->t_flags &= ~TF_NEEDFIN;
				thflags &= ~TH_SYN;
			} else {
				tp->t_state = TCPS_ESTABLISHED;
				tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
			}
		} else {
			/*
			 * Received initial SYN in SYN-SENT[*] state =>
			 * simultaneous open.  If segment contains CC option
			 * and there is a cached CC, apply TAO test.
			 * If it succeeds, connection is half-synchronized.
			 * Otherwise, do 3-way handshake:
			 *        SYN-SENT -> SYN-RECEIVED
			 *        SYN-SENT* -> SYN-RECEIVED*
			 * If there was no CC option, clear cached CC value.
			 */
			tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
			tcp_timer_activate(tp, TT_REXMT, 0);
			tp->t_state = TCPS_SYN_RECEIVED;
		}

		KASSERT(ti_locked == TI_WLOCKED, ("%s: trimthenstep6: "
		    "ti_locked %d", __func__, ti_locked));
		INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
		INP_WLOCK_ASSERT(tp->t_inpcb);

		/*
		 * Advance th->th_seq to correspond to first data byte.
		 * If data, trim to stay within window,
		 * dropping FIN if necessary.
		 */
		th->th_seq++;
		if (tlen > tp->rcv_wnd) {
			todrop = tlen - tp->rcv_wnd;
			m_adj(m, -todrop);
			tlen = tp->rcv_wnd;
			thflags &= ~TH_FIN;
			TCPSTAT_INC(tcps_rcvpackafterwin);
			TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop);
		}
		tp->snd_wl1 = th->th_seq - 1;
		tp->rcv_up = th->th_seq;
		/*
		 * Client side of transaction: already sent SYN and data.
		 * If the remote host used T/TCP to validate the SYN,
		 * our data will be ACK'd; if so, enter normal data segment
		 * processing in the middle of step 5, ack processing.
		 * Otherwise, goto step 6.
		 */
		if (thflags & TH_ACK)
			goto process_ACK;

		goto step6;

	/*
	 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
	 *	do normal processing.
	 *
	 * NB: Leftover from RFC1644 T/TCP.  Cases to be reused later.
	 */
	case TCPS_LAST_ACK:
	case TCPS_CLOSING:
		break;  /* continue normal processing */
	}

	/*
	 * States other than LISTEN or SYN_SENT.
	 * First check the RST flag and sequence number since reset segments
	 * are exempt from the timestamp and connection count tests.  This
	 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
	 * below which allowed reset segments in half the sequence space
	 * to fall through and be processed (which gives forged reset
	 * segments with a random sequence number a 50 percent chance of
	 * killing a connection).
	 * Then check timestamp, if present.
	 * Then check the connection count, if present.
	 * Then check that at least some bytes of segment are within
	 * receive window.
	 * If segment begins before rcv_nxt,
	 * drop leading data (and SYN); if nothing left, just ack.
	 *
	 *
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 * Note: this does not take into account delayed ACKs, so
	 *   we should test against last_ack_sent instead of rcv_nxt.
	 *   The sequence number in the reset segment is normally an
	 *   echo of our outgoing acknowledgement numbers, but some hosts
	 *   send a reset with the sequence number at the rightmost edge
	 *   of our receive window, and we have to handle this case.
	 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
	 *   that brute force RST attacks are possible.  To combat this,
	 *   we use a much stricter check while in the ESTABLISHED state,
	 *   only accepting RSTs where the sequence number is equal to
	 *   last_ack_sent.  In all other states (the states in which a
	 *   RST is more likely), the more permissive check is used.
	 *   If we have multiple segments in flight, the initial reset
	 *   segment sequence numbers will be to the left of last_ack_sent,
	 *   but they will eventually catch up.
	 *   In any case, it never made sense to trim reset segments to
	 *   fit the receive window since RFC 1122 says:
	 *   4.2.2.12  RST Segment: RFC-793 Section 3.4
	 *
	 *     A TCP SHOULD allow a received RST segment to include data.
	 *
	 *     DISCUSSION
	 *          It has been suggested that a RST segment could contain
	 *          ASCII text that encoded and explained the cause of the
	 *          RST.  No standard has yet been established for such
	 *          data.
	 *
	 * If the reset segment passes the sequence number test examine
	 * the state:
	 *    SYN_RECEIVED STATE:
	 *	If passive open, return to LISTEN state.
	 *	If active open, inform user that connection was refused.
	 *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
	 *	Inform user that connection was reset, and close tcb.
	 *    CLOSING, LAST_ACK STATES:
	 *	Close the tcb.
	 *    TIME_WAIT STATE:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
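	 *
	 * Concretely, with tcp_insecure_rst off the ESTABLISHED case
	 * below only honors a RST whose sequence number lies within one
	 * of rcv_nxt or last_ack_sent; anything else inside the window
	 * is counted as tcps_badrst and dropped.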
	 */
	if (thflags & TH_RST) {
		if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
			switch (tp->t_state) {

			case TCPS_SYN_RECEIVED:
				so->so_error = ECONNREFUSED;
				goto close;

			case TCPS_ESTABLISHED:
				if (V_tcp_insecure_rst == 0 &&
				    !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
				    SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
				    !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
				    SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
					TCPSTAT_INC(tcps_badrst);
					goto drop;
				}
				/* FALLTHROUGH */
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
				so->so_error = ECONNRESET;
			close:
				KASSERT(ti_locked == TI_WLOCKED,
				    ("tcp_do_segment: TH_RST 1 ti_locked %d",
				    ti_locked));
				INP_INFO_WLOCK_ASSERT(&V_tcbinfo);

				tp->t_state = TCPS_CLOSED;
				TCPSTAT_INC(tcps_drops);
				tp = tcp_close(tp);
				break;

			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				KASSERT(ti_locked == TI_WLOCKED,
				    ("tcp_do_segment: TH_RST 2 ti_locked %d",
				    ti_locked));
				INP_INFO_WLOCK_ASSERT(&V_tcbinfo);

				tp = tcp_close(tp);
				break;
			}
		}
		goto drop;
	}

	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {

		/* Check to see if ts_recent is over 24 days old.  */
		if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) {
			/*
			 * Invalidate ts_recent.  If this segment updates
			 * ts_recent, the age will be reset later and ts_recent
			 * will get a valid value.  If it does not, setting
			 * ts_recent to zero will at least satisfy the
			 * requirement that zero be placed in the timestamp
			 * echo reply when ts_recent isn't valid.  The
			 * age isn't reset until we get a valid ts_recent
			 * because we don't want out-of-order segments to be
			 * dropped when ts_recent is old.
			 */
			tp->ts_recent = 0;
		} else {
			TCPSTAT_INC(tcps_rcvduppack);
			TCPSTAT_ADD(tcps_rcvdupbyte, tlen);
			TCPSTAT_INC(tcps_pawsdrop);
			if (tlen)
				goto dropafterack;
			goto drop;
		}
	}

	/*
	 * In the SYN-RECEIVED state, validate that the packet belongs to
	 * this connection before trimming the data to fit the receive
	 * window.  Check the sequence number versus IRS since we know
	 * the sequence numbers haven't wrapped.  This is a partial fix
	 * for the "LAND" DoS attack.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
		rstreason = BANDLIM_RST_OPENPORT;
		goto dropwithreset;
	}

	todrop = tp->rcv_nxt - th->th_seq;
	if (todrop > 0) {
		/*
		 * If this is a duplicate SYN for our current connection,
		 * advance over it and pretend it's not a SYN.
		 */
		if (thflags & TH_SYN && th->th_seq == tp->irs) {
			thflags &= ~TH_SYN;
			th->th_seq++;
			if (th->th_urp > 1)
				th->th_urp--;
			else
				thflags &= ~TH_URG;
			todrop--;
		}
		/*
		 * Following if statement from Stevens, vol. 2, p. 960.
		 */
		if (todrop > tlen
		    || (todrop == tlen && (thflags & TH_FIN) == 0)) {
			/*
			 * Any valid FIN must be to the left of the window.
			 * At this point the FIN must be a duplicate or out
			 * of sequence; drop it.
1840 */ 1841 thflags &= ~TH_FIN; 1842 1843 /* 1844 * Send an ACK to resynchronize and drop any data. 1845 * But keep on processing for RST or ACK. 1846 */ 1847 tp->t_flags |= TF_ACKNOW; 1848 todrop = tlen; 1849 TCPSTAT_INC(tcps_rcvduppack); 1850 TCPSTAT_ADD(tcps_rcvdupbyte, todrop); 1851 } else { 1852 TCPSTAT_INC(tcps_rcvpartduppack); 1853 TCPSTAT_ADD(tcps_rcvpartdupbyte, todrop); 1854 } 1855 drop_hdrlen += todrop; /* drop from the top afterwards */ 1856 th->th_seq += todrop; 1857 tlen -= todrop; 1858 if (th->th_urp > todrop) 1859 th->th_urp -= todrop; 1860 else { 1861 thflags &= ~TH_URG; 1862 th->th_urp = 0; 1863 } 1864 } 1865 1866 /* 1867 * If new data are received on a connection after the 1868 * user processes are gone, then RST the other end. 1869 */ 1870 if ((so->so_state & SS_NOFDREF) && 1871 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 1872 char *s; 1873 1874 KASSERT(ti_locked == TI_WLOCKED, ("%s: SS_NOFDREF && " 1875 "CLOSE_WAIT && tlen ti_locked %d", __func__, ti_locked)); 1876 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 1877 1878 if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) { 1879 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data after socket " 1880 "was closed, sending RST and removing tcpcb\n", 1881 s, __func__, tcpstates[tp->t_state], tlen); 1882 free(s, M_TCPLOG); 1883 } 1884 tp = tcp_close(tp); 1885 TCPSTAT_INC(tcps_rcvafterclose); 1886 rstreason = BANDLIM_UNLIMITED; 1887 goto dropwithreset; 1888 } 1889 1890 /* 1891 * If segment ends after window, drop trailing data 1892 * (and PUSH and FIN); if nothing left, just ACK. 1893 */ 1894 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 1895 if (todrop > 0) { 1896 TCPSTAT_INC(tcps_rcvpackafterwin); 1897 if (todrop >= tlen) { 1898 TCPSTAT_ADD(tcps_rcvbyteafterwin, tlen); 1899 /* 1900 * If window is closed can only take segments at 1901 * window edge, and have to drop data and PUSH from 1902 * incoming segments. Continue processing, but 1903 * remember to ack. Otherwise, drop segment 1904 * and ack. 1905 */ 1906 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 1907 tp->t_flags |= TF_ACKNOW; 1908 TCPSTAT_INC(tcps_rcvwinprobe); 1909 } else 1910 goto dropafterack; 1911 } else 1912 TCPSTAT_ADD(tcps_rcvbyteafterwin, todrop); 1913 m_adj(m, -todrop); 1914 tlen -= todrop; 1915 thflags &= ~(TH_PUSH|TH_FIN); 1916 } 1917 1918 /* 1919 * If last ACK falls within this segment's sequence numbers, 1920 * record its timestamp. 1921 * NOTE: 1922 * 1) That the test incorporates suggestions from the latest 1923 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1924 * 2) That updating only on newer timestamps interferes with 1925 * our earlier PAWS tests, so this check should be solely 1926 * predicated on the sequence space of this segment. 1927 * 3) That we modify the segment boundary check to be 1928 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 1929 * instead of RFC1323's 1930 * Last.ACK.Sent < SEG.SEQ + SEG.Len. 1931 * This modified check allows us to overcome RFC1323's 1932 * limitations as described in Stevens TCP/IP Illustrated 1933 * Vol. 2 p.869. In such cases, we can still calculate the 1934 * RTT correctly when RCV.NXT == Last.ACK.Sent. 1935 */ 1936 if ((to.to_flags & TOF_TS) != 0 && 1937 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 1938 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 1939 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 1940 tp->ts_recent_age = ticks; 1941 tp->ts_recent = to.to_tsval; 1942 } 1943 1944 /* 1945 * If a SYN is in the window, then this is an 1946 * error and we send an RST and drop the connection.
1947 */ 1948 if (thflags & TH_SYN) { 1949 KASSERT(ti_locked == TI_WLOCKED, 1950 ("tcp_do_segment: TH_SYN ti_locked %d", ti_locked)); 1951 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 1952 1953 tp = tcp_drop(tp, ECONNRESET); 1954 rstreason = BANDLIM_UNLIMITED; 1955 goto drop; 1956 } 1957 1958 /* 1959 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 1960 * flag is on (half-synchronized state), then queue data for 1961 * later processing; else drop segment and return. 1962 */ 1963 if ((thflags & TH_ACK) == 0) { 1964 if (tp->t_state == TCPS_SYN_RECEIVED || 1965 (tp->t_flags & TF_NEEDSYN)) 1966 goto step6; 1967 else if (tp->t_flags & TF_ACKNOW) 1968 goto dropafterack; 1969 else 1970 goto drop; 1971 } 1972 1973 /* 1974 * Ack processing. 1975 */ 1976 switch (tp->t_state) { 1977 1978 /* 1979 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 1980 * ESTABLISHED state and continue processing. 1981 * The ACK was checked above. 1982 */ 1983 case TCPS_SYN_RECEIVED: 1984 1985 TCPSTAT_INC(tcps_connects); 1986 soisconnected(so); 1987 /* Do window scaling? */ 1988 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1989 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1990 tp->rcv_scale = tp->request_r_scale; 1991 tp->snd_wnd = tiwin; 1992 } 1993 /* 1994 * Make transitions: 1995 * SYN-RECEIVED -> ESTABLISHED 1996 * SYN-RECEIVED* -> FIN-WAIT-1 1997 */ 1998 tp->t_starttime = ticks; 1999 if (tp->t_flags & TF_NEEDFIN) { 2000 tp->t_state = TCPS_FIN_WAIT_1; 2001 tp->t_flags &= ~TF_NEEDFIN; 2002 } else { 2003 tp->t_state = TCPS_ESTABLISHED; 2004 tcp_timer_activate(tp, TT_KEEP, tcp_keepidle); 2005 } 2006 /* 2007 * If segment contains data or ACK, will call tcp_reass() 2008 * later; if not, do so now to pass queued data to user. 2009 */ 2010 if (tlen == 0 && (thflags & TH_FIN) == 0) 2011 (void) tcp_reass(tp, (struct tcphdr *)0, 0, 2012 (struct mbuf *)0); 2013 tp->snd_wl1 = th->th_seq - 1; 2014 /* FALLTHROUGH */ 2015 2016 /* 2017 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 2018 * ACKs. If the ack is in the range 2019 * tp->snd_una < th->th_ack <= tp->snd_max 2020 * then advance tp->snd_una to th->th_ack and drop 2021 * data from the retransmission queue. If this ACK reflects 2022 * more up-to-date window information, we update our window information. 2023 */ 2024 case TCPS_ESTABLISHED: 2025 case TCPS_FIN_WAIT_1: 2026 case TCPS_FIN_WAIT_2: 2027 case TCPS_CLOSE_WAIT: 2028 case TCPS_CLOSING: 2029 case TCPS_LAST_ACK: 2030 if (SEQ_GT(th->th_ack, tp->snd_max)) { 2031 TCPSTAT_INC(tcps_rcvacktoomuch); 2032 goto dropafterack; 2033 } 2034 if ((tp->t_flags & TF_SACK_PERMIT) && 2035 ((to.to_flags & TOF_SACK) || 2036 !TAILQ_EMPTY(&tp->snd_holes))) 2037 tcp_sack_doack(tp, &to, th->th_ack); 2038 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 2039 if (tlen == 0 && tiwin == tp->snd_wnd) { 2040 TCPSTAT_INC(tcps_rcvdupack); 2041 /* 2042 * If we have outstanding data (other than 2043 * a window probe), this is a completely 2044 * duplicate ack (ie, window info didn't 2045 * change), the ack is the biggest we've 2046 * seen and we've seen exactly our rexmt 2047 * threshold of them, assume a packet 2048 * has been dropped and retransmit it. 2049 * Kludge snd_nxt & the congestion 2050 * window so we send only this one 2051 * packet. 2052 * 2053 * We know we're losing at the current 2054 * window size so do congestion avoidance 2055 * (set ssthresh to half the current window 2056 * and pull our congestion window back to 2057 * the new ssthresh).
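 *
 * Hedged outline of the halving: the tcp_congestion_exp() helper
 * invoked further down recomputes the threshold roughly as
 *
 *	win = min(snd_wnd, snd_cwnd) / 2 / t_maxseg;
 *	if (win < 2)
 *		win = 2;
 *	snd_ssthresh = win * t_maxseg;
 *
 * keeping a floor of two segments, before the recovery
 * bookkeeping (snd_recover and the recovery flag) is set up.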
2058 * 2059 * Dup acks mean that packets have left the 2060 * network (they're now cached at the receiver) 2061 * so bump cwnd by the amount in the receiver 2062 * to keep a constant cwnd packets in the 2063 * network. 2064 * 2065 * When using TCP ECN, notify the peer that 2066 * we reduced the cwnd. 2067 */ 2068 if (!tcp_timer_active(tp, TT_REXMT) || 2069 th->th_ack != tp->snd_una) 2070 tp->t_dupacks = 0; 2071 else if (++tp->t_dupacks > tcprexmtthresh || 2072 ((V_tcp_do_newreno || 2073 (tp->t_flags & TF_SACK_PERMIT)) && 2074 IN_FASTRECOVERY(tp))) { 2075 if ((tp->t_flags & TF_SACK_PERMIT) && 2076 IN_FASTRECOVERY(tp)) { 2077 int awnd; 2078 2079 /* 2080 * Compute the amount of data in flight first. 2081 * We can inject new data into the pipe iff 2082 * we have less than 1/2 the original window's 2083 * worth of data in flight. 2084 */ 2085 awnd = (tp->snd_nxt - tp->snd_fack) + 2086 tp->sackhint.sack_bytes_rexmit; 2087 if (awnd < tp->snd_ssthresh) { 2088 tp->snd_cwnd += tp->t_maxseg; 2089 if (tp->snd_cwnd > tp->snd_ssthresh) 2090 tp->snd_cwnd = tp->snd_ssthresh; 2091 } 2092 } else 2093 tp->snd_cwnd += tp->t_maxseg; 2094 (void) tcp_output(tp); 2095 goto drop; 2096 } else if (tp->t_dupacks == tcprexmtthresh) { 2097 tcp_seq onxt = tp->snd_nxt; 2098 2099 /* 2100 * If we're doing sack, check to 2101 * see if we're already in sack 2102 * recovery. If we're not doing sack, 2103 * check to see if we're in newreno 2104 * recovery. 2105 */ 2106 if (tp->t_flags & TF_SACK_PERMIT) { 2107 if (IN_FASTRECOVERY(tp)) { 2108 tp->t_dupacks = 0; 2109 break; 2110 } 2111 } else if (V_tcp_do_newreno || 2112 V_tcp_do_ecn) { 2113 if (SEQ_LEQ(th->th_ack, 2114 tp->snd_recover)) { 2115 tp->t_dupacks = 0; 2116 break; 2117 } 2118 } 2119 tcp_congestion_exp(tp); 2120 tcp_timer_activate(tp, TT_REXMT, 0); 2121 tp->t_rtttime = 0; 2122 if (tp->t_flags & TF_SACK_PERMIT) { 2123 TCPSTAT_INC( 2124 tcps_sack_recovery_episode); 2125 tp->sack_newdata = tp->snd_nxt; 2126 tp->snd_cwnd = tp->t_maxseg; 2127 (void) tcp_output(tp); 2128 goto drop; 2129 } 2130 tp->snd_nxt = th->th_ack; 2131 tp->snd_cwnd = tp->t_maxseg; 2132 (void) tcp_output(tp); 2133 KASSERT(tp->snd_limited <= 2, 2134 ("%s: tp->snd_limited too big", 2135 __func__)); 2136 tp->snd_cwnd = tp->snd_ssthresh + 2137 tp->t_maxseg * 2138 (tp->t_dupacks - tp->snd_limited); 2139 if (SEQ_GT(onxt, tp->snd_nxt)) 2140 tp->snd_nxt = onxt; 2141 goto drop; 2142 } else if (V_tcp_do_rfc3042) { 2143 u_long oldcwnd = tp->snd_cwnd; 2144 tcp_seq oldsndmax = tp->snd_max; 2145 u_int sent; 2146 2147 KASSERT(tp->t_dupacks == 1 || 2148 tp->t_dupacks == 2, 2149 ("%s: dupacks not 1 or 2", 2150 __func__)); 2151 if (tp->t_dupacks == 1) 2152 tp->snd_limited = 0; 2153 tp->snd_cwnd = 2154 (tp->snd_nxt - tp->snd_una) + 2155 (tp->t_dupacks - tp->snd_limited) * 2156 tp->t_maxseg; 2157 (void) tcp_output(tp); 2158 sent = tp->snd_max - oldsndmax; 2159 if (sent > tp->t_maxseg) { 2160 KASSERT((tp->t_dupacks == 2 && 2161 tp->snd_limited == 0) || 2162 (sent == tp->t_maxseg + 1 && 2163 tp->t_flags & TF_SENTFIN), 2164 ("%s: sent too much", 2165 __func__)); 2166 tp->snd_limited = 2; 2167 } else if (sent > 0) 2168 ++tp->snd_limited; 2169 tp->snd_cwnd = oldcwnd; 2170 goto drop; 2171 } 2172 } else 2173 tp->t_dupacks = 0; 2174 break; 2175 } 2176 2177 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2178 ("%s: th_ack <= snd_una", __func__)); 2179 2180 /* 2181 * If the congestion window was inflated to account 2182 * for the other side's cached packets, retract it. 
2183 */ 2184 if (V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) { 2185 if (IN_FASTRECOVERY(tp)) { 2186 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2187 if (tp->t_flags & TF_SACK_PERMIT) 2188 tcp_sack_partialack(tp, th); 2189 else 2190 tcp_newreno_partial_ack(tp, th); 2191 } else { 2192 /* 2193 * Out of fast recovery. 2194 * Window inflation should have left us 2195 * with approximately snd_ssthresh 2196 * outstanding data. 2197 * But in case we would be inclined to 2198 * send a burst, better to do it via 2199 * the slow start mechanism. 2200 */ 2201 if (SEQ_GT(th->th_ack + 2202 tp->snd_ssthresh, 2203 tp->snd_max)) 2204 tp->snd_cwnd = tp->snd_max - 2205 th->th_ack + 2206 tp->t_maxseg; 2207 else 2208 tp->snd_cwnd = tp->snd_ssthresh; 2209 } 2210 } 2211 } else { 2212 if (tp->t_dupacks >= tcprexmtthresh && 2213 tp->snd_cwnd > tp->snd_ssthresh) 2214 tp->snd_cwnd = tp->snd_ssthresh; 2215 } 2216 tp->t_dupacks = 0; 2217 /* 2218 * If we reach this point, ACK is not a duplicate, 2219 * i.e., it ACKs something we sent. 2220 */ 2221 if (tp->t_flags & TF_NEEDSYN) { 2222 /* 2223 * T/TCP: Connection was half-synchronized, and our 2224 * SYN has been ACK'd (so connection is now fully 2225 * synchronized). Go to non-starred state, 2226 * increment snd_una for ACK of SYN, and check if 2227 * we can do window scaling. 2228 */ 2229 tp->t_flags &= ~TF_NEEDSYN; 2230 tp->snd_una++; 2231 /* Do window scaling? */ 2232 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2233 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2234 tp->rcv_scale = tp->request_r_scale; 2235 /* Send window already scaled. */ 2236 } 2237 } 2238 2239process_ACK: 2240 INP_INFO_LOCK_ASSERT(&V_tcbinfo); 2241 KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED, 2242 ("tcp_input: process_ACK ti_locked %d", ti_locked)); 2243 INP_WLOCK_ASSERT(tp->t_inpcb); 2244 2245 acked = th->th_ack - tp->snd_una; 2246 TCPSTAT_INC(tcps_rcvackpack); 2247 TCPSTAT_ADD(tcps_rcvackbyte, acked); 2248 2249 /* 2250 * If we just performed our first retransmit, and the ACK 2251 * arrives within our recovery window, then it was a mistake 2252 * to do the retransmit in the first place. Recover our 2253 * original cwnd and ssthresh, and proceed to transmit where 2254 * we left off. 2255 */ 2256 if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) { 2257 TCPSTAT_INC(tcps_sndrexmitbad); 2258 tp->snd_cwnd = tp->snd_cwnd_prev; 2259 tp->snd_ssthresh = tp->snd_ssthresh_prev; 2260 tp->snd_recover = tp->snd_recover_prev; 2261 if (tp->t_flags & TF_WASFRECOVERY) 2262 ENTER_FASTRECOVERY(tp); 2263 tp->snd_nxt = tp->snd_max; 2264 tp->t_badrxtwin = 0; /* XXX probably not required */ 2265 } 2266 2267 /* 2268 * If we have a timestamp reply, update smoothed 2269 * round trip time. If no timestamp is present but 2270 * transmit timer is running and timed sequence 2271 * number was acked, update smoothed round trip time. 2272 * Since we now have an rtt measurement, cancel the 2273 * timer backoff (cf., Phil Karn's retransmit alg.). 2274 * Recompute the initial retransmit timer. 2275 * 2276 * Some boxes send broken timestamp replies 2277 * during the SYN+ACK phase, ignore 2278 * timestamps of 0 or we could calculate a 2279 * huge RTT and blow up the retransmit timer. 
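 *
 * Hedged outline of the sampling below: the echoed timestamp is
 * preferred when present and nonzero; otherwise the timed
 * sequence number is used. Both feed tcp_xmit_timer():
 *
 *	if (has_ts && to_tsecr != 0)
 *		tcp_xmit_timer(tp, ticks - to_tsecr + 1);
 *	else if (t_rtttime && seq_gt(th_ack, t_rtseq))
 *		tcp_xmit_timer(tp, ticks - t_rtttime);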
2280 */ 2281 if ((to.to_flags & TOF_TS) != 0 && 2282 to.to_tsecr) { 2283 if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr) 2284 tp->t_rttlow = ticks - to.to_tsecr; 2285 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1); 2286 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2287 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2288 tp->t_rttlow = ticks - tp->t_rtttime; 2289 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2290 } 2291 tcp_xmit_bandwidth_limit(tp, th->th_ack); 2292 2293 /* 2294 * If all outstanding data is acked, stop retransmit 2295 * timer and remember to restart (more output or persist). 2296 * If there is more data to be acked, restart retransmit 2297 * timer, using current (possibly backed-off) value. 2298 */ 2299 if (th->th_ack == tp->snd_max) { 2300 tcp_timer_activate(tp, TT_REXMT, 0); 2301 needoutput = 1; 2302 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2303 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2304 2305 /* 2306 * If no data (only SYN) was ACK'd, 2307 * skip rest of ACK processing. 2308 */ 2309 if (acked == 0) 2310 goto step6; 2311 2312 /* 2313 * When new data is acked, open the congestion window. 2314 * Method depends on which congestion control state we're 2315 * in (slow start or cong avoid) and if ABC (RFC 3465) is 2316 * enabled. 2317 * 2318 * slow start: cwnd <= ssthresh 2319 * cong avoid: cwnd > ssthresh 2320 * 2321 * slow start and ABC (RFC 3465): 2322 * Grow cwnd exponentially by the amount of data 2323 * ACKed capping the max increment per ACK to 2324 * (abc_l_var * maxseg) bytes. 2325 * 2326 * slow start without ABC (RFC 2581): 2327 * Grow cwnd exponentially by maxseg per ACK. 2328 * 2329 * cong avoid and ABC (RFC 3465): 2330 * Grow cwnd linearly by maxseg per RTT for each 2331 * cwnd worth of ACKed data. 2332 * 2333 * cong avoid without ABC (RFC 2581): 2334 * Grow cwnd linearly by approximately maxseg per RTT using 2335 * maxseg^2 / cwnd per ACK as the increment. 2336 * If cwnd > maxseg^2, fix the cwnd increment at 1 byte to 2337 * avoid capping cwnd. 2338 */ 2339 if ((!V_tcp_do_newreno && !(tp->t_flags & TF_SACK_PERMIT)) || 2340 !IN_FASTRECOVERY(tp)) { 2341 u_int cw = tp->snd_cwnd; 2342 u_int incr = tp->t_maxseg; 2343 /* In congestion avoidance? */ 2344 if (cw > tp->snd_ssthresh) { 2345 if (V_tcp_do_rfc3465) { 2346 tp->t_bytes_acked += acked; 2347 if (tp->t_bytes_acked >= tp->snd_cwnd) 2348 tp->t_bytes_acked -= cw; 2349 else 2350 incr = 0; 2351 } 2352 else 2353 incr = max((incr * incr / cw), 1); 2354 /* 2355 * In slow-start with ABC enabled and no RTO in sight? 2356 * (Must not use abc_l_var > 1 if slow starting after an 2357 * RTO. On RTO, snd_nxt = snd_una, so the snd_nxt == 2358 * snd_max check is sufficient to handle this). 2359 */ 2360 } else if (V_tcp_do_rfc3465 && 2361 tp->snd_nxt == tp->snd_max) 2362 incr = min(acked, 2363 V_tcp_abc_l_var * tp->t_maxseg); 2364 /* ABC is on by default, so (incr == 0) frequently. */ 2365 if (incr > 0) 2366 tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale); 2367 } 2368 SOCKBUF_LOCK(&so->so_snd); 2369 if (acked > so->so_snd.sb_cc) { 2370 tp->snd_wnd -= so->so_snd.sb_cc; 2371 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc); 2372 ourfinisacked = 1; 2373 } else { 2374 sbdrop_locked(&so->so_snd, acked); 2375 tp->snd_wnd -= acked; 2376 ourfinisacked = 0; 2377 } 2378 /* NB: sowwakeup_locked() does an implicit unlock. */ 2379 sowwakeup_locked(so); 2380 /* Detect una wraparound. 
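 *
 * Hedged outline: if snd_una has advanced past snd_recover while
 * this ACK still compares at or below snd_recover, the 32-bit
 * sequence space has wrapped around the recovery point, so
 * snd_recover is pulled back to th_ack - 1 to keep later SEQ_*()
 * comparisons against it meaningful:
 *
 *	if (seq_gt(snd_una, snd_recover) &&
 *	    seq_leq(th_ack, snd_recover))
 *		snd_recover = th_ack - 1;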
*/ 2381 if ((V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) && 2382 !IN_FASTRECOVERY(tp) && 2383 SEQ_GT(tp->snd_una, tp->snd_recover) && 2384 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2385 tp->snd_recover = th->th_ack - 1; 2386 if ((V_tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) && 2387 IN_FASTRECOVERY(tp) && 2388 SEQ_GEQ(th->th_ack, tp->snd_recover)) { 2389 EXIT_FASTRECOVERY(tp); 2390 tp->t_bytes_acked = 0; 2391 } 2392 tp->snd_una = th->th_ack; 2393 if (tp->t_flags & TF_SACK_PERMIT) { 2394 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2395 tp->snd_recover = tp->snd_una; 2396 } 2397 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2398 tp->snd_nxt = tp->snd_una; 2399 2400 switch (tp->t_state) { 2401 2402 /* 2403 * In FIN_WAIT_1 STATE in addition to the processing 2404 * for the ESTABLISHED state if our FIN is now acknowledged 2405 * then enter FIN_WAIT_2. 2406 */ 2407 case TCPS_FIN_WAIT_1: 2408 if (ourfinisacked) { 2409 /* 2410 * If we can't receive any more 2411 * data, then closing user can proceed. 2412 * Starting the timer is contrary to the 2413 * specification, but if we don't get a FIN 2414 * we'll hang forever. 2415 * 2416 * XXXjl: 2417 * we should release the tp also, and use a 2418 * compressed state. 2419 */ 2420 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2421 int timeout; 2422 2423 soisdisconnected(so); 2424 timeout = (tcp_fast_finwait2_recycle) ? 2425 tcp_finwait2_timeout : tcp_maxidle; 2426 tcp_timer_activate(tp, TT_2MSL, timeout); 2427 } 2428 tp->t_state = TCPS_FIN_WAIT_2; 2429 } 2430 break; 2431 2432 /* 2433 * In CLOSING STATE in addition to the processing for 2434 * the ESTABLISHED state if the ACK acknowledges our FIN 2435 * then enter the TIME-WAIT state, otherwise ignore 2436 * the segment. 2437 */ 2438 case TCPS_CLOSING: 2439 if (ourfinisacked) { 2440 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2441 tcp_twstart(tp); 2442 INP_INFO_WUNLOCK(&V_tcbinfo); 2443 m_freem(m); 2444 return; 2445 } 2446 break; 2447 2448 /* 2449 * In LAST_ACK, we may still be waiting for data to drain 2450 * and/or to be acked, as well as for the ack of our FIN. 2451 * If our FIN is now acknowledged, delete the TCB, 2452 * enter the closed state and return. 2453 */ 2454 case TCPS_LAST_ACK: 2455 if (ourfinisacked) { 2456 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2457 tp = tcp_close(tp); 2458 goto drop; 2459 } 2460 break; 2461 } 2462 } 2463 2464step6: 2465 INP_INFO_LOCK_ASSERT(&V_tcbinfo); 2466 KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED, 2467 ("tcp_do_segment: step6 ti_locked %d", ti_locked)); 2468 INP_WLOCK_ASSERT(tp->t_inpcb); 2469 2470 /* 2471 * Update window information. 2472 * Don't look at window if no ACK: TAC's send garbage on first SYN. 2473 */ 2474 if ((thflags & TH_ACK) && 2475 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2476 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2477 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2478 /* keep track of pure window updates */ 2479 if (tlen == 0 && 2480 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2481 TCPSTAT_INC(tcps_rcvwinupd); 2482 tp->snd_wnd = tiwin; 2483 tp->snd_wl1 = th->th_seq; 2484 tp->snd_wl2 = th->th_ack; 2485 if (tp->snd_wnd > tp->max_sndwnd) 2486 tp->max_sndwnd = tp->snd_wnd; 2487 needoutput = 1; 2488 } 2489 2490 /* 2491 * Process segments with URG. 2492 */ 2493 if ((thflags & TH_URG) && th->th_urp && 2494 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2495 /* 2496 * This is a kludge, but if we receive and accept 2497 * random urgent pointers, we'll crash in 2498 * soreceive. 
It's hard to imagine someone 2499 * actually wanting to send this much urgent data. 2500 */ 2501 SOCKBUF_LOCK(&so->so_rcv); 2502 if (th->th_urp + so->so_rcv.sb_cc > sb_max) { 2503 th->th_urp = 0; /* XXX */ 2504 thflags &= ~TH_URG; /* XXX */ 2505 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2506 goto dodata; /* XXX */ 2507 } 2508 /* 2509 * If this segment advances the known urgent pointer, 2510 * then mark the data stream. This should not happen 2511 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2512 * a FIN has been received from the remote side. 2513 * In these states we ignore the URG. 2514 * 2515 * According to RFC961 (Assigned Protocols), 2516 * the urgent pointer points to the last octet 2517 * of urgent data. We continue, however, 2518 * to consider it to indicate the first octet 2519 * of data past the urgent section as the original 2520 * spec states (in one of two places). 2521 */ 2522 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2523 tp->rcv_up = th->th_seq + th->th_urp; 2524 so->so_oobmark = so->so_rcv.sb_cc + 2525 (tp->rcv_up - tp->rcv_nxt) - 1; 2526 if (so->so_oobmark == 0) 2527 so->so_rcv.sb_state |= SBS_RCVATMARK; 2528 sohasoutofband(so); 2529 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2530 } 2531 SOCKBUF_UNLOCK(&so->so_rcv); 2532 /* 2533 * Remove out of band data so it doesn't get presented to the user. 2534 * This can happen independent of advancing the URG pointer, 2535 * but if two URG's are pending at once, some out-of-band 2536 * data may creep in... ick. 2537 */ 2538 if (th->th_urp <= (u_long)tlen && 2539 !(so->so_options & SO_OOBINLINE)) { 2540 /* hdr drop is delayed */ 2541 tcp_pulloutofband(so, th, m, drop_hdrlen); 2542 } 2543 } else { 2544 /* 2545 * If no out of band data is expected, 2546 * pull receive urgent pointer along 2547 * with the receive window. 2548 */ 2549 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2550 tp->rcv_up = tp->rcv_nxt; 2551 } 2552dodata: /* XXX */ 2553 INP_INFO_LOCK_ASSERT(&V_tcbinfo); 2554 KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED, 2555 ("tcp_do_segment: dodata ti_locked %d", ti_locked)); 2556 INP_WLOCK_ASSERT(tp->t_inpcb); 2557 2558 /* 2559 * Process the segment text, merging it into the TCP sequencing queue, 2560 * and arranging for acknowledgment of receipt if necessary. 2561 * This process logically involves adjusting tp->rcv_wnd as data 2562 * is presented to the user (this happens in tcp_usrreq.c, 2563 * case PRU_RCVD). If a FIN has already been received on this 2564 * connection then we just ignore the text. 2565 */ 2566 if ((tlen || (thflags & TH_FIN)) && 2567 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2568 tcp_seq save_start = th->th_seq; 2569 m_adj(m, drop_hdrlen); /* delayed header drop */ 2570 /* 2571 * Insert segment which includes th into TCP reassembly queue 2572 * with control block tp. Set thflags to whether reassembly now 2573 * includes a segment with FIN. This handles the common case 2574 * inline (segment is the next to be received on an established 2575 * connection, and the queue is empty), avoiding linkage into 2576 * and removal from the queue and repetition of various 2577 * conversions. 2578 * Set DELACK for segments received in order, but ack 2579 * immediately when segments are out of order (so 2580 * fast retransmit can work).
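 *
 * Hedged outline of the test below ("queue empty" means the
 * t_segq reassembly queue has nothing pending):
 *
 *	if (seg_seq == rcv_nxt && queue_empty && established)
 *		append to so_rcv; DELACK if allowed, else ACKNOW;
 *	else
 *		tcp_reass(); ACKNOW, so our duplicate ACKs can
 *		trigger the peer's fast retransmit;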
2581 */ 2582 if (th->th_seq == tp->rcv_nxt && 2583 LIST_EMPTY(&tp->t_segq) && 2584 TCPS_HAVEESTABLISHED(tp->t_state)) { 2585 if (DELAY_ACK(tp)) 2586 tp->t_flags |= TF_DELACK; 2587 else 2588 tp->t_flags |= TF_ACKNOW; 2589 tp->rcv_nxt += tlen; 2590 thflags = th->th_flags & TH_FIN; 2591 TCPSTAT_INC(tcps_rcvpack); 2592 TCPSTAT_ADD(tcps_rcvbyte, tlen); 2593 ND6_HINT(tp); 2594 SOCKBUF_LOCK(&so->so_rcv); 2595 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 2596 m_freem(m); 2597 else 2598 sbappendstream_locked(&so->so_rcv, m); 2599 /* NB: sorwakeup_locked() does an implicit unlock. */ 2600 sorwakeup_locked(so); 2601 } else { 2602 /* 2603 * XXX: Due to the header drop above "th" is 2604 * theoretically invalid by now. Fortunately 2605 * m_adj() doesn't actually free any mbufs 2606 * when trimming from the head. 2607 */ 2608 thflags = tcp_reass(tp, th, &tlen, m); 2609 tp->t_flags |= TF_ACKNOW; 2610 } 2611 if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT)) 2612 tcp_update_sack_list(tp, save_start, save_start + tlen); 2613#if 0 2614 /* 2615 * Note the amount of data that peer has sent into 2616 * our window, in order to estimate the sender's 2617 * buffer size. 2618 * XXX: Unused. 2619 */ 2620 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 2621#endif 2622 } else { 2623 m_freem(m); 2624 thflags &= ~TH_FIN; 2625 } 2626 2627 /* 2628 * If FIN is received, ACK the FIN and let the user know 2629 * that the connection is closing. 2630 */ 2631 if (thflags & TH_FIN) { 2632 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2633 socantrcvmore(so); 2634 /* 2635 * If connection is half-synchronized 2636 * (ie NEEDSYN flag on) then delay ACK, 2637 * so it may be piggybacked when SYN is sent. 2638 * Otherwise, since we received a FIN then no 2639 * more input can be expected, send ACK now. 2640 */ 2641 if (tp->t_flags & TF_NEEDSYN) 2642 tp->t_flags |= TF_DELACK; 2643 else 2644 tp->t_flags |= TF_ACKNOW; 2645 tp->rcv_nxt++; 2646 } 2647 switch (tp->t_state) { 2648 2649 /* 2650 * In SYN_RECEIVED and ESTABLISHED STATES 2651 * enter the CLOSE_WAIT state. 2652 */ 2653 case TCPS_SYN_RECEIVED: 2654 tp->t_starttime = ticks; 2655 /* FALLTHROUGH */ 2656 case TCPS_ESTABLISHED: 2657 tp->t_state = TCPS_CLOSE_WAIT; 2658 break; 2659 2660 /* 2661 * If still in FIN_WAIT_1 STATE, FIN has not been acked, so 2662 * enter the CLOSING state. 2663 */ 2664 case TCPS_FIN_WAIT_1: 2665 tp->t_state = TCPS_CLOSING; 2666 break; 2667 2668 /* 2669 * In FIN_WAIT_2 state enter the TIME_WAIT state, 2670 * starting the time-wait timer, turning off the other 2671 * standard timers. 2672 */ 2673 case TCPS_FIN_WAIT_2: 2674 INP_INFO_WLOCK_ASSERT(&V_tcbinfo); 2675 KASSERT(ti_locked == TI_WLOCKED, ("%s: dodata " 2676 "TCP_FIN_WAIT_2 ti_locked: %d", __func__, 2677 ti_locked)); 2678 2679 tcp_twstart(tp); 2680 INP_INFO_WUNLOCK(&V_tcbinfo); 2681 return; 2682 } 2683 } 2684 if (ti_locked == TI_RLOCKED) 2685 INP_INFO_RUNLOCK(&V_tcbinfo); 2686 else if (ti_locked == TI_WLOCKED) 2687 INP_INFO_WUNLOCK(&V_tcbinfo); 2688 else 2689 panic("%s: dodata epilogue ti_locked %d", __func__, 2690 ti_locked); 2691 ti_locked = TI_UNLOCKED; 2692 2693#ifdef TCPDEBUG 2694 if (so->so_options & SO_DEBUG) 2695 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 2696 &tcp_savetcp, 0); 2697#endif 2698 2699 /* 2700 * Return any desired output.
2701 */ 2702 if (needoutput || (tp->t_flags & TF_ACKNOW)) 2703 (void) tcp_output(tp); 2704 2705check_delack: 2706 KASSERT(ti_locked == TI_UNLOCKED, ("%s: check_delack ti_locked %d", 2707 __func__, ti_locked)); 2708 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 2709 INP_WLOCK_ASSERT(tp->t_inpcb); 2710 2711 if (tp->t_flags & TF_DELACK) { 2712 tp->t_flags &= ~TF_DELACK; 2713 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 2714 } 2715 INP_WUNLOCK(tp->t_inpcb); 2716 return; 2717 2718dropafterack: 2719 KASSERT(ti_locked == TI_RLOCKED || ti_locked == TI_WLOCKED, 2720 ("tcp_do_segment: dropafterack ti_locked %d", ti_locked)); 2721 2722 /* 2723 * Generate an ACK dropping incoming segment if it occupies 2724 * sequence space, where the ACK reflects our state. 2725 * 2726 * We can now skip the test for the RST flag since all 2727 * paths to this code happen after packets containing 2728 * RST have been dropped. 2729 * 2730 * In the SYN-RECEIVED state, don't send an ACK unless the 2731 * segment we received passes the SYN-RECEIVED ACK test. 2732 * If it fails send a RST. This breaks the loop in the 2733 * "LAND" DoS attack, and also prevents an ACK storm 2734 * between two listening ports that have been sent forged 2735 * SYN segments, each with the source address of the other. 2736 */ 2737 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 2738 (SEQ_GT(tp->snd_una, th->th_ack) || 2739 SEQ_GT(th->th_ack, tp->snd_max)) ) { 2740 rstreason = BANDLIM_RST_OPENPORT; 2741 goto dropwithreset; 2742 } 2743#ifdef TCPDEBUG 2744 if (so->so_options & SO_DEBUG) 2745 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2746 &tcp_savetcp, 0); 2747#endif 2748 if (ti_locked == TI_RLOCKED) 2749 INP_INFO_RUNLOCK(&V_tcbinfo); 2750 else if (ti_locked == TI_WLOCKED) 2751 INP_INFO_WUNLOCK(&V_tcbinfo); 2752 else 2753 panic("%s: dropafterack epilogue ti_locked %d", __func__, 2754 ti_locked); 2755 ti_locked = TI_UNLOCKED; 2756 2757 tp->t_flags |= TF_ACKNOW; 2758 (void) tcp_output(tp); 2759 INP_WUNLOCK(tp->t_inpcb); 2760 m_freem(m); 2761 return; 2762 2763dropwithreset: 2764 if (ti_locked == TI_RLOCKED) 2765 INP_INFO_RUNLOCK(&V_tcbinfo); 2766 else if (ti_locked == TI_WLOCKED) 2767 INP_INFO_WUNLOCK(&V_tcbinfo); 2768 else 2769 panic("%s: dropwithreset ti_locked %d", __func__, ti_locked); 2770 ti_locked = TI_UNLOCKED; 2771 2772 if (tp != NULL) { 2773 tcp_dropwithreset(m, th, tp, tlen, rstreason); 2774 INP_WUNLOCK(tp->t_inpcb); 2775 } else 2776 tcp_dropwithreset(m, th, NULL, tlen, rstreason); 2777 return; 2778 2779drop: 2780 if (ti_locked == TI_RLOCKED) 2781 INP_INFO_RUNLOCK(&V_tcbinfo); 2782 else if (ti_locked == TI_WLOCKED) 2783 INP_INFO_WUNLOCK(&V_tcbinfo); 2784#ifdef INVARIANTS 2785 else 2786 INP_INFO_UNLOCK_ASSERT(&V_tcbinfo); 2787#endif 2788 ti_locked = TI_UNLOCKED; 2789 2790 /* 2791 * Drop space held by incoming segment and return. 2792 */ 2793#ifdef TCPDEBUG 2794 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2795 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2796 &tcp_savetcp, 0); 2797#endif 2798 if (tp != NULL) 2799 INP_WUNLOCK(tp->t_inpcb); 2800 m_freem(m); 2801} 2802 2803/* 2804 * Issue RST and make ACK acceptable to originator of segment. 2805 * The mbuf must still include the original packet header. 2806 * tp may be NULL. 
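 *
 * In outline (hedged; the argument names below are descriptive,
 * not the real tcp_respond() signature): a segment that carried
 * ACK is answered with a bare RST sequenced at the peer's ACK;
 * anything else gets RST|ACK covering the segment's sequence
 * space:
 *
 *	if (seg_had_ack)
 *		respond(seq = seg_ack, ack = 0, TH_RST);
 *	else
 *		respond(seq = 0, ack = seg_seq + seg_len + seg_had_syn,
 *		    TH_RST|TH_ACK);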
2807 */ 2808static void 2809tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 2810 int tlen, int rstreason) 2811{ 2812 struct ip *ip; 2813#ifdef INET6 2814 struct ip6_hdr *ip6; 2815#endif 2816 2817 if (tp != NULL) { 2818 INP_WLOCK_ASSERT(tp->t_inpcb); 2819 } 2820 2821 /* Don't bother if destination was broadcast/multicast. */ 2822 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 2823 goto drop; 2824#ifdef INET6 2825 if (mtod(m, struct ip *)->ip_v == 6) { 2826 ip6 = mtod(m, struct ip6_hdr *); 2827 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 2828 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 2829 goto drop; 2830 /* IPv6 anycast check is done at tcp6_input() */ 2831 } else 2832#endif 2833 { 2834 ip = mtod(m, struct ip *); 2835 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 2836 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 2837 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 2838 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 2839 goto drop; 2840 } 2841 2842 /* Perform bandwidth limiting. */ 2843 if (badport_bandlim(rstreason) < 0) 2844 goto drop; 2845 2846 /* tcp_respond consumes the mbuf chain. */ 2847 if (th->th_flags & TH_ACK) { 2848 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 2849 th->th_ack, TH_RST); 2850 } else { 2851 if (th->th_flags & TH_SYN) 2852 tlen++; 2853 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 2854 (tcp_seq)0, TH_RST|TH_ACK); 2855 } 2856 return; 2857drop: 2858 m_freem(m); 2859} 2860 2861/* 2862 * Parse TCP options and place in tcpopt. 2863 */ 2864static void 2865tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 2866{ 2867 INIT_VNET_INET(curvnet); 2868 int opt, optlen; 2869 2870 to->to_flags = 0; 2871 for (; cnt > 0; cnt -= optlen, cp += optlen) { 2872 opt = cp[0]; 2873 if (opt == TCPOPT_EOL) 2874 break; 2875 if (opt == TCPOPT_NOP) 2876 optlen = 1; 2877 else { 2878 if (cnt < 2) 2879 break; 2880 optlen = cp[1]; 2881 if (optlen < 2 || optlen > cnt) 2882 break; 2883 } 2884 switch (opt) { 2885 case TCPOPT_MAXSEG: 2886 if (optlen != TCPOLEN_MAXSEG) 2887 continue; 2888 if (!(flags & TO_SYN)) 2889 continue; 2890 to->to_flags |= TOF_MSS; 2891 bcopy((char *)cp + 2, 2892 (char *)&to->to_mss, sizeof(to->to_mss)); 2893 to->to_mss = ntohs(to->to_mss); 2894 break; 2895 case TCPOPT_WINDOW: 2896 if (optlen != TCPOLEN_WINDOW) 2897 continue; 2898 if (!(flags & TO_SYN)) 2899 continue; 2900 to->to_flags |= TOF_SCALE; 2901 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 2902 break; 2903 case TCPOPT_TIMESTAMP: 2904 if (optlen != TCPOLEN_TIMESTAMP) 2905 continue; 2906 to->to_flags |= TOF_TS; 2907 bcopy((char *)cp + 2, 2908 (char *)&to->to_tsval, sizeof(to->to_tsval)); 2909 to->to_tsval = ntohl(to->to_tsval); 2910 bcopy((char *)cp + 6, 2911 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 2912 to->to_tsecr = ntohl(to->to_tsecr); 2913 break; 2914#ifdef TCP_SIGNATURE 2915 /* 2916 * XXX In order to reply to a host which has set the 2917 * TCP_SIGNATURE option in its initial SYN, we have to 2918 * record the fact that the option was observed here 2919 * for the syncache code to perform the correct response. 
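 *
 * For reference (assumed RFC 2385 layout): the signature option is
 * kind 19, length 18, followed by a 16-byte MD5 digest, which is
 * what to_signature is pointed at below:
 *
 *	+------+------+----------------------------------+
 *	| 0x13 | 0x12 | MD5 digest (16 bytes)            |
 *	+------+------+----------------------------------+
 *	  kind   len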
2920 */ 2921 case TCPOPT_SIGNATURE: 2922 if (optlen != TCPOLEN_SIGNATURE) 2923 continue; 2924 to->to_flags |= TOF_SIGNATURE; 2925 to->to_signature = cp + 2; 2926 break; 2927#endif 2928 case TCPOPT_SACK_PERMITTED: 2929 if (optlen != TCPOLEN_SACK_PERMITTED) 2930 continue; 2931 if (!(flags & TO_SYN)) 2932 continue; 2933 if (!V_tcp_do_sack) 2934 continue; 2935 to->to_flags |= TOF_SACKPERM; 2936 break; 2937 case TCPOPT_SACK: 2938 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 2939 continue; 2940 if (flags & TO_SYN) 2941 continue; 2942 to->to_flags |= TOF_SACK; 2943 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 2944 to->to_sacks = cp + 2; 2945 TCPSTAT_INC(tcps_sack_rcv_blocks); 2946 break; 2947 default: 2948 continue; 2949 } 2950 } 2951} 2952 2953/* 2954 * Pull out of band byte out of a segment so 2955 * it doesn't appear in the user's data queue. 2956 * It is still reflected in the segment length for 2957 * sequencing purposes. 2958 */ 2959static void 2960tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 2961 int off) 2962{ 2963 int cnt = off + th->th_urp - 1; 2964 2965 while (cnt >= 0) { 2966 if (m->m_len > cnt) { 2967 char *cp = mtod(m, caddr_t) + cnt; 2968 struct tcpcb *tp = sototcpcb(so); 2969 2970 INP_WLOCK_ASSERT(tp->t_inpcb); 2971 2972 tp->t_iobc = *cp; 2973 tp->t_oobflags |= TCPOOB_HAVEDATA; 2974 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 2975 m->m_len--; 2976 if (m->m_flags & M_PKTHDR) 2977 m->m_pkthdr.len--; 2978 return; 2979 } 2980 cnt -= m->m_len; 2981 m = m->m_next; 2982 if (m == NULL) 2983 break; 2984 } 2985 panic("tcp_pulloutofband"); 2986} 2987 2988/* 2989 * Collect new round-trip time estimate 2990 * and update averages and current timeout. 2991 */ 2992static void 2993tcp_xmit_timer(struct tcpcb *tp, int rtt) 2994{ 2995 INIT_VNET_INET(tp->t_inpcb->inp_vnet); 2996 int delta; 2997 2998 INP_WLOCK_ASSERT(tp->t_inpcb); 2999 3000 TCPSTAT_INC(tcps_rttupdated); 3001 tp->t_rttupdated++; 3002 if (tp->t_srtt != 0) { 3003 /* 3004 * srtt is stored as fixed point with 5 bits after the 3005 * binary point (i.e., scaled by 8). The following magic 3006 * is equivalent to the smoothing algorithm in rfc793 with 3007 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 3008 * point). Adjust rtt to origin 0. 3009 */ 3010 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 3011 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 3012 3013 if ((tp->t_srtt += delta) <= 0) 3014 tp->t_srtt = 1; 3015 3016 /* 3017 * We accumulate a smoothed rtt variance (actually, a 3018 * smoothed mean difference), then set the retransmit 3019 * timer to smoothed rtt + 4 times the smoothed variance. 3020 * rttvar is stored as fixed point with 4 bits after the 3021 * binary point (scaled by 16). The following is 3022 * equivalent to rfc793 smoothing with an alpha of .75 3023 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 3024 * rfc793's wired-in beta. 3025 */ 3026 if (delta < 0) 3027 delta = -delta; 3028 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 3029 if ((tp->t_rttvar += delta) <= 0) 3030 tp->t_rttvar = 1; 3031 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 3032 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3033 } else { 3034 /* 3035 * No rtt measurement yet - use the unsmoothed rtt. 3036 * Set the variance to half the rtt (so our first 3037 * retransmit happens at 3*rtt). 
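 *
 * Worked example, hedged (assumes TCP_RTT_SHIFT == 5 and
 * TCP_RTTVAR_SHIFT == 4 as defined in tcp_var.h): with a first
 * sample of rtt = 10 ticks,
 *
 *	t_srtt   = 10 << 5       = 320	(10 ticks, scaled by 32)
 *	t_rttvar = 10 << (4 - 1) = 80	(5 ticks, scaled by 16)
 *
 * so the first timeout fires near srtt + 4 * rttvar =
 * 10 + 4 * 5 = 30 ticks, i.e. the 3*rtt noted above.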
3038 */ 3039 tp->t_srtt = rtt << TCP_RTT_SHIFT; 3040 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 3041 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 3042 } 3043 tp->t_rtttime = 0; 3044 tp->t_rxtshift = 0; 3045 3046 /* 3047 * the retransmit should happen at rtt + 4 * rttvar. 3048 * Because of the way we do the smoothing, srtt and rttvar 3049 * will each average +1/2 tick of bias. When we compute 3050 * the retransmit timer, we want 1/2 tick of rounding and 3051 * 1 extra tick because of +-1/2 tick uncertainty in the 3052 * firing of the timer. The bias will give us exactly the 3053 * 1.5 tick we need. But, because the bias is 3054 * statistical, we have to test that we don't drop below 3055 * the minimum feasible timer (which is 2 ticks). 3056 */ 3057 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 3058 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 3059 3060 /* 3061 * We received an ack for a packet that wasn't retransmitted; 3062 * it is probably safe to discard any error indications we've 3063 * received recently. This isn't quite right, but close enough 3064 * for now (a route might have failed after we sent a segment, 3065 * and the return path might not be symmetrical). 3066 */ 3067 tp->t_softerror = 0; 3068} 3069 3070/* 3071 * Determine a reasonable value for maxseg size. 3072 * If the route is known, check route for mtu. 3073 * If none, use an mss that can be handled on the outgoing 3074 * interface without forcing IP to fragment; if bigger than 3075 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES 3076 * to utilize large mbufs. If no route is found, route has no mtu, 3077 * or the destination isn't local, use a default, hopefully conservative 3078 * size (usually 512 or the default IP max size, but no more than the mtu 3079 * of the interface), as we can't discover anything about intervening 3080 * gateways or networks. We also initialize the congestion/slow start 3081 * window to be a single segment if the destination isn't local. 3082 * While looking at the routing entry, we also initialize other path-dependent 3083 * parameters from pre-set or cached values in the routing entry. 3084 * 3085 * Also take into account the space needed for options that we 3086 * send regularly. Make maxseg shorter by that amount to assure 3087 * that we can send maxseg amount of data even when the options 3088 * are present. Store the upper limit of the length of options plus 3089 * data in maxopd. 3090 * 3091 * In case of T/TCP, we call this routine during implicit connection 3092 * setup as well (offer = -1), to initialize maxseg from the cached 3093 * MSS of our peer. 3094 * 3095 * NOTE that this routine is only called when we process an incoming 3096 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt(). 3097 */ 3098void 3099tcp_mss_update(struct tcpcb *tp, int offer, 3100 struct hc_metrics_lite *metricptr, int *mtuflags) 3101{ 3102 INIT_VNET_INET(tp->t_inpcb->inp_vnet); 3103 int mss; 3104 u_long maxmtu; 3105 struct inpcb *inp = tp->t_inpcb; 3106 struct hc_metrics_lite metrics; 3107 int origoffer = offer; 3108#ifdef INET6 3109 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3110 size_t min_protoh = isipv6 ? 3111 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 3112 sizeof (struct tcpiphdr); 3113#else 3114 const size_t min_protoh = sizeof(struct tcpiphdr); 3115#endif 3116 3117 INP_WLOCK_ASSERT(tp->t_inpcb); 3118 3119 /* Initialize. 
*/ 3120#ifdef INET6 3121 if (isipv6) { 3122 maxmtu = tcp_maxmtu6(&inp->inp_inc, mtuflags); 3123 tp->t_maxopd = tp->t_maxseg = V_tcp_v6mssdflt; 3124 } else 3125#endif 3126 { 3127 maxmtu = tcp_maxmtu(&inp->inp_inc, mtuflags); 3128 tp->t_maxopd = tp->t_maxseg = V_tcp_mssdflt; 3129 } 3130 3131 /* 3132 * No route to sender, stay with default mss and return. 3133 */ 3134 if (maxmtu == 0) { 3135 /* 3136 * In case we return early we need to initialize metrics 3137 * to a defined state as tcp_hc_get() would do for us 3138 * if there was no cache hit. 3139 */ 3140 if (metricptr != NULL) 3141 bzero(metricptr, sizeof(struct hc_metrics_lite)); 3142 return; 3143 } 3144 3145 /* What have we got? */ 3146 switch (offer) { 3147 case 0: 3148 /* 3149 * Offer == 0 means that there was no MSS on the SYN 3150 * segment; in this case we use tcp_mssdflt as 3151 * already assigned to t_maxopd above. 3152 */ 3153 offer = tp->t_maxopd; 3154 break; 3155 3156 case -1: 3157 /* 3158 * Offer == -1 means that we didn't receive SYN yet. 3159 */ 3160 /* FALLTHROUGH */ 3161 3162 default: 3163 /* 3164 * Prevent DoS attack with too small MSS. Round up 3165 * to at least minmss. 3166 */ 3167 offer = max(offer, V_tcp_minmss); 3168 } 3169 3170 /* 3171 * rmx information is now retrieved from tcp_hostcache. 3172 */ 3173 tcp_hc_get(&inp->inp_inc, &metrics); 3174 if (metricptr != NULL) 3175 bcopy(&metrics, metricptr, sizeof(struct hc_metrics_lite)); 3176 3177 /* 3178 * If there's a discovered mtu in the tcp hostcache, use it; 3179 * else, use the link mtu. 3180 */ 3181 if (metrics.rmx_mtu) 3182 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 3183 else { 3184#ifdef INET6 3185 if (isipv6) { 3186 mss = maxmtu - min_protoh; 3187 if (!V_path_mtu_discovery && 3188 !in6_localaddr(&inp->in6p_faddr)) 3189 mss = min(mss, V_tcp_v6mssdflt); 3190 } else 3191#endif 3192 { 3193 mss = maxmtu - min_protoh; 3194 if (!V_path_mtu_discovery && 3195 !in_localaddr(inp->inp_faddr)) 3196 mss = min(mss, V_tcp_mssdflt); 3197 } 3198 /* 3199 * XXX - The above conditional (mss = maxmtu - min_protoh) 3200 * probably violates the TCP spec. 3201 * The problem is that, since we don't know the 3202 * other end's MSS, we are supposed to use a conservative 3203 * default. But, if we do that, then MTU discovery will 3204 * never actually take place, because the conservative 3205 * default is much less than the MTUs typically seen 3206 * on the Internet today. For the moment, we'll sweep 3207 * this under the carpet. 3208 * 3209 * The conservative default might not actually be a problem 3210 * if the only case this occurs is when sending an initial 3211 * SYN with options and data to a host we've never talked 3212 * to before. Then, they will reply with an MSS value which 3213 * will get recorded and the new parameters should get 3214 * recomputed. For Further Study. 3215 */ 3216 } 3217 mss = min(mss, offer); 3218 3219 /* 3220 * Sanity check: make sure that maxopd will be large 3221 * enough to allow some data on segments even if all 3222 * the option space is used (40 bytes). Otherwise 3223 * funny things may happen in tcp_output. 3224 */ 3225 mss = max(mss, 64); 3226 3227 /* 3228 * maxopd stores the maximum length of data AND options 3229 * in a segment; maxseg is the amount of data in a normal 3230 * segment. We need to store this value (maxopd) apart 3231 * from maxseg, because now every segment carries options 3232 * and thus we normally have somewhat less data in segments.
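 *
 * Worked example, hedged (IPv4 over a 1500-byte Ethernet MTU with
 * timestamps in use): min_protoh = sizeof(struct tcpiphdr) = 40,
 * so mss = 1500 - 40 = 1460 is stored in t_maxopd below; the
 * 12 bytes of TCPOLEN_TSTAMP_APPA are then deducted and, since
 * 1448 is below MCLBYTES (typically 2048), no rounding applies:
 *
 *	t_maxopd = 1460		(data plus options)
 *	t_maxseg = 1448		(data only)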
3233 */ 3234 tp->t_maxopd = mss; 3235 3236 /* 3237 * origoffer==-1 indicates that no segments were received yet. 3238 * In this case we just guess. 3239 */ 3240 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && 3241 (origoffer == -1 || 3242 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) 3243 mss -= TCPOLEN_TSTAMP_APPA; 3244 3245#if (MCLBYTES & (MCLBYTES - 1)) == 0 3246 if (mss > MCLBYTES) 3247 mss &= ~(MCLBYTES-1); 3248#else 3249 if (mss > MCLBYTES) 3250 mss = mss / MCLBYTES * MCLBYTES; 3251#endif 3252 tp->t_maxseg = mss; 3253} 3254 3255void 3256tcp_mss(struct tcpcb *tp, int offer) 3257{ 3258 int rtt, mss; 3259 u_long bufsize; 3260 struct inpcb *inp; 3261 struct socket *so; 3262 struct hc_metrics_lite metrics; 3263 int mtuflags = 0; 3264#ifdef INET6 3265 int isipv6; 3266#endif 3267 KASSERT(tp != NULL, ("%s: tp == NULL", __func__)); 3268 INIT_VNET_INET(tp->t_vnet); 3269 3270 tcp_mss_update(tp, offer, &metrics, &mtuflags); 3271 3272 mss = tp->t_maxseg; 3273 inp = tp->t_inpcb; 3274#ifdef INET6 3275 isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 3276#endif 3277 3278 /* 3279 * If there's a pipesize, change the socket buffer to that size, 3280 * don't change if sb_hiwat is different from the default (then it 3281 * has been changed on purpose with setsockopt). 3282 * Make the socket buffers an integral number of mss units; 3283 * if the mss is larger than the socket buffer, decrease the mss. 3284 */ 3285 so = inp->inp_socket; 3286 SOCKBUF_LOCK(&so->so_snd); 3287 if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe) 3288 bufsize = metrics.rmx_sendpipe; 3289 else 3290 bufsize = so->so_snd.sb_hiwat; 3291 if (bufsize < mss) 3292 mss = bufsize; 3293 else { 3294 bufsize = roundup(bufsize, mss); 3295 if (bufsize > sb_max) 3296 bufsize = sb_max; 3297 if (bufsize > so->so_snd.sb_hiwat) 3298 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL); 3299 } 3300 SOCKBUF_UNLOCK(&so->so_snd); 3301 tp->t_maxseg = mss; 3302 3303 SOCKBUF_LOCK(&so->so_rcv); 3304 if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe) 3305 bufsize = metrics.rmx_recvpipe; 3306 else 3307 bufsize = so->so_rcv.sb_hiwat; 3308 if (bufsize > mss) { 3309 bufsize = roundup(bufsize, mss); 3310 if (bufsize > sb_max) 3311 bufsize = sb_max; 3312 if (bufsize > so->so_rcv.sb_hiwat) 3313 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL); 3314 } 3315 SOCKBUF_UNLOCK(&so->so_rcv); 3316 /* 3317 * While we're here, check the others too. 3318 */ 3319 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) { 3320 tp->t_srtt = rtt; 3321 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE; 3322 TCPSTAT_INC(tcps_usedrtt); 3323 if (metrics.rmx_rttvar) { 3324 tp->t_rttvar = metrics.rmx_rttvar; 3325 TCPSTAT_INC(tcps_usedrttvar); 3326 } else { 3327 /* default variation is +- 1 rtt */ 3328 tp->t_rttvar = 3329 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; 3330 } 3331 TCPT_RANGESET(tp->t_rxtcur, 3332 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, 3333 tp->t_rttmin, TCPTV_REXMTMAX); 3334 } 3335 if (metrics.rmx_ssthresh) { 3336 /* 3337 * There's some sort of gateway or interface 3338 * buffer limit on the path. Use this to set 3339 * the slow start threshold, but set the 3340 * threshold to no less than 2*mss. 3341 */ 3342 tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh); 3343 TCPSTAT_INC(tcps_usedssthresh); 3344 } 3345 if (metrics.rmx_bandwidth) 3346 tp->snd_bandwidth = metrics.rmx_bandwidth; 3347 3348 /* 3349 * Set the slow-start flight size depending on whether this 3350 * is a local network or not.
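 *
 * Hedged reference for the RFC 3390 branch below:
 *
 *	snd_cwnd = min(4 * mss, max(2 * mss, 4380));
 *
 * 4380 is 3 * 1460, so a 1448-byte mss starts with a 4380-byte
 * window, i.e. an initial flight of three segments.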
3351 * 3352 * Extend this so we cache the cwnd too and retrieve it here. 3353 * Make cwnd even bigger than RFC3390 suggests but only if we 3354 * have previous experience with the remote host. Be careful 3355 * not make cwnd bigger than remote receive window or our own 3356 * send socket buffer. Maybe put some additional upper bound 3357 * on the retrieved cwnd. Should do incremental updates to 3358 * hostcache when cwnd collapses so next connection doesn't 3359 * overloads the path again. 3360 * 3361 * RFC3390 says only do this if SYN or SYN/ACK didn't got lost. 3362 * We currently check only in syncache_socket for that. 3363 */ 3364#define TCP_METRICS_CWND 3365#ifdef TCP_METRICS_CWND 3366 if (metrics.rmx_cwnd) 3367 tp->snd_cwnd = max(mss, 3368 min(metrics.rmx_cwnd / 2, 3369 min(tp->snd_wnd, so->so_snd.sb_hiwat))); 3370 else 3371#endif 3372 if (V_tcp_do_rfc3390) 3373 tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380)); 3374#ifdef INET6 3375 else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) || 3376 (!isipv6 && in_localaddr(inp->inp_faddr))) 3377#else 3378 else if (in_localaddr(inp->inp_faddr)) 3379#endif 3380 tp->snd_cwnd = mss * V_ss_fltsz_local; 3381 else 3382 tp->snd_cwnd = mss * V_ss_fltsz; 3383 3384 /* Check the interface for TSO capabilities. */ 3385 if (mtuflags & CSUM_TSO) 3386 tp->t_flags |= TF_TSO; 3387} 3388 3389/* 3390 * Determine the MSS option to send on an outgoing SYN. 3391 */ 3392int 3393tcp_mssopt(struct in_conninfo *inc) 3394{ 3395 INIT_VNET_INET(curvnet); 3396 int mss = 0; 3397 u_long maxmtu = 0; 3398 u_long thcmtu = 0; 3399 size_t min_protoh; 3400 3401 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer")); 3402 3403#ifdef INET6 3404 if (inc->inc_flags & INC_ISIPV6) { 3405 mss = V_tcp_v6mssdflt; 3406 maxmtu = tcp_maxmtu6(inc, NULL); 3407 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3408 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 3409 } else 3410#endif 3411 { 3412 mss = V_tcp_mssdflt; 3413 maxmtu = tcp_maxmtu(inc, NULL); 3414 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3415 min_protoh = sizeof(struct tcpiphdr); 3416 } 3417 if (maxmtu && thcmtu) 3418 mss = min(maxmtu, thcmtu) - min_protoh; 3419 else if (maxmtu || thcmtu) 3420 mss = max(maxmtu, thcmtu) - min_protoh; 3421 3422 return (mss); 3423} 3424 3425 3426/* 3427 * On a partial ack arrives, force the retransmission of the 3428 * next unacknowledged segment. Do not clear tp->t_dupacks. 3429 * By setting snd_nxt to ti_ack, this forces retransmission timer to 3430 * be started again. 3431 */ 3432static void 3433tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) 3434{ 3435 tcp_seq onxt = tp->snd_nxt; 3436 u_long ocwnd = tp->snd_cwnd; 3437 3438 INP_WLOCK_ASSERT(tp->t_inpcb); 3439 3440 tcp_timer_activate(tp, TT_REXMT, 0); 3441 tp->t_rtttime = 0; 3442 tp->snd_nxt = th->th_ack; 3443 /* 3444 * Set snd_cwnd to one segment beyond acknowledged offset. 3445 * (tp->snd_una has not yet been updated when this function is called.) 3446 */ 3447 tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una); 3448 tp->t_flags |= TF_ACKNOW; 3449 (void) tcp_output(tp); 3450 tp->snd_cwnd = ocwnd; 3451 if (SEQ_GT(onxt, tp->snd_nxt)) 3452 tp->snd_nxt = onxt; 3453 /* 3454 * Partial window deflation. Relies on fact that tp->snd_una 3455 * not updated yet. 3456 */ 3457 if (tp->snd_cwnd > th->th_ack - tp->snd_una) 3458 tp->snd_cwnd -= th->th_ack - tp->snd_una; 3459 else 3460 tp->snd_cwnd = 0; 3461 tp->snd_cwnd += tp->t_maxseg; 3462} 3463