tcp_input.c revision 168903
/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 * $FreeBSD: head/sys/netinet/tcp_input.c 168903 2007-04-20 14:34:54Z andre $
 */

#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /*FAST_IPSEC*/

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netkey/key.h>
#endif /*IPSEC*/

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

static const int tcprexmtthresh = 3;

struct tcpstat tcpstat;
SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
    &tcpstat , tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");

static int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0, "Log all incoming TCP segments to closed ports");

static int blackhole = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &blackhole, 0, "Do not send RST on segments to closed ports");

int tcp_delack_enabled = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &tcp_delack_enabled, 0,
    "Delay ACK to try and piggyback it onto a data packet");

static int drop_synfin = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");

static int tcp_do_rfc3042 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
    &tcp_do_rfc3042, 0, "Enable RFC 3042 (Limited Transmit)");

static int tcp_do_rfc3390 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

static int tcp_insecure_rst = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
    &tcp_insecure_rst, 0,
    "Follow the old (insecure) criteria for accepting RST packets");

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
    "TCP Segment Reassembly Queue");

static int tcp_reass_maxseg = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RDTUN,
    &tcp_reass_maxseg, 0,
    "Global maximum number of TCP Segments in Reassembly Queue");

int tcp_reass_qsize = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD,
    &tcp_reass_qsize, 0,
    "Global number of TCP Segments currently in Reassembly Queue");

static int tcp_reass_maxqlen = 48;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxqlen, CTLFLAG_RW,
    &tcp_reass_maxqlen, 0,
    "Maximum number of TCP Segments per individual Reassembly Queue");

static int tcp_reass_overflows = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD,
    &tcp_reass_overflows, 0,
    "Global number of TCP Segment Reassembly Queue Overflows");

int tcp_do_autorcvbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing");

int tcp_autorcvbuf_inc = 16*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &tcp_autorcvbuf_inc, 0,
    "Incrementor step size of automatic receive buffer");

int tcp_autorcvbuf_max = 256*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer");
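
/*
 * For illustration only (hypothetical invocations): each CTLFLAG_RW knob
 * above is a runtime sysctl whose name follows from its declaration, e.g.
 *
 *	sysctl net.inet.tcp.blackhole=2
 *	sysctl net.inet.tcp.recvbuf_max=262144
 *
 * net.inet.tcp.reass.maxsegments is CTLFLAG_RDTUN: read-only at runtime
 * and settable only as a boot-time tunable, fetched once via
 * TUNABLE_INT_FETCH() in tcp_reass_init() below.
 */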

struct inpcbhead tcb;
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
struct inpcbinfo tcbinfo;

static void	 tcp_dooptions(struct tcpopt *, u_char *, int, int);
static int	 tcp_do_segment(struct mbuf *, struct tcphdr *,
		     struct socket *, struct tcpcb *, int, int);
static void	 tcp_dropwithreset(struct mbuf *, struct tcphdr *,
		     struct tcpcb *, int, int);
static void	 tcp_pulloutofband(struct socket *,
		     struct tcphdr *, struct mbuf *, int);
static int	 tcp_reass(struct tcpcb *, struct tcphdr *, int *,
		     struct mbuf *);
static void	 tcp_xmit_timer(struct tcpcb *, int);
static void	 tcp_newreno_partial_ack(struct tcpcb *,
		     struct tcphdr *);
static int	 tcp_timewait(struct inpcb *, struct tcpopt *,
		     struct tcphdr *, struct mbuf *, int);

/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
		nd6_nud_hint(NULL, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 *	- there is no delayed ack timer in progress and
 *	- our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window and
 *		- delayed acks are enabled or
 *		- this is a half-synchronized T/TCP connection.
 */
#define DELAY_ACK(tp)						\
	((!tcp_timer_active(tp, TT_DELACK) &&			\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&		\
	    (tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))

/* Initialize TCP reassembly queue */
static void
tcp_reass_zone_change(void *tag)
{

	tcp_reass_maxseg = nmbclusters / 16;
	uma_zone_set_max(tcp_reass_zone, tcp_reass_maxseg);
}

uma_zone_t tcp_reass_zone;
void
tcp_reass_init()
{
	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
	    &tcp_reass_maxseg);
	tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcp_reass_zone, tcp_reass_maxseg);
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    tcp_reass_zone_change, NULL, EVENTHANDLER_PRI_ANY);
}

static int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
{
	struct tseg_qent *q;
	struct tseg_qent *p = NULL;
	struct tseg_qent *nq;
	struct tseg_qent *te = NULL;
	struct socket *so = tp->t_inpcb->inp_socket;
	int flags;

	INP_LOCK_ASSERT(tp->t_inpcb);

	/*
	 * XXX: tcp_reass() is rather inefficient with its data structures
	 * and should be rewritten (see NetBSD for optimizations).  While
	 * doing that it should move to its own file tcp_reass.c.
	 */

	/*
	 * Call with th==NULL after the connection is established to
	 * force pre-ESTABLISHED data up to the user socket.
	 */
	if (th == NULL)
		goto present;

	/*
	 * Limit the number of segments in the reassembly queue to prevent
	 * holding on to too many segments (and thus running out of mbufs).
	 * Make sure to let the missing segment through which caused this
	 * queue.  Always keep one global queue entry spare to be able to
	 * process the missing segment.
	 */
	if (th->th_seq != tp->rcv_nxt &&
	    (tcp_reass_qsize + 1 >= tcp_reass_maxseg ||
	     tp->t_segqlen >= tcp_reass_maxqlen)) {
		tcp_reass_overflows++;
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		*tlenp = 0;
		return (0);
	}

	/*
	 * Allocate a new queue entry. If we can't, or hit the zone limit
	 * just drop the pkt.
	 */
	te = uma_zalloc(tcp_reass_zone, M_NOWAIT);
	if (te == NULL) {
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		*tlenp = 0;
		return (0);
	}
	tp->t_segqlen++;
	tcp_reass_qsize++;

	/*
	 * Find a segment which begins after this one does.
	 */
	LIST_FOREACH(q, &tp->t_segq, tqe_q) {
		if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
			break;
		p = q;
	}

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
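	/*
	 * Worked example (hypothetical numbers): if the preceding entry
	 * covers sequence range 100:200 (th_seq 100, tqe_len 100) and the
	 * incoming segment starts at th_seq 150 with *tlenp 80, then
	 * i = 100 + 100 - 150 = 50 bytes of our head duplicate data that
	 * is already queued; m_adj(m, 50) trims them and th_seq advances
	 * to 200.  Had *tlenp been <= 50, the whole segment would be a
	 * duplicate and is dropped after bumping the duplicate counters.
	 */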
	if (p != NULL) {
		int i;
		/* conversion to int (in i) handles seq wraparound */
		i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
		if (i > 0) {
			if (i >= *tlenp) {
				tcpstat.tcps_rcvduppack++;
				tcpstat.tcps_rcvdupbyte += *tlenp;
				m_freem(m);
				uma_zfree(tcp_reass_zone, te);
				tp->t_segqlen--;
				tcp_reass_qsize--;
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes.
				 */
				goto present;	/* ??? */
			}
			m_adj(m, i);
			*tlenp -= i;
			th->th_seq += i;
		}
	}
	tcpstat.tcps_rcvoopack++;
	tcpstat.tcps_rcvoobyte += *tlenp;

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q) {
		int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
		if (i <= 0)
			break;
		if (i < q->tqe_len) {
			q->tqe_th->th_seq += i;
			q->tqe_len -= i;
			m_adj(q->tqe_m, i);
			break;
		}

		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		uma_zfree(tcp_reass_zone, q);
		tp->t_segqlen--;
		tcp_reass_qsize--;
		q = nq;
	}

	/* Insert the new segment queue entry into place. */
	te->tqe_m = m;
	te->tqe_th = th;
	te->tqe_len = *tlenp;

	if (p == NULL) {
		LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
	} else {
		LIST_INSERT_AFTER(p, te, tqe_q);
	}

present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state))
		return (0);
	q = LIST_FIRST(&tp->t_segq);
	if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
		return (0);
	SOCKBUF_LOCK(&so->so_rcv);
	do {
		tp->rcv_nxt += q->tqe_len;
		flags = q->tqe_th->th_flags & TH_FIN;
		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			m_freem(q->tqe_m);
		else
			sbappendstream_locked(&so->so_rcv, q->tqe_m);
		uma_zfree(tcp_reass_zone, q);
		tp->t_segqlen--;
		tcp_reass_qsize--;
		q = nq;
	} while (q && q->tqe_th->th_seq == tp->rcv_nxt);
	ND6_HINT(tp);
	sorwakeup_locked(so);
	return (flags);
}
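
/*
 * Delivery example for the present: block above (hypothetical numbers):
 * with rcv_nxt = 100 and queued entries covering 100:200 and 200:300,
 * the do/while loop appends both mbuf chains to the socket buffer and
 * leaves rcv_nxt = 300; an entry starting at 400 would stop the loop
 * and stay queued.  The return value carries TH_FIN only if the last
 * segment handed up had FIN set.
 */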

/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return IPPROTO_DONE;
	}

	tcp_input(m, *offp);
	return IPPROTO_DONE;
}
#endif

void
tcp_input(struct mbuf *m, int off0)
{
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ipovly *ipov;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int optlen = 0;
	int len, tlen, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
#ifdef IPFIREWALL_FORWARD
	struct m_tag *fwd_tag;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
	char ip6buf[INET6_ADDRSTRLEN];
#else
	const int isipv6 = 0;
#endif
	struct tcpopt to;	/* options in this segment */

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	to.to_flags = 0;
	tcpstat.tcps_rcvtotal++;

	if (isipv6) {
#ifdef INET6
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
		ip6 = mtod(m, struct ip6_hdr *);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		th = (struct tcphdr *)((caddr_t)ip6 + off0);

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
#else
		th = NULL;	/* XXX: avoid compiler warning */
#endif
	} else {
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m, (struct mbuf *)0);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				tcpstat.tcps_rcvshort++;
				return;
			}
		}
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ip->ip_len;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data +
					ip->ip_len +
					IPPROTO_TCP));
			th->th_sum ^= 0xffff;
#ifdef TCPDEBUG
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
#endif
		} else {
			/*
			 * Checksum extended TCP header and data.
			 */
			len = sizeof (struct ip) + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
			th->th_sum = in_cksum(m, len);
		}
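		/*
		 * In all three cases th_sum should now be zero for a valid
		 * segment: with CSUM_DATA_VALID|CSUM_PSEUDO_HDR the driver
		 * delivered the final checksum in csum_data (0xffff when
		 * good, folded to 0 by the XOR above); with only
		 * CSUM_DATA_VALID the pseudo header is added via in_pseudo()
		 * before the fold; otherwise in_cksum() computed it in
		 * software over the overlaid pseudo header.
		 */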
		if (th->th_sum) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
	}

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.	XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		tcpstat.tcps_rcvbadoff++;
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
		if (isipv6) {
#ifdef INET6
			IP6_EXTHDR_CHECK(m, off0, off, );
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
#endif
		} else {
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					tcpstat.tcps_rcvshort++;
					return;
				}
				ip = mtod(m, struct ip *);
				ipov = (struct ipovly *)ip;
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * If the drop_synfin option is enabled, drop all packets with
	 * both the SYN and FIN bits set.  This prevents e.g. nmap from
	 * identifying the TCP/IP stack.
	 *
	 * This is a violation of the TCP specification.
	 */
	if (drop_synfin && (thflags & (TH_SYN|TH_FIN)) == (TH_SYN|TH_FIN))
		goto drop;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	th->th_seq = ntohl(th->th_seq);
	th->th_ack = ntohl(th->th_ack);
	th->th_win = ntohs(th->th_win);
	th->th_urp = ntohs(th->th_urp);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Locate pcb for segment.
	 */
	INP_INFO_WLOCK(&tcbinfo);
findpcb:
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
#ifdef IPFIREWALL_FORWARD
	/* Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. */
	fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

	if (fwd_tag != NULL && isipv6 == 0) {	/* IPv6 support is not yet */
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * already got one like this?
		 */
		inp = in_pcblookup_hash(&tcbinfo,
		    ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport,
		    0, m->m_pkthdr.rcvif);
		if (!inp) {
			/* It's new.  Try to find the ambushing socket. */
			inp = in_pcblookup_hash(&tcbinfo,
			    ip->ip_src, th->th_sport,
			    next_hop->sin_addr,
			    next_hop->sin_port ?
				ntohs(next_hop->sin_port) :
				th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
		}
		/* Remove the tag from the packet.  We don't need it anymore. */
		m_tag_delete(m, fwd_tag);
	} else
#endif /* IPFIREWALL_FORWARD */
	{
		if (isipv6) {
#ifdef INET6
			inp = in6_pcblookup_hash(&tcbinfo,
			    &ip6->ip6_src, th->th_sport,
			    &ip6->ip6_dst, th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
#endif
		} else
			inp = in_pcblookup_hash(&tcbinfo,
			    ip->ip_src, th->th_sport,
			    ip->ip_dst, th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
	}

#if defined(IPSEC) || defined(FAST_IPSEC)
#ifdef INET6
	if (isipv6 && inp != NULL && ipsec6_in_reject(m, inp)) {
#ifdef IPSEC
		ipsec6stat.in_polvio++;
#endif
		goto dropunlock;
	} else
#endif /* INET6 */
	if (inp != NULL && ipsec4_in_reject(m, inp)) {
#ifdef IPSEC
		ipsecstat.in_polvio++;
#endif
		goto dropunlock;
	}
#endif /*IPSEC || FAST_IPSEC*/

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
#ifndef INET6
			char dbuf[4*sizeof "123"], sbuf[4*sizeof "123"];
#else
			char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2];
			if (isipv6) {
				strcpy(dbuf, "[");
				strcat(dbuf,
				    ip6_sprintf(ip6buf, &ip6->ip6_dst));
				strcat(dbuf, "]");
				strcpy(sbuf, "[");
				strcat(sbuf,
				    ip6_sprintf(ip6buf, &ip6->ip6_src));
				strcat(sbuf, "]");
			} else
#endif /* INET6 */
			{
				strcpy(dbuf, inet_ntoa(ip->ip_dst));
				strcpy(sbuf, inet_ntoa(ip->ip_src));
			}
			log(LOG_INFO,
			    "Connection attempt to TCP %s:%d "
			    "from %s:%d flags:0x%02x\n",
			    dbuf, ntohs(th->th_dport), sbuf,
			    ntohs(th->th_sport), thflags);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((blackhole == 1 && (thflags & TH_SYN)) ||
		    blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_LOCK(inp);

	/* Check the minimum TTL for socket. */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
			goto dropunlock;
		else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}
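
	/*
	 * The minimum TTL check above is the receive half of a TTL
	 * security scheme (cf. RFC 3682): an application that only talks
	 * to directly connected peers can ask that segments arriving
	 * with a smaller TTL/hop limit be silently ignored.  Userland
	 * side, for illustration only:
	 *
	 *	int ttl = 255;
	 *	setsockopt(s, IPPROTO_IP, IP_MINTTL, &ttl, sizeof(ttl));
	 */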

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch
	 * stray or duplicate segments arriving late.  If this segment
	 * was a legitimate new connection attempt the old INPCB gets
	 * removed and we can try again to find a listening socket.
	 */
	if (inp->inp_vflag & INP_TIMEWAIT) {
		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		if (tcp_timewait(inp, &to, th, m, tlen))
			goto findpcb;
		/* tcp_timewait unlocks inp. */
		INP_INFO_WUNLOCK(&tcbinfo);
		return;
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL) {
		INP_UNLOCK(inp);
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	if (tp->t_state == TCPS_CLOSED)
		goto dropunlock;	/* XXX: dropwithreset??? */

#ifdef MAC
	INP_LOCK_ASSERT(inp);
	if (mac_check_inpcb_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
		if (isipv6)
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
		else
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

		bzero(&inc, sizeof(inc));
		inc.inc_isipv6 = isipv6;
#ifdef INET6
		if (isipv6) {
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;

		/*
		 * If the state is LISTEN then ignore segment if it contains
		 * a RST.  If the segment contains an ACK then it is bad and
		 * send a RST.  If it does not contain a SYN then it is not
		 * interesting; drop it.
		 *
		 * If the state is SYN_RECEIVED (syncache) and seg contains
		 * an ACK, but not for our SYN/ACK, send a RST.  If the seg
		 * contains a RST, check the sequence number to see if it
		 * is a valid reset segment.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) {
			if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
				/*
				 * Parse the TCP options here because
				 * syncookies need access to the reflected
				 * timestamp.
				 */
				tcp_dooptions(&to, optp, optlen, 0);
				if (!syncache_expand(&inc, &to, th, &so, m)) {
					/*
					 * No syncache entry or ACK was not
					 * for our SYN/ACK.  Send a RST.
					 */
					rstreason = BANDLIM_RST_OPENPORT;
					goto dropwithreset;
				}
				if (so == NULL) {
					/*
					 * We completed the 3-way handshake
					 * but could not allocate a socket
					 * either due to memory shortage,
					 * listen queue length limits or
					 * global socket limits.
					 */
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				}
				/*
				 * Socket is created in state SYN_RECEIVED.
				 * Continue processing segment.
				 */
				INP_UNLOCK(inp);	/* listen socket */
				inp = sotoinpcb(so);
				INP_LOCK(inp);		/* new connection */
				tp = intotcpcb(inp);
				/*
				 * Process the segment and the data it
				 * contains.  tcp_do_segment() consumes
				 * the mbuf chain and unlocks the inpcb.
				 * XXX: The potential return value of
				 * TIME_WAIT nuked is supposed to be
				 * handled above.
				 */
				if (tcp_do_segment(m, th, so, tp,
				    drop_hdrlen, tlen))
					goto findpcb;	/* TIME_WAIT nuked */
				return;
			}
			if (thflags & TH_RST) {
				syncache_chkrst(&inc, th);
				goto dropunlock;
			}
			if (thflags & TH_ACK) {
				syncache_badack(&inc);
				tcpstat.tcps_badsyn++;
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			goto dropunlock;
		}

		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 */
#ifdef INET6
		/*
		 * If deprecated address is forbidden,
		 * we do not accept SYN to deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  We compromise because it is much better for the peer
		 * to send a RST, and the RST will be the final packet
		 * for the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says this:
		 * 1. use of deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple descriptions of deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			if ((ia6 = ip6_getdstifaddr(m)) &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				INP_UNLOCK(inp);
				tp = NULL;
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
		}
#endif
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *
		 * Don't bother responding if the destination was a
		 * broadcast according to RFC1122 4.2.3.10, p. 104.
		 *
		 * If it is from this socket, drop it, it must be forged.
		 *
		 * Note that it is quite possible to receive unicast
		 * link-layer packets with a broadcast IP address.  Use
		 * in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST))
			goto dropunlock;
		if (isipv6) {
#ifdef INET6
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src))
				goto dropunlock;
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
				goto dropunlock;
#endif
		} else {
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr)
				goto dropunlock;
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
				goto dropunlock;
		}
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		tcp_dooptions(&to, optp, optlen, TO_SYN);
		syncache_add(&inc, &to, th, inp, &so, m);
		/*
		 * Entry added to syncache and mbuf consumed.
		 * Everything unlocked already by syncache_add().
		 */
		return;
	}

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or
	 * later state.  tcp_do_segment() always consumes the mbuf chain
	 * and unlocks the inpcb.
	 */
	if (tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen))
		goto findpcb;	/* XXX: TIME_WAIT was nuked. */
	return;

dropwithreset:
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	tcp_dropwithreset(m, th, tp, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
dropunlock:
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	if (tp != NULL)
		INP_UNLOCK(inp);
	INP_INFO_WUNLOCK(&tcbinfo);
drop:
	INP_INFO_UNLOCK_ASSERT(&tcbinfo);
	if (m != NULL)
		m_freem(m);
	return;
}

static int
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen)
{
	int thflags, acked, ourfinisacked, needoutput = 0;
	int headlocked = 1;
	int rstreason, todrop, win;
	u_long tiwin;
	struct tcpopt to;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_LOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", __func__));

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);

	/*
	 * Unscale the window into a 32-bit value.
	 * This value is bogus for the TCPS_SYN_SENT state
	 * and is overwritten later.
	 */
	tiwin = th->th_win << tp->snd_scale;

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, ticks))
			to.to_tsecr = 0;
	}

	/*
	 * Process options only when we get SYN/ACK back. The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
			tp->snd_wnd = th->th_win << tp->snd_scale;
			tiwin = tp->snd_wnd;
		}
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = ticks;
		}
		/* Initial send window, already scaled. */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if (tp->sack_enable) {
			if (!(to.to_flags & TOF_SACKPERM))
				tp->sack_enable = 0;
			else
				tp->t_flags |= TF_SACK_PERMIT;
		}
	}
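
	/*
	 * For illustration (hypothetical numbers): the window scale option
	 * is a shift count, so with to.to_wscale = 6 an advertised th_win
	 * of 65535 would unscale to 65535 << 6 = 4194240 bytes.  Per RFC
	 * 1323 the window field of a segment carrying SYN is never shifted,
	 * which is why the initial send window above is taken from th_win
	 * directly.
	 */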

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED above, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
	    th->th_seq == tp->rcv_nxt && tiwin && tiwin == tp->snd_wnd &&
	    tp->snd_nxt == tp->snd_max) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = ticks;
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    tp->snd_cwnd >= tp->snd_wnd &&
			    ((!tcp_do_newreno && !tp->sack_enable &&
			      tp->t_dupacks < tcprexmtthresh) ||
			     ((tcp_do_newreno || tp->sack_enable) &&
			      !IN_FASTRECOVERY(tp) &&
			      (to.to_flags & TOF_SACK) == 0 &&
			      TAILQ_EMPTY(&tp->snd_holes)))) {
				KASSERT(headlocked,
				    ("%s: headlocked", __func__));
				INP_INFO_WUNLOCK(&tcbinfo);
				headlocked = 0;
				/*
				 * this is a pure ack for outstanding data.
				 */
				++tcpstat.tcps_predack;
				/*
				 * "bad retransmit" recovery
				 */
				if (tp->t_rxtshift == 1 &&
				    ticks < tp->t_badrxtwin) {
					++tcpstat.tcps_sndrexmitbad;
					tp->snd_cwnd = tp->snd_cwnd_prev;
					tp->snd_ssthresh =
					    tp->snd_ssthresh_prev;
					tp->snd_recover = tp->snd_recover_prev;
					if (tp->t_flags & TF_WASFRECOVERY)
						ENTER_FASTRECOVERY(tp);
					tp->snd_nxt = tp->snd_max;
					tp->t_badrxtwin = 0;
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - to.to_tsecr)
						tp->t_rttlow = ticks - to.to_tsecr;
					tcp_xmit_timer(tp,
					    ticks - to.to_tsecr + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
				tcp_xmit_bandwidth_limit(tp, th->th_ack);
				acked = th->th_ack - tp->snd_una;
				tcpstat.tcps_rcvackpack++;
				tcpstat.tcps_rcvackbyte += acked;
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;
				tp->snd_una = th->th_ack;
				/*
				 * pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);
				ND6_HINT(tp); /* some progress has been done */

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);

				sowwakeup(so);
				if (so->so_snd.sb_cc)
					(void) tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    LIST_EMPTY(&tp->t_segq) &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			KASSERT(headlocked, ("%s: headlocked", __func__));
			INP_INFO_WUNLOCK(&tcbinfo);
			headlocked = 0;
			/*
			 * this is a pure, in-sequence data packet
			 * with nothing on the reassembly queue and
			 * we have enough buffer space to take it.
			 */
			/* Clean receiver SACK report if present */
			if (tp->sack_enable && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			++tcpstat.tcps_preddat;
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			tcpstat.tcps_rcvpack++;
			tcpstat.tcps_rcvbyte += tlen;
			ND6_HINT(tp);	/* some progress has been done */
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
			/*
			 * Automatic sizing of receive socket buffer.  Often the send
			 * buffer size is not optimally adjusted to the actual network
			 * conditions at hand (delay bandwidth product).  Setting the
			 * buffer size too small limits throughput on links with high
			 * bandwidth and high delay (eg. trans-continental/oceanic links).
			 *
			 * On the receive side the socket buffer memory is only rarely
			 * used to any significant extent.  This allows us to be much
			 * more aggressive in scaling the receive socket buffer.  For
			 * the case that the buffer space is actually used to a large
			 * extent and we run out of kernel memory we can simply drop
			 * the new segments; TCP on the sender will just retransmit it
			 * later.  Setting the buffer size too big may only consume too
			 * much kernel memory if the application doesn't read() from
			 * the socket or packet loss or reordering makes use of the
			 * reassembly queue.
			 *
			 * The criteria to step up the receive buffer one notch are:
			 *  1. the number of bytes received during the time it takes
			 *     one timestamp to be reflected back to us (the RTT);
			 *  2. received bytes per RTT is within seven eighth of the
			 *     current socket buffer size;
			 *  3. receive buffer size has not hit maximal automatic size;
			 *
			 * This algorithm does one step per RTT at most and only if
			 * we receive a bulk stream w/o packet losses or reorderings.
			 * Shrinking the buffer during idle times is not necessary as
			 * it doesn't consume any memory when idle.
			 *
			 * TODO: Only step up if the application is actually serving
			 * the buffer to better manage the socket buffer resources.
			 */
			if (tcp_do_autorcvbuf &&
			    to.to_tsecr &&
			    (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
				if (to.to_tsecr > tp->rfbuf_ts &&
				    to.to_tsecr - tp->rfbuf_ts < hz) {
					if (tp->rfbuf_cnt >
					    (so->so_rcv.sb_hiwat / 8 * 7) &&
					    so->so_rcv.sb_hiwat <
					    tcp_autorcvbuf_max) {
						newsize =
						    min(so->so_rcv.sb_hiwat +
						    tcp_autorcvbuf_inc,
						    tcp_autorcvbuf_max);
					}
					/* Start over with next RTT. */
					tp->rfbuf_ts = 0;
					tp->rfbuf_cnt = 0;
				} else
					tp->rfbuf_cnt += tlen;	/* add up */
			}
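
			/*
			 * Worked example for the step-up criteria above
			 * (hypothetical numbers): with sb_hiwat = 65536 and
			 * 60000 bytes counted into rfbuf_cnt within one
			 * echoed-timestamp round trip (less than hz ticks),
			 * 60000 > 65536 / 8 * 7 = 57344, so with the default
			 * tunables newsize = min(65536 + 16384, 262144) =
			 * 81920 and the buffer grows one notch; at most one
			 * such step is taken per RTT.
			 */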

			/* Add data to socket buffer. */
			SOCKBUF_LOCK(&so->so_rcv);
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
				m_freem(m);
			} else {
				/*
				 * Set new socket buffer size.
				 * Give up when limit is reached.
				 */
				if (newsize)
					if (!sbreserve_locked(&so->so_rcv,
					    newsize, so, curthread))
						so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
				m_adj(m, drop_hdrlen);	/* delayed header drop */
				sbappendstream_locked(&so->so_rcv, m);
			}
			sorwakeup_locked(so);
			if (DELAY_ACK(tp)) {
				tp->t_flags |= TF_DELACK;
			} else {
				tp->t_flags |= TF_ACKNOW;
				tcp_output(tp);
			}
			goto check_delack;
		}
	}

	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	win = sbspace(&so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));

	/* Reset receive buffer auto scaling when not in bulk receive mode. */
	tp->rfbuf_ts = 0;
	tp->rfbuf_cnt = 0;

	switch (tp->t_state) {

	/*
	 * If the state is SYN_RECEIVED:
	 *	if seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	case TCPS_SYN_RECEIVED:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		break;

	/*
	 * If the state is SYN_SENT:
	 *	if seg contains an ACK, but not for our SYN, drop the input.
	 *	if seg contains a RST, then drop the connection.
	 *	if seg does not contain SYN, then drop it.
	 *	Otherwise this is an acceptable SYN segment
	 *		initialize tp->rcv_nxt and tp->irs
	 *		if seg contains ack then advance tp->snd_una
	 *		if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	 *		arrange for segment to be acked (eventually)
	 *		continue processing rest of data/controls, beginning with URG
	 */
	case TCPS_SYN_SENT:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->iss) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_UNLIMITED;
			goto dropwithreset;
		}
		if (thflags & TH_RST) {
			if (thflags & TH_ACK) {
				KASSERT(headlocked, ("%s: after_listen: "
				    "tcp_drop.2: head not locked", __func__));
				tp = tcp_drop(tp, ECONNREFUSED);
			}
			goto drop;
		}
		if ((thflags & TH_SYN) == 0)
			goto drop;

		tp->irs = th->th_seq;
		tcp_rcvseqinit(tp);
		if (thflags & TH_ACK) {
			tcpstat.tcps_connects++;
			soisconnected(so);
#ifdef MAC
			SOCK_LOCK(so);
			mac_set_socket_peer_from_mbuf(m, so);
			SOCK_UNLOCK(so);
#endif
			/* Do window scaling on this connection? */
			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
				tp->rcv_scale = tp->request_r_scale;
			}
			tp->rcv_adv += tp->rcv_wnd;
			tp->snd_una++;		/* SYN is acked */
			/*
			 * If there's data, delay ACK; if there's also a FIN
			 * ACKNOW will be turned on later.
			 */
			if (DELAY_ACK(tp) && tlen != 0)
				tcp_timer_activate(tp, TT_DELACK,
				    tcp_delacktime);
			else
				tp->t_flags |= TF_ACKNOW;
			/*
			 * Received <SYN,ACK> in SYN_SENT[*] state.
			 * Transitions:
			 *	SYN_SENT  --> ESTABLISHED
			 *	SYN_SENT* --> FIN_WAIT_1
			 */
			tp->t_starttime = ticks;
			if (tp->t_flags & TF_NEEDFIN) {
				tp->t_state = TCPS_FIN_WAIT_1;
				tp->t_flags &= ~TF_NEEDFIN;
				thflags &= ~TH_SYN;
			} else {
				tp->t_state = TCPS_ESTABLISHED;
				tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
			}
		} else {
			/*
			 * Received initial SYN in SYN-SENT[*] state =>
			 * simultaneous open.  If segment contains CC option
			 * and there is a cached CC, apply TAO test.
			 * If it succeeds, connection is half-synchronized.
			 * Otherwise, do 3-way handshake:
			 *        SYN-SENT -> SYN-RECEIVED
			 *        SYN-SENT* -> SYN-RECEIVED*
			 * If there was no CC option, clear cached CC value.
			 */
			tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
			tcp_timer_activate(tp, TT_REXMT, 0);
			tp->t_state = TCPS_SYN_RECEIVED;
		}

		KASSERT(headlocked, ("%s: trimthenstep6: head not locked",
		    __func__));
		INP_LOCK_ASSERT(tp->t_inpcb);

		/*
		 * Advance th->th_seq to correspond to first data byte.
		 * If data, trim to stay within window,
		 * dropping FIN if necessary.
		 */
		th->th_seq++;
		if (tlen > tp->rcv_wnd) {
			todrop = tlen - tp->rcv_wnd;
			m_adj(m, -todrop);
			tlen = tp->rcv_wnd;
			thflags &= ~TH_FIN;
			tcpstat.tcps_rcvpackafterwin++;
			tcpstat.tcps_rcvbyteafterwin += todrop;
		}
		tp->snd_wl1 = th->th_seq - 1;
		tp->rcv_up = th->th_seq;
		/*
		 * Client side of transaction: already sent SYN and data.
		 * If the remote host used T/TCP to validate the SYN,
		 * our data will be ACK'd; if so, enter normal data segment
		 * processing in the middle of step 5, ack processing.
		 * Otherwise, goto step 6.
		 */
		if (thflags & TH_ACK)
			goto process_ACK;

		goto step6;

	/*
	 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
	 *	do normal processing.
	 *
	 * NB: Leftover from RFC1644 T/TCP.  Cases to be reused later.
	 */
	case TCPS_LAST_ACK:
	case TCPS_CLOSING:
	case TCPS_TIME_WAIT:
		KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: timewait",
		    __func__));
		break;				/* continue normal processing */
	}

	/*
	 * States other than LISTEN or SYN_SENT.
	 * First check the RST flag and sequence number since reset segments
	 * are exempt from the timestamp and connection count tests.  This
	 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
	 * below which allowed reset segments in half the sequence space
	 * to fall through and be processed (which gives forged reset
	 * segments with a random sequence number a 50 percent chance of
	 * killing a connection).
	 * Then check timestamp, if present.
	 * Then check the connection count, if present.
	 * Then check that at least some bytes of segment are within
	 * receive window.  If segment begins before rcv_nxt,
	 * drop leading data (and SYN); if nothing left, just ack.
	 *
	 *
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *	In all states except SYN-SENT, all reset (RST) segments
	 *	are validated by checking their SEQ-fields.  A reset is
	 *	valid if its sequence number is in the window.
	 * Note: this does not take into account delayed ACKs, so
	 *	we should test against last_ack_sent instead of rcv_nxt.
	 *	The sequence number in the reset segment is normally an
	 *	echo of our outgoing acknowledgement numbers, but some hosts
	 *	send a reset with the sequence number at the rightmost edge
	 *	of our receive window, and we have to handle this case.
	 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
	 *	that brute force RST attacks are possible.  To combat this,
	 *	we use a much stricter check while in the ESTABLISHED state,
	 *	only accepting RSTs where the sequence number is equal to
	 *	last_ack_sent.  In all other states (the states in which a
	 *	RST is more likely), the more permissive check is used.
	 *	If we have multiple segments in flight, the initial reset
	 *	segment sequence numbers will be to the left of last_ack_sent,
	 *	but they will eventually catch up.
	 *	In any case, it never made sense to trim reset segments to
	 *	fit the receive window since RFC 1122 says:
	 *	  4.2.2.12  RST Segment: RFC-793 Section 3.4
	 *
	 *	    A TCP SHOULD allow a received RST segment to include data.
	 *
	 *	    DISCUSSION
	 *		 It has been suggested that a RST segment could contain
	 *		 ASCII text that encoded and explained the cause of the
	 *		 RST.  No standard has yet been established for such
	 *		 data.
	 *
	 * If the reset segment passes the sequence number test examine
	 * the state:
	 *    SYN_RECEIVED STATE:
	 *	If passive open, return to LISTEN state.
	 *	If active open, inform user that connection was refused.
	 *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
	 *	Inform user that connection was reset, and close tcb.
	 *    CLOSING, LAST_ACK STATES:
	 *	Close the tcb.
	 *    TIME_WAIT STATE:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
	 */
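	/*
	 * Worked example (hypothetical numbers): with last_ack_sent = 1000
	 * and rcv_wnd = 57344, the permissive check below accepts RST
	 * sequence numbers 999 through 58344, while the strict ESTABLISHED
	 * test (tcp_insecure_rst off) only accepts sequence numbers within
	 * one of rcv_nxt or last_ack_sent, i.e. 999 through 1001 here,
	 * which is what defeats blind RST injection.
	 */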
	if (thflags & TH_RST) {
		if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
			switch (tp->t_state) {

			case TCPS_SYN_RECEIVED:
				so->so_error = ECONNREFUSED;
				goto close;

			case TCPS_ESTABLISHED:
				if (tcp_insecure_rst == 0 &&
				    !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
				    SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
				    !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
				    SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
					tcpstat.tcps_badrst++;
					goto drop;
				}
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
				so->so_error = ECONNRESET;
			close:
				tp->t_state = TCPS_CLOSED;
				tcpstat.tcps_drops++;
				KASSERT(headlocked, ("%s: trimthenstep6: "
				    "tcp_close: head not locked", __func__));
				tp = tcp_close(tp);
				break;

			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				KASSERT(headlocked, ("%s: trimthenstep6: "
				    "tcp_close.2: head not locked", __func__));
				tp = tcp_close(tp);
				break;

			case TCPS_TIME_WAIT:
				KASSERT(tp->t_state != TCPS_TIME_WAIT,
				    ("%s: timewait", __func__));
				break;
			}
		}
		goto drop;
	}

	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {

		/* Check to see if ts_recent is over 24 days old.  */
		if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) {
			/*
			 * Invalidate ts_recent.  If this segment updates
			 * ts_recent, the age will be reset later and ts_recent
			 * will get a valid value.  If it does not, setting
			 * ts_recent to zero will at least satisfy the
			 * requirement that zero be placed in the timestamp
			 * echo reply when ts_recent isn't valid.  The
			 * age isn't reset until we get a valid ts_recent
			 * because we don't want out-of-order segments to be
			 * dropped when ts_recent is old.
			 */
			tp->ts_recent = 0;
		} else {
			tcpstat.tcps_rcvduppack++;
			tcpstat.tcps_rcvdupbyte += tlen;
			tcpstat.tcps_pawsdrop++;
			if (tlen)
				goto dropafterack;
			goto drop;
		}
	}

	/*
	 * In the SYN-RECEIVED state, validate that the packet belongs to
	 * this connection before trimming the data to fit the receive
	 * window.  Check the sequence number versus IRS since we know
	 * the sequence numbers haven't wrapped.  This is a partial fix
	 * for the "LAND" DoS attack.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
		rstreason = BANDLIM_RST_OPENPORT;
		goto dropwithreset;
	}

	todrop = tp->rcv_nxt - th->th_seq;
	if (todrop > 0) {
		if (thflags & TH_SYN) {
			thflags &= ~TH_SYN;
			th->th_seq++;
			if (th->th_urp > 1)
				th->th_urp--;
			else
				thflags &= ~TH_URG;
			todrop--;
		}
		/*
		 * Following if statement from Stevens, vol. 2, p. 960.
		 */
		if (todrop > tlen
		    || (todrop == tlen && (thflags & TH_FIN) == 0)) {
			/*
			 * Any valid FIN must be to the left of the window.
			 * At this point the FIN must be a duplicate or out
			 * of sequence; drop it.
			 */
			thflags &= ~TH_FIN;

			/*
			 * Send an ACK to resynchronize and drop any data.
			 * But keep on processing for RST or ACK.
			 */
			tp->t_flags |= TF_ACKNOW;
			todrop = tlen;
			tcpstat.tcps_rcvduppack++;
			tcpstat.tcps_rcvdupbyte += todrop;
		} else {
			tcpstat.tcps_rcvpartduppack++;
			tcpstat.tcps_rcvpartdupbyte += todrop;
		}
		drop_hdrlen += todrop;	/* drop from the top afterwards */
		th->th_seq += todrop;
		tlen -= todrop;
		if (th->th_urp > todrop)
			th->th_urp -= todrop;
		else {
			thflags &= ~TH_URG;
			th->th_urp = 0;
		}
	}

	/*
	 * If new data are received on a connection after the
	 * user processes are gone, then RST the other end.
	 */
	if ((so->so_state & SS_NOFDREF) &&
	    tp->t_state > TCPS_CLOSE_WAIT && tlen) {
		KASSERT(headlocked, ("%s: trimthenstep6: tcp_close.3: head "
		    "not locked", __func__));
		tp = tcp_close(tp);
		tcpstat.tcps_rcvafterclose++;
		rstreason = BANDLIM_UNLIMITED;
		goto dropwithreset;
	}

	/*
	 * If segment ends after window, drop trailing data
	 * (and PUSH and FIN); if nothing left, just ACK.
	 */
	todrop = (th->th_seq+tlen) - (tp->rcv_nxt+tp->rcv_wnd);
	if (todrop > 0) {
		tcpstat.tcps_rcvpackafterwin++;
		if (todrop >= tlen) {
			tcpstat.tcps_rcvbyteafterwin += tlen;
			/*
			 * If a new connection request is received
			 * while in TIME_WAIT, drop the old connection
			 * and start over if the sequence numbers
			 * are above the previous ones.
			 */
			KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: timewait",
			    __func__));
			if (thflags & TH_SYN &&
			    tp->t_state == TCPS_TIME_WAIT &&
			    SEQ_GT(th->th_seq, tp->rcv_nxt)) {
				KASSERT(headlocked, ("%s: trimthenstep6: "
				    "tcp_close.4: head not locked", __func__));
				tp = tcp_close(tp);
				/* XXX: Shouldn't be possible. */
				return (1);
			}
			/*
			 * If window is closed can only take segments at
			 * window edge, and have to drop data and PUSH from
			 * incoming segments.  Continue processing, but
			 * remember to ack.  Otherwise, drop segment
			 * and ack.
			 */
			if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
				tp->t_flags |= TF_ACKNOW;
				tcpstat.tcps_rcvwinprobe++;
			} else
				goto dropafterack;
		} else
			tcpstat.tcps_rcvbyteafterwin += todrop;
		m_adj(m, -todrop);
		tlen -= todrop;
		thflags &= ~(TH_PUSH|TH_FIN);
	}

	/*
	 * If last ACK falls within this segment's sequence numbers,
	 * record its timestamp.
	 * NOTE:
	 * 1) That the test incorporates suggestions from the latest
	 *    proposal of the tcplw@cray.com list (Braden 1993/04/26).
	 * 2) That updating only on newer timestamps interferes with
	 *    our earlier PAWS tests, so this check should be solely
	 *    predicated on the sequence space of this segment.
	 * 3) That we modify the segment boundary check to be
	 *        Last.ACK.Sent <= SEG.SEQ + SEG.Len
	 *    instead of RFC1323's
	 *        Last.ACK.Sent < SEG.SEQ + SEG.Len,
	 *    This modified check allows us to overcome RFC1323's
	 *    limitations as described in Stevens TCP/IP Illustrated
	 *    Vol. 2 p.869.  In such cases, we can still calculate the
	 *    RTT correctly when RCV.NXT == Last.ACK.Sent.
	 */
	if ((to.to_flags & TOF_TS) != 0 &&
	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
		((thflags & (TH_SYN|TH_FIN)) != 0))) {
		tp->ts_recent_age = ticks;
		tp->ts_recent = to.to_tsval;
	}

	/*
	 * If a SYN is in the window, then this is an
	 * error and we send an RST and drop the connection.
	 */
	if (thflags & TH_SYN) {
		KASSERT(headlocked, ("%s: tcp_drop: trimthenstep6: "
		    "head not locked", __func__));
		tp = tcp_drop(tp, ECONNRESET);
		rstreason = BANDLIM_UNLIMITED;
		goto drop;
	}

	/*
	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN
	 * flag is on (half-synchronized state), then queue data for
	 * later processing; else drop segment and return.
	 */
	if ((thflags & TH_ACK) == 0) {
		if (tp->t_state == TCPS_SYN_RECEIVED ||
		    (tp->t_flags & TF_NEEDSYN))
			goto step6;
		else if (tp->t_flags & TF_ACKNOW)
			goto dropafterack;
		else
			goto drop;
	}

	/*
	 * Ack processing.
	 */
	switch (tp->t_state) {

	/*
	 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
	 * ESTABLISHED state and continue processing.
	 * The ACK was checked above.
	 */
	case TCPS_SYN_RECEIVED:

		tcpstat.tcps_connects++;
		soisconnected(so);
		/* Do window scaling? */
		if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
		    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
			tp->rcv_scale = tp->request_r_scale;
			tp->snd_wnd = tiwin;
		}
		/*
		 * Make transitions:
		 *      SYN-RECEIVED  -> ESTABLISHED
		 *      SYN-RECEIVED* -> FIN-WAIT-1
		 */
		tp->t_starttime = ticks;
		if (tp->t_flags & TF_NEEDFIN) {
			tp->t_state = TCPS_FIN_WAIT_1;
			tp->t_flags &= ~TF_NEEDFIN;
		} else {
			tp->t_state = TCPS_ESTABLISHED;
			tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
		}
		/*
		 * If segment contains data or ACK, will call tcp_reass()
		 * later; if not, do so now to pass queued data to user.
		 */
		if (tlen == 0 && (thflags & TH_FIN) == 0)
			(void) tcp_reass(tp, (struct tcphdr *)0, 0,
			    (struct mbuf *)0);
		tp->snd_wl1 = th->th_seq - 1;
		/* FALLTHROUGH */

	/*
	 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
	 * ACKs.  If the ack is in the range
	 *	tp->snd_una < th->th_ack <= tp->snd_max
	 * then advance tp->snd_una to th->th_ack and drop
	 * data from the retransmission queue.  If this ACK reflects
	 * more up to date window information we update our window information.
	 */
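	/*
	 * Note on the SEQ_* comparisons used throughout: they are
	 * modulo-2^32, implemented by casting the difference to int, e.g.
	 * SEQ_GT(a, b) is ((int)((a) - (b)) > 0), so SEQ_GT(0x10, 0xfffffff0)
	 * is true because (int)(0x10 - 0xfffffff0) == 0x20.  This is what
	 * makes range checks such as snd_una < th_ack <= snd_max work
	 * across sequence number wraparound.
	 */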
1876 */
1877 case TCPS_ESTABLISHED:
1878 case TCPS_FIN_WAIT_1:
1879 case TCPS_FIN_WAIT_2:
1880 case TCPS_CLOSE_WAIT:
1881 case TCPS_CLOSING:
1882 case TCPS_LAST_ACK:
1883 case TCPS_TIME_WAIT:
1884 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: timewait",
1885 __func__));
1886 if (SEQ_GT(th->th_ack, tp->snd_max)) {
1887 tcpstat.tcps_rcvacktoomuch++;
1888 goto dropafterack;
1889 }
1890 if (tp->sack_enable &&
1891 ((to.to_flags & TOF_SACK) ||
1892 !TAILQ_EMPTY(&tp->snd_holes)))
1893 tcp_sack_doack(tp, &to, th->th_ack);
1894 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
1895 if (tlen == 0 && tiwin == tp->snd_wnd) {
1896 tcpstat.tcps_rcvdupack++;
1897 /*
1898 * If we have outstanding data (other than
1899 * a window probe), this is a completely
1900 * duplicate ack (ie, window info didn't
1901 * change), the ack is the biggest we've
1902 * seen and we've seen exactly our rexmt
1903 * threshold of them, assume a packet
1904 * has been dropped and retransmit it.
1905 * Kludge snd_nxt & the congestion
1906 * window so we send only this one
1907 * packet.
1908 *
1909 * We know we're losing at the current
1910 * window size so do congestion avoidance
1911 * (set ssthresh to half the current window
1912 * and pull our congestion window back to
1913 * the new ssthresh).
1914 *
1915 * Dup acks mean that packets have left the
1916 * network (they're now cached at the receiver)
1917 * so bump cwnd by the amount in the receiver
1918 * to keep a constant number of packets in the
1919 * network.
1920 */
1921 if (!tcp_timer_active(tp, TT_REXMT) ||
1922 th->th_ack != tp->snd_una)
1923 tp->t_dupacks = 0;
1924 else if (++tp->t_dupacks > tcprexmtthresh ||
1925 ((tcp_do_newreno || tp->sack_enable) &&
1926 IN_FASTRECOVERY(tp))) {
1927 if (tp->sack_enable && IN_FASTRECOVERY(tp)) {
1928 int awnd;
1929 
1930 /*
1931 * Compute the amount of data in flight first.
1932 * We can inject new data into the pipe iff
1933 * we have less than 1/2 the original window's
1934 * worth of data in flight.
1935 */
1936 awnd = (tp->snd_nxt - tp->snd_fack) +
1937 tp->sackhint.sack_bytes_rexmit;
1938 if (awnd < tp->snd_ssthresh) {
1939 tp->snd_cwnd += tp->t_maxseg;
1940 if (tp->snd_cwnd > tp->snd_ssthresh)
1941 tp->snd_cwnd = tp->snd_ssthresh;
1942 }
1943 } else
1944 tp->snd_cwnd += tp->t_maxseg;
1945 (void) tcp_output(tp);
1946 goto drop;
1947 } else if (tp->t_dupacks == tcprexmtthresh) {
1948 tcp_seq onxt = tp->snd_nxt;
1949 u_int win;
1950 
1951 /*
1952 * If we're doing sack, check to
1953 * see if we're already in sack
1954 * recovery. If we're not doing sack,
1955 * check to see if we're in newreno
1956 * recovery.
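*
* As a worked example of the window halving below (numbers
* assumed): with t_maxseg = 1460 and min(snd_wnd, snd_cwnd) =
* 20000, win = 20000 / 2 / 1460 = 6, so snd_ssthresh becomes
* 6 * 1460 = 8760 bytes; the "win < 2" clamp only matters for
* very small windows.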
1957 */ 1958 if (tp->sack_enable) { 1959 if (IN_FASTRECOVERY(tp)) { 1960 tp->t_dupacks = 0; 1961 break; 1962 } 1963 } else if (tcp_do_newreno) { 1964 if (SEQ_LEQ(th->th_ack, 1965 tp->snd_recover)) { 1966 tp->t_dupacks = 0; 1967 break; 1968 } 1969 } 1970 win = min(tp->snd_wnd, tp->snd_cwnd) / 1971 2 / tp->t_maxseg; 1972 if (win < 2) 1973 win = 2; 1974 tp->snd_ssthresh = win * tp->t_maxseg; 1975 ENTER_FASTRECOVERY(tp); 1976 tp->snd_recover = tp->snd_max; 1977 tcp_timer_activate(tp, TT_REXMT, 0); 1978 tp->t_rtttime = 0; 1979 if (tp->sack_enable) { 1980 tcpstat.tcps_sack_recovery_episode++; 1981 tp->sack_newdata = tp->snd_nxt; 1982 tp->snd_cwnd = tp->t_maxseg; 1983 (void) tcp_output(tp); 1984 goto drop; 1985 } 1986 tp->snd_nxt = th->th_ack; 1987 tp->snd_cwnd = tp->t_maxseg; 1988 (void) tcp_output(tp); 1989 KASSERT(tp->snd_limited <= 2, 1990 ("%s: tp->snd_limited too big", 1991 __func__)); 1992 tp->snd_cwnd = tp->snd_ssthresh + 1993 tp->t_maxseg * 1994 (tp->t_dupacks - tp->snd_limited); 1995 if (SEQ_GT(onxt, tp->snd_nxt)) 1996 tp->snd_nxt = onxt; 1997 goto drop; 1998 } else if (tcp_do_rfc3042) { 1999 u_long oldcwnd = tp->snd_cwnd; 2000 tcp_seq oldsndmax = tp->snd_max; 2001 u_int sent; 2002 2003 KASSERT(tp->t_dupacks == 1 || 2004 tp->t_dupacks == 2, 2005 ("%s: dupacks not 1 or 2", 2006 __func__)); 2007 if (tp->t_dupacks == 1) 2008 tp->snd_limited = 0; 2009 tp->snd_cwnd = 2010 (tp->snd_nxt - tp->snd_una) + 2011 (tp->t_dupacks - tp->snd_limited) * 2012 tp->t_maxseg; 2013 (void) tcp_output(tp); 2014 sent = tp->snd_max - oldsndmax; 2015 if (sent > tp->t_maxseg) { 2016 KASSERT((tp->t_dupacks == 2 && 2017 tp->snd_limited == 0) || 2018 (sent == tp->t_maxseg + 1 && 2019 tp->t_flags & TF_SENTFIN), 2020 ("%s: sent too much", 2021 __func__)); 2022 tp->snd_limited = 2; 2023 } else if (sent > 0) 2024 ++tp->snd_limited; 2025 tp->snd_cwnd = oldcwnd; 2026 goto drop; 2027 } 2028 } else 2029 tp->t_dupacks = 0; 2030 break; 2031 } 2032 2033 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2034 ("%s: th_ack <= snd_una", __func__)); 2035 2036 /* 2037 * If the congestion window was inflated to account 2038 * for the other side's cached packets, retract it. 2039 */ 2040 if (tcp_do_newreno || tp->sack_enable) { 2041 if (IN_FASTRECOVERY(tp)) { 2042 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2043 if (tp->sack_enable) 2044 tcp_sack_partialack(tp, th); 2045 else 2046 tcp_newreno_partial_ack(tp, th); 2047 } else { 2048 /* 2049 * Out of fast recovery. 2050 * Window inflation should have left us 2051 * with approximately snd_ssthresh 2052 * outstanding data. 2053 * But in case we would be inclined to 2054 * send a burst, better to do it via 2055 * the slow start mechanism. 2056 */ 2057 if (SEQ_GT(th->th_ack + 2058 tp->snd_ssthresh, 2059 tp->snd_max)) 2060 tp->snd_cwnd = tp->snd_max - 2061 th->th_ack + 2062 tp->t_maxseg; 2063 else 2064 tp->snd_cwnd = tp->snd_ssthresh; 2065 } 2066 } 2067 } else { 2068 if (tp->t_dupacks >= tcprexmtthresh && 2069 tp->snd_cwnd > tp->snd_ssthresh) 2070 tp->snd_cwnd = tp->snd_ssthresh; 2071 } 2072 tp->t_dupacks = 0; 2073 /* 2074 * If we reach this point, ACK is not a duplicate, 2075 * i.e., it ACKs something we sent. 2076 */ 2077 if (tp->t_flags & TF_NEEDSYN) { 2078 /* 2079 * T/TCP: Connection was half-synchronized, and our 2080 * SYN has been ACK'd (so connection is now fully 2081 * synchronized). Go to non-starred state, 2082 * increment snd_una for ACK of SYN, and check if 2083 * we can do window scaling. 2084 */ 2085 tp->t_flags &= ~TF_NEEDSYN; 2086 tp->snd_una++; 2087 /* Do window scaling? 
*/
2088 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
2089 (TF_RCVD_SCALE|TF_REQ_SCALE)) {
2090 tp->rcv_scale = tp->request_r_scale;
2091 /* Send window already scaled. */
2092 }
2093 }
2094 
2095 process_ACK:
2096 KASSERT(headlocked, ("%s: process_ACK: head not locked",
2097 __func__));
2098 INP_LOCK_ASSERT(tp->t_inpcb);
2099 
2100 acked = th->th_ack - tp->snd_una;
2101 tcpstat.tcps_rcvackpack++;
2102 tcpstat.tcps_rcvackbyte += acked;
2103 
2104 /*
2105 * If we just performed our first retransmit, and the ACK
2106 * arrives within our recovery window, then it was a mistake
2107 * to do the retransmit in the first place. Recover our
2108 * original cwnd and ssthresh, and proceed to transmit where
2109 * we left off.
2110 */
2111 if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) {
2112 ++tcpstat.tcps_sndrexmitbad;
2113 tp->snd_cwnd = tp->snd_cwnd_prev;
2114 tp->snd_ssthresh = tp->snd_ssthresh_prev;
2115 tp->snd_recover = tp->snd_recover_prev;
2116 if (tp->t_flags & TF_WASFRECOVERY)
2117 ENTER_FASTRECOVERY(tp);
2118 tp->snd_nxt = tp->snd_max;
2119 tp->t_badrxtwin = 0; /* XXX probably not required */
2120 }
2121 
2122 /*
2123 * If we have a timestamp reply, update smoothed
2124 * round trip time. If no timestamp is present but
2125 * transmit timer is running and timed sequence
2126 * number was acked, update smoothed round trip time.
2127 * Since we now have an rtt measurement, cancel the
2128 * timer backoff (cf., Phil Karn's retransmit alg.).
2129 * Recompute the initial retransmit timer.
2130 *
2131 * Some boxes send broken timestamp replies
2132 * during the SYN+ACK phase; ignore
2133 * timestamps of 0, or we could calculate a
2134 * huge RTT and blow up the retransmit timer.
2135 */
2136 if ((to.to_flags & TOF_TS) != 0 &&
2137 to.to_tsecr) {
2138 if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr)
2139 tp->t_rttlow = ticks - to.to_tsecr;
2140 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1);
2141 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) {
2142 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime)
2143 tp->t_rttlow = ticks - tp->t_rtttime;
2144 tcp_xmit_timer(tp, ticks - tp->t_rtttime);
2145 }
2146 tcp_xmit_bandwidth_limit(tp, th->th_ack);
2147 
2148 /*
2149 * If all outstanding data is acked, stop retransmit
2150 * timer and remember to restart (more output or persist).
2151 * If there is more data to be acked, restart retransmit
2152 * timer, using current (possibly backed-off) value.
2153 */
2154 if (th->th_ack == tp->snd_max) {
2155 tcp_timer_activate(tp, TT_REXMT, 0);
2156 needoutput = 1;
2157 } else if (!tcp_timer_active(tp, TT_PERSIST))
2158 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur);
2159 
2160 /*
2161 * If no data (only SYN) was ACK'd,
2162 * skip rest of ACK processing.
2163 */
2164 if (acked == 0)
2165 goto step6;
2166 
2167 /*
2168 * When new data is acked, open the congestion window.
2169 * If the window gives us less than ssthresh packets
2170 * in flight, open exponentially (maxseg per packet).
2171 * Otherwise open linearly: maxseg per window
2172 * (maxseg^2 / cwnd per packet).
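*
* For illustration (assumed values): with t_maxseg = 1460 and
* snd_cwnd = 14600 above ssthresh, each ACK adds
* 1460 * 1460 / 14600 = 146 bytes, so about ten ACKs (one
* window's worth) grow cwnd by a full segment; at or below
* ssthresh every ACK adds the full 1460.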
2173 */ 2174 if ((!tcp_do_newreno && !tp->sack_enable) || 2175 !IN_FASTRECOVERY(tp)) { 2176 u_int cw = tp->snd_cwnd; 2177 u_int incr = tp->t_maxseg; 2178 if (cw > tp->snd_ssthresh) 2179 incr = incr * incr / cw; 2180 tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale); 2181 } 2182 SOCKBUF_LOCK(&so->so_snd); 2183 if (acked > so->so_snd.sb_cc) { 2184 tp->snd_wnd -= so->so_snd.sb_cc; 2185 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc); 2186 ourfinisacked = 1; 2187 } else { 2188 sbdrop_locked(&so->so_snd, acked); 2189 tp->snd_wnd -= acked; 2190 ourfinisacked = 0; 2191 } 2192 sowwakeup_locked(so); 2193 /* detect una wraparound */ 2194 if ((tcp_do_newreno || tp->sack_enable) && 2195 !IN_FASTRECOVERY(tp) && 2196 SEQ_GT(tp->snd_una, tp->snd_recover) && 2197 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2198 tp->snd_recover = th->th_ack - 1; 2199 if ((tcp_do_newreno || tp->sack_enable) && 2200 IN_FASTRECOVERY(tp) && 2201 SEQ_GEQ(th->th_ack, tp->snd_recover)) 2202 EXIT_FASTRECOVERY(tp); 2203 tp->snd_una = th->th_ack; 2204 if (tp->sack_enable) { 2205 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2206 tp->snd_recover = tp->snd_una; 2207 } 2208 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2209 tp->snd_nxt = tp->snd_una; 2210 2211 switch (tp->t_state) { 2212 2213 /* 2214 * In FIN_WAIT_1 STATE in addition to the processing 2215 * for the ESTABLISHED state if our FIN is now acknowledged 2216 * then enter FIN_WAIT_2. 2217 */ 2218 case TCPS_FIN_WAIT_1: 2219 if (ourfinisacked) { 2220 /* 2221 * If we can't receive any more 2222 * data, then closing user can proceed. 2223 * Starting the timer is contrary to the 2224 * specification, but if we don't get a FIN 2225 * we'll hang forever. 2226 */ 2227 /* XXXjl 2228 * we should release the tp also, and use a 2229 * compressed state. 2230 */ 2231 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2232 int timeout; 2233 2234 soisdisconnected(so); 2235 timeout = (tcp_fast_finwait2_recycle) ? 2236 tcp_finwait2_timeout : tcp_maxidle; 2237 tcp_timer_activate(tp, TT_2MSL, timeout); 2238 } 2239 tp->t_state = TCPS_FIN_WAIT_2; 2240 } 2241 break; 2242 2243 /* 2244 * In CLOSING STATE in addition to the processing for 2245 * the ESTABLISHED state if the ACK acknowledges our FIN 2246 * then enter the TIME-WAIT state, otherwise ignore 2247 * the segment. 2248 */ 2249 case TCPS_CLOSING: 2250 if (ourfinisacked) { 2251 KASSERT(headlocked, ("%s: process_ACK: " 2252 "head not locked", __func__)); 2253 tcp_twstart(tp); 2254 INP_INFO_WUNLOCK(&tcbinfo); 2255 headlocked = 0; 2256 m_freem(m); 2257 return (0); 2258 } 2259 break; 2260 2261 /* 2262 * In LAST_ACK, we may still be waiting for data to drain 2263 * and/or to be acked, as well as for the ack of our FIN. 2264 * If our FIN is now acknowledged, delete the TCB, 2265 * enter the closed state and return. 2266 */ 2267 case TCPS_LAST_ACK: 2268 if (ourfinisacked) { 2269 KASSERT(headlocked, ("%s: process_ACK: " 2270 "tcp_close: head not locked", __func__)); 2271 tp = tcp_close(tp); 2272 goto drop; 2273 } 2274 break; 2275 2276 /* 2277 * In TIME_WAIT state the only thing that should arrive 2278 * is a retransmission of the remote FIN. Acknowledge 2279 * it and restart the finack timer. 2280 */ 2281 case TCPS_TIME_WAIT: 2282 KASSERT(tp->t_state != TCPS_TIME_WAIT, 2283 ("%s: timewait", __func__)); 2284 tcp_timer_activate(tp, TT_2MSL, 2 * tcp_msl); 2285 goto dropafterack; 2286 } 2287 } 2288 2289step6: 2290 KASSERT(headlocked, ("%s: step6: head not locked", __func__)); 2291 INP_LOCK_ASSERT(tp->t_inpcb); 2292 2293 /* 2294 * Update window information. 
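* The three-way test below prefers fresher information: a newer
* sequence, then a newer ack for the same sequence, then a larger
* window for the same seq/ack pair, so reordered old segments
* cannot shrink the send window with stale data.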
2295 * Don't look at window if no ACK: TAC's send garbage on first SYN.
2296 */
2297 if ((thflags & TH_ACK) &&
2298 (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2299 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2300 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2301 /* keep track of pure window updates */
2302 if (tlen == 0 &&
2303 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2304 tcpstat.tcps_rcvwinupd++;
2305 tp->snd_wnd = tiwin;
2306 tp->snd_wl1 = th->th_seq;
2307 tp->snd_wl2 = th->th_ack;
2308 if (tp->snd_wnd > tp->max_sndwnd)
2309 tp->max_sndwnd = tp->snd_wnd;
2310 needoutput = 1;
2311 }
2312 
2313 /*
2314 * Process segments with URG.
2315 */
2316 if ((thflags & TH_URG) && th->th_urp &&
2317 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2318 /*
2319 * This is a kludge, but if we receive and accept
2320 * random urgent pointers, we'll crash in
2321 * soreceive. It's hard to imagine someone
2322 * actually wanting to send this much urgent data.
2323 */
2324 SOCKBUF_LOCK(&so->so_rcv);
2325 if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
2326 th->th_urp = 0; /* XXX */
2327 thflags &= ~TH_URG; /* XXX */
2328 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */
2329 goto dodata; /* XXX */
2330 }
2331 /*
2332 * If this segment advances the known urgent pointer,
2333 * then mark the data stream. This should not happen
2334 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2335 * a FIN has been received from the remote side.
2336 * In these states we ignore the URG.
2337 *
2338 * According to RFC961 (Assigned Protocols),
2339 * the urgent pointer points to the last octet
2340 * of urgent data. We continue, however,
2341 * to consider it to indicate the first octet
2342 * of data past the urgent section as the original
2343 * spec states (in one of two places).
2344 */
2345 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2346 tp->rcv_up = th->th_seq + th->th_urp;
2347 so->so_oobmark = so->so_rcv.sb_cc +
2348 (tp->rcv_up - tp->rcv_nxt) - 1;
2349 if (so->so_oobmark == 0)
2350 so->so_rcv.sb_state |= SBS_RCVATMARK;
2351 sohasoutofband(so);
2352 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2353 }
2354 SOCKBUF_UNLOCK(&so->so_rcv);
2355 /*
2356 * Remove out of band data so it doesn't get presented to the
2357 * user. This can happen independently of advancing the URG
2358 * pointer, but if two URGs are pending at once, some out-of-band
2359 * data may creep in... ick.
2360 */
2361 if (th->th_urp <= (u_long)tlen &&
2362 !(so->so_options & SO_OOBINLINE)) {
2363 /* hdr drop is delayed */
2364 tcp_pulloutofband(so, th, m, drop_hdrlen);
2365 }
2366 } else {
2367 /*
2368 * If no out of band data is expected,
2369 * pull receive urgent pointer along
2370 * with the receive window.
2371 */
2372 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2373 tp->rcv_up = tp->rcv_nxt;
2374 }
2375 dodata: /* XXX */
2376 KASSERT(headlocked, ("%s: dodata: head not locked", __func__));
2377 INP_LOCK_ASSERT(tp->t_inpcb);
2378 
2379 /*
2380 * Process the segment text, merging it into the TCP sequencing queue,
2381 * and arranging for acknowledgment of receipt if necessary.
2382 * This process logically involves adjusting tp->rcv_wnd as data
2383 * is presented to the user (this happens in tcp_usrreq.c,
2384 * case PRU_RCVD). If a FIN has already been received on this
2385 * connection then we just ignore the text.
2386 */ 2387 if ((tlen || (thflags & TH_FIN)) && 2388 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2389 tcp_seq save_start = th->th_seq; 2390 tcp_seq save_end = th->th_seq + tlen; 2391 m_adj(m, drop_hdrlen); /* delayed header drop */ 2392 /* 2393 * Insert segment which includes th into TCP reassembly queue 2394 * with control block tp. Set thflags to whether reassembly now 2395 * includes a segment with FIN. This handles the common case 2396 * inline (segment is the next to be received on an established 2397 * connection, and the queue is empty), avoiding linkage into 2398 * and removal from the queue and repetition of various 2399 * conversions. 2400 * Set DELACK for segments received in order, but ack 2401 * immediately when segments are out of order (so 2402 * fast retransmit can work). 2403 */ 2404 if (th->th_seq == tp->rcv_nxt && 2405 LIST_EMPTY(&tp->t_segq) && 2406 TCPS_HAVEESTABLISHED(tp->t_state)) { 2407 if (DELAY_ACK(tp)) 2408 tp->t_flags |= TF_DELACK; 2409 else 2410 tp->t_flags |= TF_ACKNOW; 2411 tp->rcv_nxt += tlen; 2412 thflags = th->th_flags & TH_FIN; 2413 tcpstat.tcps_rcvpack++; 2414 tcpstat.tcps_rcvbyte += tlen; 2415 ND6_HINT(tp); 2416 SOCKBUF_LOCK(&so->so_rcv); 2417 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 2418 m_freem(m); 2419 else 2420 sbappendstream_locked(&so->so_rcv, m); 2421 sorwakeup_locked(so); 2422 } else { 2423 thflags = tcp_reass(tp, th, &tlen, m); 2424 tp->t_flags |= TF_ACKNOW; 2425 } 2426 if (tlen > 0 && tp->sack_enable) 2427 tcp_update_sack_list(tp, save_start, save_end); 2428#if 0 2429 /* 2430 * Note the amount of data that peer has sent into 2431 * our window, in order to estimate the sender's 2432 * buffer size. 2433 * XXX: Unused. 2434 */ 2435 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 2436#endif 2437 } else { 2438 m_freem(m); 2439 thflags &= ~TH_FIN; 2440 } 2441 2442 /* 2443 * If FIN is received ACK the FIN and let the user know 2444 * that the connection is closing. 2445 */ 2446 if (thflags & TH_FIN) { 2447 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2448 socantrcvmore(so); 2449 /* 2450 * If connection is half-synchronized 2451 * (ie NEEDSYN flag on) then delay ACK, 2452 * so it may be piggybacked when SYN is sent. 2453 * Otherwise, since we received a FIN then no 2454 * more input can be expected, send ACK now. 2455 */ 2456 if (tp->t_flags & TF_NEEDSYN) 2457 tp->t_flags |= TF_DELACK; 2458 else 2459 tp->t_flags |= TF_ACKNOW; 2460 tp->rcv_nxt++; 2461 } 2462 switch (tp->t_state) { 2463 2464 /* 2465 * In SYN_RECEIVED and ESTABLISHED STATES 2466 * enter the CLOSE_WAIT state. 2467 */ 2468 case TCPS_SYN_RECEIVED: 2469 tp->t_starttime = ticks; 2470 /*FALLTHROUGH*/ 2471 case TCPS_ESTABLISHED: 2472 tp->t_state = TCPS_CLOSE_WAIT; 2473 break; 2474 2475 /* 2476 * If still in FIN_WAIT_1 STATE FIN has not been acked so 2477 * enter the CLOSING state. 2478 */ 2479 case TCPS_FIN_WAIT_1: 2480 tp->t_state = TCPS_CLOSING; 2481 break; 2482 2483 /* 2484 * In FIN_WAIT_2 state enter the TIME_WAIT state, 2485 * starting the time-wait timer, turning off the other 2486 * standard timers. 2487 */ 2488 case TCPS_FIN_WAIT_2: 2489 KASSERT(headlocked == 1, ("%s: dodata: " 2490 "TCP_FIN_WAIT_2: head not locked", __func__)); 2491 tcp_twstart(tp); 2492 INP_INFO_WUNLOCK(&tcbinfo); 2493 return (0); 2494 2495 /* 2496 * In TIME_WAIT state restart the 2 MSL time_wait timer. 
2497 */ 2498 case TCPS_TIME_WAIT: 2499 KASSERT(tp->t_state != TCPS_TIME_WAIT, 2500 ("%s: timewait", __func__)); 2501 tcp_timer_activate(tp, TT_2MSL, 2 * tcp_msl); 2502 break; 2503 } 2504 } 2505 INP_INFO_WUNLOCK(&tcbinfo); 2506 headlocked = 0; 2507#ifdef TCPDEBUG 2508 if (so->so_options & SO_DEBUG) 2509 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 2510 &tcp_savetcp, 0); 2511#endif 2512 2513 /* 2514 * Return any desired output. 2515 */ 2516 if (needoutput || (tp->t_flags & TF_ACKNOW)) 2517 (void) tcp_output(tp); 2518 2519check_delack: 2520 KASSERT(headlocked == 0, ("%s: check_delack: head locked", 2521 __func__)); 2522 INP_LOCK_ASSERT(tp->t_inpcb); 2523 if (tp->t_flags & TF_DELACK) { 2524 tp->t_flags &= ~TF_DELACK; 2525 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 2526 } 2527 INP_UNLOCK(tp->t_inpcb); 2528 return (0); 2529 2530dropafterack: 2531 KASSERT(headlocked, ("%s: dropafterack: head not locked", __func__)); 2532 /* 2533 * Generate an ACK dropping incoming segment if it occupies 2534 * sequence space, where the ACK reflects our state. 2535 * 2536 * We can now skip the test for the RST flag since all 2537 * paths to this code happen after packets containing 2538 * RST have been dropped. 2539 * 2540 * In the SYN-RECEIVED state, don't send an ACK unless the 2541 * segment we received passes the SYN-RECEIVED ACK test. 2542 * If it fails send a RST. This breaks the loop in the 2543 * "LAND" DoS attack, and also prevents an ACK storm 2544 * between two listening ports that have been sent forged 2545 * SYN segments, each with the source address of the other. 2546 */ 2547 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 2548 (SEQ_GT(tp->snd_una, th->th_ack) || 2549 SEQ_GT(th->th_ack, tp->snd_max)) ) { 2550 rstreason = BANDLIM_RST_OPENPORT; 2551 goto dropwithreset; 2552 } 2553#ifdef TCPDEBUG 2554 if (so->so_options & SO_DEBUG) 2555 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2556 &tcp_savetcp, 0); 2557#endif 2558 KASSERT(headlocked, ("%s: headlocked should be 1", __func__)); 2559 INP_INFO_WUNLOCK(&tcbinfo); 2560 tp->t_flags |= TF_ACKNOW; 2561 (void) tcp_output(tp); 2562 INP_UNLOCK(tp->t_inpcb); 2563 m_freem(m); 2564 return (0); 2565 2566dropwithreset: 2567 KASSERT(headlocked, ("%s: dropwithreset: head not locked", __func__)); 2568 2569 tcp_dropwithreset(m, th, tp, tlen, rstreason); 2570 2571 if (tp != NULL) 2572 INP_UNLOCK(tp->t_inpcb); 2573 if (headlocked) 2574 INP_INFO_WUNLOCK(&tcbinfo); 2575 return (0); 2576 2577drop: 2578 /* 2579 * Drop space held by incoming segment and return. 2580 */ 2581#ifdef TCPDEBUG 2582 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2583 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2584 &tcp_savetcp, 0); 2585#endif 2586 if (tp != NULL) 2587 INP_UNLOCK(tp->t_inpcb); 2588 if (headlocked) 2589 INP_INFO_WUNLOCK(&tcbinfo); 2590 m_freem(m); 2591 return (0); 2592} 2593 2594 2595/* 2596 * Issue RST on TCP segment. The mbuf must still include the original 2597 * packet header. 2598 */ 2599static void 2600tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 2601 int tlen, int rstreason) 2602{ 2603 struct ip *ip; 2604#ifdef INET6 2605 struct ip6_hdr *ip6; 2606#endif 2607 /* 2608 * Generate a RST, dropping incoming segment. 2609 * Make ACK acceptable to originator of segment. 2610 * Don't bother to respond if destination was broadcast/multicast. 2611 * tp may be NULL. 
2612 */ 2613 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 2614 goto drop; 2615#ifdef INET6 2616 if (mtod(m, struct ip *)->ip_v == 6) { 2617 ip6 = mtod(m, struct ip6_hdr *); 2618 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 2619 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 2620 goto drop; 2621 /* IPv6 anycast check is done at tcp6_input() */ 2622 } else 2623#endif 2624 { 2625 ip = mtod(m, struct ip *); 2626 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 2627 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 2628 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 2629 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 2630 goto drop; 2631 } 2632 2633 /* Perform bandwidth limiting. */ 2634 if (badport_bandlim(rstreason) < 0) 2635 goto drop; 2636 2637 /* tcp_respond consumes the mbuf chain. */ 2638 if (th->th_flags & TH_ACK) { 2639 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 2640 th->th_ack, TH_RST); 2641 } else { 2642 if (th->th_flags & TH_SYN) 2643 tlen++; 2644 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 2645 (tcp_seq)0, TH_RST|TH_ACK); 2646 } 2647 return; 2648drop: 2649 m_freem(m); 2650 return; 2651} 2652 2653/* 2654 * Parse TCP options and place in tcpopt. 2655 */ 2656static void 2657tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 2658{ 2659 int opt, optlen; 2660 2661 to->to_flags = 0; 2662 for (; cnt > 0; cnt -= optlen, cp += optlen) { 2663 opt = cp[0]; 2664 if (opt == TCPOPT_EOL) 2665 break; 2666 if (opt == TCPOPT_NOP) 2667 optlen = 1; 2668 else { 2669 if (cnt < 2) 2670 break; 2671 optlen = cp[1]; 2672 if (optlen < 2 || optlen > cnt) 2673 break; 2674 } 2675 switch (opt) { 2676 case TCPOPT_MAXSEG: 2677 if (optlen != TCPOLEN_MAXSEG) 2678 continue; 2679 if (!(flags & TO_SYN)) 2680 continue; 2681 to->to_flags |= TOF_MSS; 2682 bcopy((char *)cp + 2, 2683 (char *)&to->to_mss, sizeof(to->to_mss)); 2684 to->to_mss = ntohs(to->to_mss); 2685 break; 2686 case TCPOPT_WINDOW: 2687 if (optlen != TCPOLEN_WINDOW) 2688 continue; 2689 if (!(flags & TO_SYN)) 2690 continue; 2691 to->to_flags |= TOF_SCALE; 2692 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 2693 break; 2694 case TCPOPT_TIMESTAMP: 2695 if (optlen != TCPOLEN_TIMESTAMP) 2696 continue; 2697 to->to_flags |= TOF_TS; 2698 bcopy((char *)cp + 2, 2699 (char *)&to->to_tsval, sizeof(to->to_tsval)); 2700 to->to_tsval = ntohl(to->to_tsval); 2701 bcopy((char *)cp + 6, 2702 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 2703 to->to_tsecr = ntohl(to->to_tsecr); 2704 break; 2705#ifdef TCP_SIGNATURE 2706 /* 2707 * XXX In order to reply to a host which has set the 2708 * TCP_SIGNATURE option in its initial SYN, we have to 2709 * record the fact that the option was observed here 2710 * for the syncache code to perform the correct response. 
2711 */
2712 case TCPOPT_SIGNATURE:
2713 if (optlen != TCPOLEN_SIGNATURE)
2714 continue;
2715 to->to_flags |= (TOF_SIGNATURE | TOF_SIGLEN);
2716 break;
2717 #endif
2718 case TCPOPT_SACK_PERMITTED:
2719 if (optlen != TCPOLEN_SACK_PERMITTED)
2720 continue;
2721 if (!(flags & TO_SYN))
2722 continue;
2723 if (!tcp_do_sack)
2724 continue;
2725 to->to_flags |= TOF_SACKPERM;
2726 break;
2727 case TCPOPT_SACK:
2728 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
2729 continue;
2730 if (flags & TO_SYN)
2731 continue;
2732 to->to_flags |= TOF_SACK;
2733 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
2734 to->to_sacks = cp + 2;
2735 tcpstat.tcps_sack_rcv_blocks++;
2736 break;
2737 default:
2738 continue;
2739 }
2740 }
2741 }
2742 
2743 /*
2744 * Pull out of band byte out of a segment so
2745 * it doesn't appear in the user's data queue.
2746 * It is still reflected in the segment length for
2747 * sequencing purposes.
2748 */
2749 static void
2750 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
2751 int off)
2752 {
2753 int cnt = off + th->th_urp - 1;
2754 
2755 while (cnt >= 0) {
2756 if (m->m_len > cnt) {
2757 char *cp = mtod(m, caddr_t) + cnt;
2758 struct tcpcb *tp = sototcpcb(so);
2759 
2760 tp->t_iobc = *cp;
2761 tp->t_oobflags |= TCPOOB_HAVEDATA;
2762 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
2763 m->m_len--;
2764 if (m->m_flags & M_PKTHDR)
2765 m->m_pkthdr.len--;
2766 return;
2767 }
2768 cnt -= m->m_len;
2769 m = m->m_next;
2770 if (m == NULL)
2771 break;
2772 }
2773 panic("tcp_pulloutofband");
2774 }
2775 
2776 /*
2777 * Collect new round-trip time estimate
2778 * and update averages and current timeout.
2779 */
2780 static void
2781 tcp_xmit_timer(struct tcpcb *tp, int rtt)
2782 {
2783 int delta;
2784 
2785 INP_LOCK_ASSERT(tp->t_inpcb);
2786 
2787 tcpstat.tcps_rttupdated++;
2788 tp->t_rttupdated++;
2789 if (tp->t_srtt != 0) {
2790 /*
2791 * srtt is stored as fixed point with 5 bits after the
2792 * binary point (i.e., scaled by 32). The following magic
2793 * is equivalent to the smoothing algorithm in rfc793 with
2794 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
2795 * point). Adjust rtt to origin 0.
2796 */
2797 delta = ((rtt - 1) << TCP_DELTA_SHIFT)
2798 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
2799 
2800 if ((tp->t_srtt += delta) <= 0)
2801 tp->t_srtt = 1;
2802 
2803 /*
2804 * We accumulate a smoothed rtt variance (actually, a
2805 * smoothed mean difference), then set the retransmit
2806 * timer to smoothed rtt + 4 times the smoothed variance.
2807 * rttvar is stored as fixed point with 4 bits after the
2808 * binary point (scaled by 16). The following is
2809 * equivalent to rfc793 smoothing with an alpha of .75
2810 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
2811 * rfc793's wired-in beta.
2812 */
2813 if (delta < 0)
2814 delta = -delta;
2815 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
2816 if ((tp->t_rttvar += delta) <= 0)
2817 tp->t_rttvar = 1;
2818 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
2819 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2820 } else {
2821 /*
2822 * No rtt measurement yet - use the unsmoothed rtt.
2823 * Set the variance to half the rtt (so our first
2824 * retransmit happens at 3*rtt).
2825 */
2826 tp->t_srtt = rtt << TCP_RTT_SHIFT;
2827 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
2828 tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2829 }
2830 tp->t_rtttime = 0;
2831 tp->t_rxtshift = 0;
2832 
2833 /*
2834 * The retransmit should happen at rtt + 4 * rttvar.
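* (Worked example with assumed tick values: from srtt = 256, i.e.
* 8.0 ticks at 5 fractional bits, and rttvar = 64, i.e. 4.0 ticks
* at 4 fractional bits, a measured rtt of 4 gives
* delta = ((4 - 1) << 2) - (256 >> 3) = -20, so srtt becomes 236
* (7.375 ticks) and rttvar becomes 68 (4.25 ticks), for a nominal
* rexmt target near 7.375 + 4 * 4.25 = 24.375 ticks.)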
2835 * Because of the way we do the smoothing, srtt and rttvar 2836 * will each average +1/2 tick of bias. When we compute 2837 * the retransmit timer, we want 1/2 tick of rounding and 2838 * 1 extra tick because of +-1/2 tick uncertainty in the 2839 * firing of the timer. The bias will give us exactly the 2840 * 1.5 tick we need. But, because the bias is 2841 * statistical, we have to test that we don't drop below 2842 * the minimum feasible timer (which is 2 ticks). 2843 */ 2844 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 2845 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 2846 2847 /* 2848 * We received an ack for a packet that wasn't retransmitted; 2849 * it is probably safe to discard any error indications we've 2850 * received recently. This isn't quite right, but close enough 2851 * for now (a route might have failed after we sent a segment, 2852 * and the return path might not be symmetrical). 2853 */ 2854 tp->t_softerror = 0; 2855} 2856 2857/* 2858 * Determine a reasonable value for maxseg size. 2859 * If the route is known, check route for mtu. 2860 * If none, use an mss that can be handled on the outgoing 2861 * interface without forcing IP to fragment; if bigger than 2862 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES 2863 * to utilize large mbufs. If no route is found, route has no mtu, 2864 * or the destination isn't local, use a default, hopefully conservative 2865 * size (usually 512 or the default IP max size, but no more than the mtu 2866 * of the interface), as we can't discover anything about intervening 2867 * gateways or networks. We also initialize the congestion/slow start 2868 * window to be a single segment if the destination isn't local. 2869 * While looking at the routing entry, we also initialize other path-dependent 2870 * parameters from pre-set or cached values in the routing entry. 2871 * 2872 * Also take into account the space needed for options that we 2873 * send regularly. Make maxseg shorter by that amount to assure 2874 * that we can send maxseg amount of data even when the options 2875 * are present. Store the upper limit of the length of options plus 2876 * data in maxopd. 2877 * 2878 * 2879 * In case of T/TCP, we call this routine during implicit connection 2880 * setup as well (offer = -1), to initialize maxseg from the cached 2881 * MSS of our peer. 2882 * 2883 * NOTE that this routine is only called when we process an incoming 2884 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt(). 2885 */ 2886void 2887tcp_mss(struct tcpcb *tp, int offer) 2888{ 2889 int rtt, mss; 2890 u_long bufsize; 2891 u_long maxmtu; 2892 struct inpcb *inp = tp->t_inpcb; 2893 struct socket *so; 2894 struct hc_metrics_lite metrics; 2895 int origoffer = offer; 2896 int mtuflags = 0; 2897#ifdef INET6 2898 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 2899 size_t min_protoh = isipv6 ? 2900 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 2901 sizeof (struct tcpiphdr); 2902#else 2903 const size_t min_protoh = sizeof(struct tcpiphdr); 2904#endif 2905 2906 /* initialize */ 2907#ifdef INET6 2908 if (isipv6) { 2909 maxmtu = tcp_maxmtu6(&inp->inp_inc, &mtuflags); 2910 tp->t_maxopd = tp->t_maxseg = tcp_v6mssdflt; 2911 } else 2912#endif 2913 { 2914 maxmtu = tcp_maxmtu(&inp->inp_inc, &mtuflags); 2915 tp->t_maxopd = tp->t_maxseg = tcp_mssdflt; 2916 } 2917 so = inp->inp_socket; 2918 2919 /* 2920 * no route to sender, stay with default mss and return 2921 */ 2922 if (maxmtu == 0) 2923 return; 2924 2925 /* what have we got? 
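* (A quick map of the cases below, summarizing the comments above:
* offer == 0 means the SYN carried no MSS option; offer == -1 is
* the T/TCP implicit-setup call; anything else is a genuine peer
* offer and gets clamped below.)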
*/
2926 switch (offer) {
2927 case 0:
2928 /*
2929 * Offer == 0 means that there was no MSS on the SYN
2930 * segment; in this case we use tcp_mssdflt.
2931 */
2932 offer =
2933 #ifdef INET6
2934 isipv6 ? tcp_v6mssdflt :
2935 #endif
2936 tcp_mssdflt;
2937 break;
2938 
2939 case -1:
2940 /*
2941 * Offer == -1 means that we didn't receive a SYN yet.
2942 */
2943 /* FALLTHROUGH */
2944 
2945 default:
2946 /*
2947 * Prevent DoS attack with too small MSS. Round up
2948 * to at least minmss.
2949 */
2950 offer = max(offer, tcp_minmss);
2951 /*
2952 * Sanity check: make sure that maxopd will be large
2953 * enough to allow some data on segments even if all
2954 * the option space is used (40 bytes). Otherwise
2955 * funny things may happen in tcp_output.
2956 */
2957 offer = max(offer, 64);
2958 }
2959 
2960 /*
2961 * rmx information is now retrieved from tcp_hostcache.
2962 */
2963 tcp_hc_get(&inp->inp_inc, &metrics);
2964 
2965 /*
2966 * If there's a discovered mtu in the tcp hostcache, use it;
2967 * else, use the link mtu.
2968 */
2969 if (metrics.rmx_mtu)
2970 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
2971 else {
2972 #ifdef INET6
2973 if (isipv6) {
2974 mss = maxmtu - min_protoh;
2975 if (!path_mtu_discovery &&
2976 !in6_localaddr(&inp->in6p_faddr))
2977 mss = min(mss, tcp_v6mssdflt);
2978 } else
2979 #endif
2980 {
2981 mss = maxmtu - min_protoh;
2982 if (!path_mtu_discovery &&
2983 !in_localaddr(inp->inp_faddr))
2984 mss = min(mss, tcp_mssdflt);
2985 }
2986 }
2987 mss = min(mss, offer);
2988 
2989 /*
2990 * maxopd stores the maximum length of data AND options
2991 * in a segment; maxseg is the amount of data in a normal
2992 * segment. We need to store this value (maxopd) apart
2993 * from maxseg, because now every segment carries options
2994 * and thus we normally have somewhat less data in segments.
2995 */
2996 tp->t_maxopd = mss;
2997 
2998 /*
2999 * origoffer == -1 indicates that no segments were received yet.
3000 * In this case we just guess.
3001 */
3002 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
3003 (origoffer == -1 ||
3004 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
3005 mss -= TCPOLEN_TSTAMP_APPA;
3006 tp->t_maxseg = mss;
3007 
3008 #if (MCLBYTES & (MCLBYTES - 1)) == 0
3009 if (mss > MCLBYTES)
3010 mss &= ~(MCLBYTES-1);
3011 #else
3012 if (mss > MCLBYTES)
3013 mss = mss / MCLBYTES * MCLBYTES;
3014 #endif
3015 tp->t_maxseg = mss;
3016 
3017 /*
3018 * If there's a pipesize, change the socket buffer to that size;
3019 * don't change it if sb_hiwat is different from the default (then
3020 * it has been changed on purpose with setsockopt).
3021 * Make the socket buffers an integral number of mss units;
3022 * if the mss is larger than the socket buffer, decrease the mss.
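*
* For illustration (assumed defaults): with mss = 1460 and a
* 32768-byte send buffer, bufsize rounds up to 23 * 1460 = 33580
* and the reservation is grown to match; a 1024-byte buffer would
* instead cut the mss down to 1024.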
3023 */
3024 SOCKBUF_LOCK(&so->so_snd);
3025 if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe)
3026 bufsize = metrics.rmx_sendpipe;
3027 else
3028 bufsize = so->so_snd.sb_hiwat;
3029 if (bufsize < mss)
3030 mss = bufsize;
3031 else {
3032 bufsize = roundup(bufsize, mss);
3033 if (bufsize > sb_max)
3034 bufsize = sb_max;
3035 if (bufsize > so->so_snd.sb_hiwat)
3036 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3037 }
3038 SOCKBUF_UNLOCK(&so->so_snd);
3039 tp->t_maxseg = mss;
3040 
3041 SOCKBUF_LOCK(&so->so_rcv);
3042 if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe)
3043 bufsize = metrics.rmx_recvpipe;
3044 else
3045 bufsize = so->so_rcv.sb_hiwat;
3046 if (bufsize > mss) {
3047 bufsize = roundup(bufsize, mss);
3048 if (bufsize > sb_max)
3049 bufsize = sb_max;
3050 if (bufsize > so->so_rcv.sb_hiwat)
3051 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3052 }
3053 SOCKBUF_UNLOCK(&so->so_rcv);
3054 /*
3055 * While we're here, check the others too.
3056 */
3057 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
3058 tp->t_srtt = rtt;
3059 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
3060 tcpstat.tcps_usedrtt++;
3061 if (metrics.rmx_rttvar) {
3062 tp->t_rttvar = metrics.rmx_rttvar;
3063 tcpstat.tcps_usedrttvar++;
3064 } else {
3065 /* default variation is +- 1 rtt */
3066 tp->t_rttvar =
3067 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
3068 }
3069 TCPT_RANGESET(tp->t_rxtcur,
3070 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
3071 tp->t_rttmin, TCPTV_REXMTMAX);
3072 }
3073 if (metrics.rmx_ssthresh) {
3074 /*
3075 * There's some sort of gateway or interface
3076 * buffer limit on the path. Use this to set
3077 * the slow start threshold, but set the
3078 * threshold to no less than 2*mss.
3079 */
3080 tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh);
3081 tcpstat.tcps_usedssthresh++;
3082 }
3083 if (metrics.rmx_bandwidth)
3084 tp->snd_bandwidth = metrics.rmx_bandwidth;
3085 
3086 /*
3087 * Set the slow-start flight size depending on whether this
3088 * is a local network or not.
3089 *
3090 * Extend this so we cache the cwnd too and retrieve it here.
3091 * Make cwnd even bigger than RFC3390 suggests but only if we
3092 * have previous experience with the remote host. Be careful
3093 * not to make cwnd bigger than the remote receive window or our
3094 * own send socket buffer. Maybe put some additional upper bound
3095 * on the retrieved cwnd. Should do incremental updates to
3096 * hostcache when cwnd collapses so the next connection doesn't
3097 * overload the path again.
3098 *
3099 * RFC3390 says only do this if SYN or SYN/ACK didn't get lost.
3100 * We currently check only in syncache_socket for that.
3101 */
3102 #define TCP_METRICS_CWND
3103 #ifdef TCP_METRICS_CWND
3104 if (metrics.rmx_cwnd)
3105 tp->snd_cwnd = max(mss,
3106 min(metrics.rmx_cwnd / 2,
3107 min(tp->snd_wnd, so->so_snd.sb_hiwat)));
3108 else
3109 #endif
3110 if (tcp_do_rfc3390)
3111 tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380));
3112 #ifdef INET6
3113 else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) ||
3114 (!isipv6 && in_localaddr(inp->inp_faddr)))
3115 #else
3116 else if (in_localaddr(inp->inp_faddr))
3117 #endif
3118 tp->snd_cwnd = mss * ss_fltsz_local;
3119 else
3120 tp->snd_cwnd = mss * ss_fltsz;
3121 
3122 /* Check the interface for TSO capabilities. */
3123 if (mtuflags & CSUM_TSO)
3124 tp->t_flags |= TF_TSO;
3125 }
3126 
3127 /*
3128 * Determine the MSS option to send on an outgoing SYN.
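* For example (path values assumed): on IPv4 with a 1500-byte
* link MTU and no cached host MTU, this returns
* 1500 - sizeof(struct tcpiphdr) = 1460.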
3129 */
3130 int
3131 tcp_mssopt(struct in_conninfo *inc)
3132 {
3133 int mss = 0;
3134 u_long maxmtu = 0;
3135 u_long thcmtu = 0;
3136 size_t min_protoh;
3137 #ifdef INET6
3138 int isipv6 = inc->inc_isipv6 ? 1 : 0;
3139 #endif
3140 
3141 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3142 
3143 #ifdef INET6
3144 if (isipv6) {
3145 mss = tcp_v6mssdflt;
3146 maxmtu = tcp_maxmtu6(inc, NULL);
3147 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3148 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3149 } else
3150 #endif
3151 {
3152 mss = tcp_mssdflt;
3153 maxmtu = tcp_maxmtu(inc, NULL);
3154 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3155 min_protoh = sizeof(struct tcpiphdr);
3156 }
3157 if (maxmtu && thcmtu)
3158 mss = min(maxmtu, thcmtu) - min_protoh;
3159 else if (maxmtu || thcmtu)
3160 mss = max(maxmtu, thcmtu) - min_protoh;
3161 
3162 return (mss);
3163 }
3164 
3165 
3166 /*
3167 * When a partial ack arrives, force the retransmission of the
3168 * next unacknowledged segment. Do not clear tp->t_dupacks.
3169 * By setting snd_nxt to th_ack, this forces the retransmission
3170 * timer to be started again.
3171 */
3172 static void
3173 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
3174 {
3175 tcp_seq onxt = tp->snd_nxt;
3176 u_long ocwnd = tp->snd_cwnd;
3177 
3178 tcp_timer_activate(tp, TT_REXMT, 0);
3179 tp->t_rtttime = 0;
3180 tp->snd_nxt = th->th_ack;
3181 /*
3182 * Set snd_cwnd to one segment beyond acknowledged offset.
3183 * (tp->snd_una has not yet been updated when this function is called.)
3184 */
3185 tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una);
3186 tp->t_flags |= TF_ACKNOW;
3187 (void) tcp_output(tp);
3188 tp->snd_cwnd = ocwnd;
3189 if (SEQ_GT(onxt, tp->snd_nxt))
3190 tp->snd_nxt = onxt;
3191 /*
3192 * Partial window deflation. Relies on fact that tp->snd_una
3193 * not updated yet.
3194 */
3195 if (tp->snd_cwnd > th->th_ack - tp->snd_una)
3196 tp->snd_cwnd -= th->th_ack - tp->snd_una;
3197 else
3198 tp->snd_cwnd = 0;
3199 tp->snd_cwnd += tp->t_maxseg;
3200 }
3201 
3202 /*
3203 * Returns 1 if the TIME_WAIT state was killed and we should start over,
3204 * looking for a pcb in the listen state. Returns 0 otherwise.
3205 */
3206 static int
3207 tcp_timewait(struct inpcb *inp, struct tcpopt *to, struct tcphdr *th,
3208 struct mbuf *m, int tlen)
3209 {
3210 struct tcptw *tw;
3211 int thflags;
3212 tcp_seq seq;
3213 #ifdef INET6
3214 int isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
3215 #else
3216 const int isipv6 = 0;
3217 #endif
3218 
3219 /* tcbinfo lock required for tcp_twclose(), tcp_timer_2msl_reset(). */
3220 INP_INFO_WLOCK_ASSERT(&tcbinfo);
3221 INP_LOCK_ASSERT(inp);
3222 
3223 /*
3224 * XXXRW: Time wait state for inpcb has been recycled, but inpcb is
3225 * still present. This is undesirable, but temporarily necessary
3226 * until we work out how to handle inpcbs whose timewait state has
3227 * been removed.
3228 */
3229 tw = intotw(inp);
3230 if (tw == NULL)
3231 goto drop;
3232 
3233 thflags = th->th_flags;
3234 
3235 /*
3236 * NOTE: for FIN_WAIT_2 (to be added later),
3237 * must validate sequence number before accepting RST
3238 */
3239 
3240 /*
3241 * If the segment contains RST:
3242 * Drop the segment - see Stevens, vol. 2, p. 964 and
3243 * RFC 1337.
3244 */
3245 if (thflags & TH_RST)
3246 goto drop;
3247 
3248 #if 0
3249 /* PAWS not needed at the moment */
3250 /*
3251 * RFC 1323 PAWS: If we have a timestamp reply on this segment
3252 * and it's less than ts_recent, drop it.
3253 */
3254 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
3255 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
3256 if ((thflags & TH_ACK) == 0)
3257 goto drop;
3258 goto ack;
3259 }
3260 /*
3261 * ts_recent is never updated because we never accept new segments.
3262 */
3263 #endif
3264 
3265 /*
3266 * If a new connection request is received
3267 * while in TIME_WAIT, drop the old connection
3268 * and start over if the sequence numbers
3269 * are above the previous ones.
3270 */
3271 if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
3272 tcp_twclose(tw, 0);
3273 return (1);
3274 }
3275 
3276 /*
3277 * Drop the segment if it does not contain an ACK.
3278 */
3279 if ((thflags & TH_ACK) == 0)
3280 goto drop;
3281 
3282 /*
3283 * Reset the 2MSL timer if this is a duplicate FIN.
3284 */
3285 if (thflags & TH_FIN) {
3286 seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
3287 if (seq + 1 == tw->rcv_nxt)
3288 tcp_timer_2msl_reset(tw, 1);
3289 }
3290 
3291 /*
3292 * Acknowledge the segment if it has data or is not a duplicate ACK.
3293 */
3294 if (thflags != TH_ACK || tlen != 0 ||
3295 th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
3296 tcp_twrespond(tw, TH_ACK);
3297 goto drop;
3298 
3299 /*
3300 * Generate a RST, dropping incoming segment.
3301 * Make ACK acceptable to originator of segment.
3302 * Don't bother to respond if destination was broadcast/multicast.
3303 */
3304 if (m->m_flags & (M_BCAST|M_MCAST))
3305 goto drop;
3306 if (isipv6) {
3307 struct ip6_hdr *ip6;
3308 
3309 /* IPv6 anycast check is done at tcp6_input() */
3310 ip6 = mtod(m, struct ip6_hdr *);
3311 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3312 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3313 goto drop;
3314 } else {
3315 struct ip *ip;
3316 
3317 ip = mtod(m, struct ip *);
3318 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3319 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3320 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3321 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3322 goto drop;
3323 }
3324 if (thflags & TH_ACK) {
3325 tcp_respond(NULL,
3326 mtod(m, void *), th, m, 0, th->th_ack, TH_RST);
3327 } else {
3328 seq = th->th_seq + (thflags & TH_SYN ? 1 : 0);
3329 tcp_respond(NULL,
3330 mtod(m, void *), th, m, seq, 0, TH_RST|TH_ACK);
3331 }
3332 INP_UNLOCK(inp);
3333 return (0);
3334 
3335 drop:
3336 INP_UNLOCK(inp);
3337 m_freem(m);
3338 return (0);
3339 }
3340