/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 * $FreeBSD: head/sys/netinet/tcp_input.c 168364 2007-04-04 14:44:15Z andre $
 */

#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /*FAST_IPSEC*/

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netkey/key.h>
#endif /*IPSEC*/

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>
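
/*
 * Dup-ACK threshold for fast retransmit (RFC 2581).  Illustrative trace,
 * not from this file: segments 1-5 are sent and segment 2 is lost; the
 * arrivals of 3, 4 and 5 each elicit a duplicate ACK for 2.  The third
 * duplicate ACK reaches tcprexmtthresh and triggers a fast retransmit of
 * segment 2 without waiting for the retransmit timer.
 */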
static const int tcprexmtthresh = 3;

struct	tcpstat tcpstat;
SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
    &tcpstat, tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");

static int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0, "Log all incoming TCP segments to closed ports");

static int blackhole = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &blackhole, 0, "Do not send RST on segments to closed ports");

int tcp_delack_enabled = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &tcp_delack_enabled, 0,
    "Delay ACK to try and piggyback it onto a data packet");

static int drop_synfin = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");

static int tcp_do_rfc3042 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
    &tcp_do_rfc3042, 0, "Enable RFC 3042 (Limited Transmit)");

static int tcp_do_rfc3390 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

static int tcp_insecure_rst = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
    &tcp_insecure_rst, 0,
    "Follow the old (insecure) criteria for accepting RST packets");

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
    "TCP Segment Reassembly Queue");

static int tcp_reass_maxseg = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RDTUN,
    &tcp_reass_maxseg, 0,
    "Global maximum number of TCP Segments in Reassembly Queue");

int tcp_reass_qsize = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD,
    &tcp_reass_qsize, 0,
    "Global number of TCP Segments currently in Reassembly Queue");

static int tcp_reass_maxqlen = 48;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxqlen, CTLFLAG_RW,
    &tcp_reass_maxqlen, 0,
    "Maximum number of TCP Segments per individual Reassembly Queue");

static int tcp_reass_overflows = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD,
    &tcp_reass_overflows, 0,
    "Global number of TCP Segment Reassembly Queue Overflows");

int	tcp_do_autorcvbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing");

int	tcp_autorcvbuf_inc = 16*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &tcp_autorcvbuf_inc, 0,
    "Incrementor step size of automatic receive buffer");

int	tcp_autorcvbuf_max = 256*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer");

struct inpcbhead tcb;
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
struct inpcbinfo tcbinfo;
struct mtx	*tcbinfo_mtx;

static void	 tcp_dooptions(struct tcpopt *, u_char *, int, int);
static int	 tcp_do_segment(struct mbuf *, struct tcphdr *,
		     struct socket *, struct tcpcb *, int, int);
static void	 tcp_dropwithreset(struct mbuf *, struct tcphdr *,
		     struct tcpcb *, int, int);
static void	 tcp_pulloutofband(struct socket *,
		     struct tcphdr *, struct mbuf *, int);
static int	 tcp_reass(struct tcpcb *, struct tcphdr *, int *,
		     struct mbuf *);
static void	 tcp_xmit_timer(struct tcpcb *, int);
static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
static int	 tcp_timewait(struct inpcb *, struct tcpopt *,
		     struct tcphdr *, struct mbuf *, int);

/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
		nd6_nud_hint(NULL, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 *	- there is no delayed ack timer in progress and
 *	- our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window and
 *		- delayed acks are enabled or
 *		- this is a half-synchronized T/TCP connection.
 */
#define DELAY_ACK(tp)							\
	((!callout_active(tp->tt_delack) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
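
/*
 * Worked DELAY_ACK() cases (illustrative, assuming the default
 * net.inet.tcp.delayed_ack=1):
 *  - delack timer already armed (every-other-segment ack):  0, ack now
 *  - last ack advertised a zero window (TF_RXWIN0SENT set): 0, ack now
 *  - otherwise, delayed acks on or half-synched T/TCP:      1, delay
 */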

/* Initialize TCP reassembly queue */
static void
tcp_reass_zone_change(void *tag)
{

	tcp_reass_maxseg = nmbclusters / 16;
	uma_zone_set_max(tcp_reass_zone, tcp_reass_maxseg);
}

uma_zone_t	tcp_reass_zone;

void
tcp_reass_init(void)
{

	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
	    &tcp_reass_maxseg);
	tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcp_reass_zone, tcp_reass_maxseg);
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    tcp_reass_zone_change, NULL, EVENTHANDLER_PRI_ANY);
}

static int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
{
	struct tseg_qent *q;
	struct tseg_qent *p = NULL;
	struct tseg_qent *nq;
	struct tseg_qent *te = NULL;
	struct socket *so = tp->t_inpcb->inp_socket;
	int flags;

	INP_LOCK_ASSERT(tp->t_inpcb);

	/*
	 * XXX: tcp_reass() is rather inefficient with its data structures
	 * and should be rewritten (see NetBSD for optimizations).  While
	 * doing that it should move to its own file tcp_reass.c.
	 */

	/*
	 * Call with th==NULL after the connection becomes established to
	 * force pre-ESTABLISHED data up to the user socket.
	 */
	if (th == NULL)
		goto present;

	/*
	 * Limit the number of segments in the reassembly queue to prevent
	 * holding on to too many segments (and thus running out of mbufs).
	 * Make sure to let the missing segment through which caused this
	 * queue.  Always keep one global queue entry spare to be able to
	 * process the missing segment.
	 */
	if (th->th_seq != tp->rcv_nxt &&
	    (tcp_reass_qsize + 1 >= tcp_reass_maxseg ||
	     tp->t_segqlen >= tcp_reass_maxqlen)) {
		tcp_reass_overflows++;
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		*tlenp = 0;
		return (0);
	}

	/*
	 * Allocate a new queue entry.  If we can't, or hit the zone limit
	 * just drop the pkt.
	 */
	te = uma_zalloc(tcp_reass_zone, M_NOWAIT);
	if (te == NULL) {
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		*tlenp = 0;
		return (0);
	}
	tp->t_segqlen++;
	tcp_reass_qsize++;

	/*
	 * Find a segment which begins after this one does.
	 */
	LIST_FOREACH(q, &tp->t_segq, tqe_q) {
		if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
			break;
		p = q;
	}
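
	/*
	 * Example of the trim arithmetic below (illustrative numbers): a
	 * queued predecessor covers [100, 150) (th_seq 100, len 50) and
	 * the new segment starts at th_seq 130, so i = 100 + 50 - 130 =
	 * 20 overlapping bytes are dropped from the front of the new
	 * segment: m_adj(m, 20), *tlenp -= 20 and th_seq becomes 150.
	 * Were i >= *tlenp, the predecessor would cover us entirely and
	 * the new segment would be dropped as a full duplicate.
	 */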

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		int i;
		/* conversion to int (in i) handles seq wraparound */
		i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
		if (i > 0) {
			if (i >= *tlenp) {
				tcpstat.tcps_rcvduppack++;
				tcpstat.tcps_rcvdupbyte += *tlenp;
				m_freem(m);
				uma_zfree(tcp_reass_zone, te);
				tp->t_segqlen--;
				tcp_reass_qsize--;
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes.
				 */
				goto present;	/* ??? */
			}
			m_adj(m, i);
			*tlenp -= i;
			th->th_seq += i;
		}
	}
	tcpstat.tcps_rcvoopack++;
	tcpstat.tcps_rcvoobyte += *tlenp;

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q) {
		int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
		if (i <= 0)
			break;
		if (i < q->tqe_len) {
			q->tqe_th->th_seq += i;
			q->tqe_len -= i;
			m_adj(q->tqe_m, i);
			break;
		}

		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		uma_zfree(tcp_reass_zone, q);
		tp->t_segqlen--;
		tcp_reass_qsize--;
		q = nq;
	}

	/* Insert the new segment queue entry into place. */
	te->tqe_m = m;
	te->tqe_th = th;
	te->tqe_len = *tlenp;

	if (p == NULL) {
		LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
	} else {
		LIST_INSERT_AFTER(p, te, tqe_q);
	}

present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state))
		return (0);
	q = LIST_FIRST(&tp->t_segq);
	if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
		return (0);
	SOCKBUF_LOCK(&so->so_rcv);
	do {
		tp->rcv_nxt += q->tqe_len;
		flags = q->tqe_th->th_flags & TH_FIN;
		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			m_freem(q->tqe_m);
		else
			sbappendstream_locked(&so->so_rcv, q->tqe_m);
		uma_zfree(tcp_reass_zone, q);
		tp->t_segqlen--;
		tcp_reass_qsize--;
		q = nq;
	} while (q && q->tqe_th->th_seq == tp->rcv_nxt);
	ND6_HINT(tp);
	sorwakeup_locked(so);
	return (flags);
}
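
/*
 * Reassembly walkthrough (illustrative): with rcv_nxt = 100, a segment for
 * [200, 300) arrives first and is merely queued; nothing is handed to the
 * socket because 200 != rcv_nxt.  When [100, 200) arrives, the present:
 * loop appends both segments in order, rcv_nxt advances to 300, and the
 * TH_FIN flag, if carried by the last segment delivered, is returned to
 * the caller for normal FIN processing.
 */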

/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return IPPROTO_DONE;
	}

	tcp_input(m, *offp);
	return IPPROTO_DONE;
}
#endif

void
tcp_input(struct mbuf *m, int off0)
{
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ipovly *ipov;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int optlen = 0;
	int len, tlen, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
#ifdef IPFIREWALL_FORWARD
	struct m_tag *fwd_tag;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
	char ip6buf[INET6_ADDRSTRLEN];
#else
	const int isipv6 = 0;
#endif
	struct tcpopt to;	/* options in this segment */

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	to.to_flags = 0;
	tcpstat.tcps_rcvtotal++;

	if (isipv6) {
#ifdef INET6
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
		ip6 = mtod(m, struct ip6_hdr *);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		th = (struct tcphdr *)((caddr_t)ip6 + off0);

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate unbounded/unconnected pcb,
		 * unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
#else
		th = NULL;	/* XXX: avoid compiler warning */
#endif
	} else {
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m, (struct mbuf *)0);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				tcpstat.tcps_rcvshort++;
				return;
			}
		}
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ip->ip_len;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data +
					ip->ip_len +
					IPPROTO_TCP));
			th->th_sum ^= 0xffff;
#ifdef TCPDEBUG
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
#endif
		} else {
			/*
			 * Checksum extended TCP header and data.
			 */
			len = sizeof (struct ip) + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
			th->th_sum = in_cksum(m, len);
		}
		if (th->th_sum) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
	}
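
	/*
	 * A note on the checksum paths above (sketch): with full
	 * CSUM_DATA_VALID|CSUM_PSEUDO_HDR offload the NIC already verified
	 * the packet and csum_data holds the final sum.  With partial
	 * offload, csum_data is only the one's complement sum over the TCP
	 * header and payload, so in_pseudo() folds in the pseudo-header
	 * (src, dst, length, IPPROTO_TCP); "th_sum ^= 0xffff" then inverts
	 * the result so that a valid packet yields th_sum == 0, the same
	 * invariant the software in_cksum() path produces.
	 */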

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		tcpstat.tcps_rcvbadoff++;
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
		if (isipv6) {
#ifdef INET6
			IP6_EXTHDR_CHECK(m, off0, off, );
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
#endif
		} else {
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					tcpstat.tcps_rcvshort++;
					return;
				}
				ip = mtod(m, struct ip *);
				ipov = (struct ipovly *)ip;
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * If the drop_synfin option is enabled, drop all packets with
	 * both the SYN and FIN bits set.  This prevents e.g. nmap from
	 * identifying the TCP/IP stack.
	 *
	 * This is a violation of the TCP specification.
	 */
	if (drop_synfin && (thflags & (TH_SYN|TH_FIN)) == (TH_SYN|TH_FIN))
		goto drop;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	th->th_seq = ntohl(th->th_seq);
	th->th_ack = ntohl(th->th_ack);
	th->th_win = ntohs(th->th_win);
	th->th_urp = ntohs(th->th_urp);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Locate pcb for segment.
	 */
	INP_INFO_WLOCK(&tcbinfo);
findpcb:
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
#ifdef IPFIREWALL_FORWARD
	/* Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. */
	fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

	if (fwd_tag != NULL && isipv6 == 0) {	/* IPv6 support is not yet */
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * already got one like this?
		 */
		inp = in_pcblookup_hash(&tcbinfo,
		    ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport,
		    0, m->m_pkthdr.rcvif);
		if (!inp) {
			/* It's new.  Try to find the ambushing socket. */
			inp = in_pcblookup_hash(&tcbinfo,
			    ip->ip_src, th->th_sport,
			    next_hop->sin_addr,
			    next_hop->sin_port ?
				ntohs(next_hop->sin_port) :
				th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
		}
		/* Remove the tag from the packet.  We don't need it anymore. */
		m_tag_delete(m, fwd_tag);
	} else
#endif /* IPFIREWALL_FORWARD */
	{
		if (isipv6) {
#ifdef INET6
			inp = in6_pcblookup_hash(&tcbinfo,
			    &ip6->ip6_src, th->th_sport,
			    &ip6->ip6_dst, th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
#endif
		} else
			inp = in_pcblookup_hash(&tcbinfo,
			    ip->ip_src, th->th_sport,
			    ip->ip_dst, th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
	}

#if defined(IPSEC) || defined(FAST_IPSEC)
#ifdef INET6
	if (isipv6 && inp != NULL && ipsec6_in_reject(m, inp)) {
#ifdef IPSEC
		ipsec6stat.in_polvio++;
#endif
		goto dropunlock;
	} else
#endif /* INET6 */
	if (inp != NULL && ipsec4_in_reject(m, inp)) {
#ifdef IPSEC
		ipsecstat.in_polvio++;
#endif
		goto dropunlock;
	}
#endif /*IPSEC || FAST_IPSEC*/

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
#ifndef INET6
			char dbuf[4*sizeof "123"], sbuf[4*sizeof "123"];
#else
			char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2];
			if (isipv6) {
				strcpy(dbuf, "[");
				strcat(dbuf,
				    ip6_sprintf(ip6buf, &ip6->ip6_dst));
				strcat(dbuf, "]");
				strcpy(sbuf, "[");
				strcat(sbuf,
				    ip6_sprintf(ip6buf, &ip6->ip6_src));
				strcat(sbuf, "]");
			} else
#endif /* INET6 */
			{
				strcpy(dbuf, inet_ntoa(ip->ip_dst));
				strcpy(sbuf, inet_ntoa(ip->ip_src));
			}
			log(LOG_INFO,
			    "Connection attempt to TCP %s:%d "
			    "from %s:%d flags:0x%02x\n",
			    dbuf, ntohs(th->th_dport), sbuf,
			    ntohs(th->th_sport), thflags);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((blackhole == 1 && (thflags & TH_SYN)) ||
		    blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_LOCK(inp);

	/* Check the minimum TTL for socket. */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
			goto dropunlock;
		else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch
	 * stray or duplicate segments arriving late.  If this segment
	 * was a legitimate new connection attempt the old INPCB gets
	 * removed and we can try again to find a listening socket.
	 */
	if (inp->inp_vflag & INP_TIMEWAIT) {
		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		if (tcp_timewait(inp, &to, th, m, tlen))
			goto findpcb;
		/* tcp_timewait unlocks inp. */
		INP_INFO_WUNLOCK(&tcbinfo);
		return;
	}
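
	/*
	 * Example of the TIME_WAIT recycling above (illustrative): a
	 * legitimate new SYN that hits an old connection's TIME_WAIT inpcb
	 * makes tcp_timewait() discard the stale inpcb and return non-zero,
	 * so we jump back to findpcb and this time match the listening
	 * socket instead.  Late duplicates and strays are absorbed by
	 * tcp_timewait() itself, which then returns zero.
	 */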

	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL) {
		INP_UNLOCK(inp);
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	if (tp->t_state == TCPS_CLOSED)
		goto dropunlock;	/* XXX: dropwithreset??? */

#ifdef MAC
	INP_LOCK_ASSERT(inp);
	if (mac_check_inpcb_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
		if (isipv6)
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
		else
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

		bzero(&inc, sizeof(inc));
		inc.inc_isipv6 = isipv6;
#ifdef INET6
		if (isipv6) {
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;

		/*
		 * If the state is LISTEN then ignore segment if it contains
		 * a RST.  If the segment contains an ACK then it is bad and
		 * send a RST.  If it does not contain a SYN then it is not
		 * interesting; drop it.
		 *
		 * If the state is SYN_RECEIVED (syncache) and seg contains
		 * an ACK, but not for our SYN/ACK, send a RST.  If the seg
		 * contains a RST, check the sequence number to see if it
		 * is a valid reset segment.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) {
			if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
				/*
				 * Parse the TCP options here because
				 * syncookies need access to the reflected
				 * timestamp.
				 */
				tcp_dooptions(&to, optp, optlen, 0);
				if (!syncache_expand(&inc, &to, th, &so, m)) {
					/*
					 * No syncache entry, or ACK was not
					 * for our SYN/ACK.  Send a RST.
					 */
					tcpstat.tcps_badsyn++;
					rstreason = BANDLIM_RST_OPENPORT;
					goto dropwithreset;
				}
				if (so == NULL) {
					/*
					 * Could not complete 3-way handshake,
					 * connection is being closed down, and
					 * syncache has free'd mbuf.
					 */
					INP_UNLOCK(inp);
					INP_INFO_WUNLOCK(&tcbinfo);
					return;
				}
				/*
				 * Socket is created in state SYN_RECEIVED.
				 * Continue processing segment.
				 */
				INP_UNLOCK(inp);
				inp = sotoinpcb(so);
				INP_LOCK(inp);
				tp = intotcpcb(inp);
				/*
				 * This is what would have happened in
				 * tcp_output() when the SYN,ACK was sent.
				 */
				tp->snd_up = tp->snd_una;
				tp->snd_max = tp->snd_nxt = tp->iss + 1;
				tp->last_ack_sent = tp->rcv_nxt;

				/*
				 * Process the segment and the data it
				 * contains.  tcp_do_segment() consumes
				 * the mbuf chain and unlocks the inpcb.
				 * XXX: The potential return value of
				 * TIME_WAIT nuked is supposed to be
				 * handled above.
				 */
				if (tcp_do_segment(m, th, so, tp,
				    drop_hdrlen, tlen))
					goto findpcb;	/* TIME_WAIT nuked */
				return;
			}
			if (thflags & TH_RST) {
				syncache_chkrst(&inc, th);
				goto dropunlock;
			}
			if (thflags & TH_ACK) {
				syncache_badack(&inc);
				tcpstat.tcps_badsyn++;
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			goto dropunlock;
		}
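
		/*
		 * Handshake walkthrough (sketch): the initial SYN is
		 * handled further below via syncache_add(), which replies
		 * with a SYN,ACK and keeps only compressed state; no
		 * socket exists yet.  The ACK completing the 3-WHS arrives
		 * here, syncache_expand() creates the full socket in
		 * SYN_RECEIVED, and the code above replays what
		 * tcp_output() would have done when the SYN,ACK was sent
		 * before handing the segment to tcp_do_segment().
		 */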

		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 */
#ifdef INET6
		/*
		 * If deprecated address is forbidden,
		 * we do not accept SYN to deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  We compromise it as it is much better for peer
		 * to send a RST, and RST will be the final packet
		 * for the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says like this:
		 * 1. use of deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple descriptions of deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			if ((ia6 = ip6_getdstifaddr(m)) &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				INP_UNLOCK(inp);
				tp = NULL;
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
		}
#endif
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *
		 * Don't bother responding if the destination was a
		 * broadcast according to RFC1122 4.2.3.10, p. 104.
		 *
		 * If it is from this socket, drop it, it must be forged.
		 *
		 * Note that it is quite possible to receive unicast
		 * link-layer packets with a broadcast IP address.  Use
		 * in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST))
			goto dropunlock;
		if (isipv6) {
#ifdef INET6
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src))
				goto dropunlock;
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
				goto dropunlock;
#endif
		} else {
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr)
				goto dropunlock;
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
				goto dropunlock;
		}
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
		if (so->so_qlen <= so->so_qlimit) {
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
			tcp_dooptions(&to, optp, optlen, TO_SYN);
			if (!syncache_add(&inc, &to, th, inp, &so, m))
				goto dropunlock;
			/*
			 * Entry added to syncache, mbuf used to
			 * send SYN-ACK packet.  Everything unlocked
			 * already.
			 */
			return;
		}
		/* Catch all.  Everything that makes it down here is junk. */
		goto dropunlock;
	}

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or
	 * later state.  tcp_do_segment() always consumes the mbuf chain
	 * and unlocks the inpcb.
	 */
	if (tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen))
		goto findpcb;	/* XXX: TIME_WAIT was nuked. */
	return;

dropwithreset:
	tcp_dropwithreset(m, th, tp, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
dropunlock:
	if (tp != NULL)
		INP_UNLOCK(inp);
	INP_INFO_WUNLOCK(&tcbinfo);
drop:
	if (m != NULL)
		m_freem(m);
	return;
}

static int
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen)
{
	int thflags, acked, ourfinisacked, needoutput = 0;
	int headlocked = 1;
	int rstreason, todrop, win;
	u_long tiwin;
	struct tcpopt to;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_LOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", __func__));

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		callout_reset(tp->tt_keep, tcp_keepidle, tcp_timer_keep, tp);

	/*
	 * Unscale the window into a 32-bit value.
	 * This value is bogus for the TCPS_SYN_SENT state
	 * and is overwritten later.
	 */
	tiwin = th->th_win << tp->snd_scale;

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, ticks))
			to.to_tsecr = 0;
	}

	/*
	 * Process options only when we get SYN/ACK back.  The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
			tp->snd_wnd = th->th_win << tp->snd_scale;
			tiwin = tp->snd_wnd;
		}
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = ticks;
		}
		/* Initial send window, already scaled. */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if (tp->sack_enable) {
			if (!(to.to_flags & TOF_SACKPERM))
				tp->sack_enable = 0;
			else
				tp->t_flags |= TF_SACK_PERMIT;
		}
	}
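
	/*
	 * Window scale arithmetic (illustrative numbers): a raw th_win of
	 * 5000 with snd_scale = 7 unscales to a 640000 byte send window
	 * (5000 << 7).  Windows carried in SYN segments are never scaled
	 * (RFC 1323), which is why the SYN/ACK handling above reloads
	 * snd_wnd with the raw th_win value.
	 */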

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED above, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) &&
	    th->th_seq == tp->rcv_nxt && tiwin && tiwin == tp->snd_wnd &&
	    tp->snd_nxt == tp->snd_max) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = ticks;
			tp->ts_recent = to.to_tsval;
		}

		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    tp->snd_cwnd >= tp->snd_wnd &&
			    ((!tcp_do_newreno && !tp->sack_enable &&
			      tp->t_dupacks < tcprexmtthresh) ||
			     ((tcp_do_newreno || tp->sack_enable) &&
			      !IN_FASTRECOVERY(tp) &&
			      (to.to_flags & TOF_SACK) == 0 &&
			      TAILQ_EMPTY(&tp->snd_holes)))) {
				KASSERT(headlocked,
				    ("%s: headlocked", __func__));
				INP_INFO_WUNLOCK(&tcbinfo);
				headlocked = 0;
				/*
				 * This is a pure ack for outstanding data.
				 */
				++tcpstat.tcps_predack;
				/*
				 * "bad retransmit" recovery.
				 */
				if (tp->t_rxtshift == 1 &&
				    ticks < tp->t_badrxtwin) {
					++tcpstat.tcps_sndrexmitbad;
					tp->snd_cwnd = tp->snd_cwnd_prev;
					tp->snd_ssthresh =
					    tp->snd_ssthresh_prev;
					tp->snd_recover = tp->snd_recover_prev;
					if (tp->t_flags & TF_WASFRECOVERY)
						ENTER_FASTRECOVERY(tp);
					tp->snd_nxt = tp->snd_max;
					tp->t_badrxtwin = 0;
				}
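
				/*
				 * Sketch of the "bad retransmit" test
				 * above: t_badrxtwin is armed when the
				 * first timeout retransmission goes
				 * out.  If this ACK arrives within
				 * that window, the timeout is assumed
				 * spurious (e.g. a delay spike), so
				 * cwnd, ssthresh and snd_recover are
				 * restored to their pre-timeout values.
				 */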

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - to.to_tsecr)
						tp->t_rttlow = ticks - to.to_tsecr;
					tcp_xmit_timer(tp,
					    ticks - to.to_tsecr + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
				tcp_xmit_bandwidth_limit(tp, th->th_ack);
				acked = th->th_ack - tp->snd_una;
				tcpstat.tcps_rcvackpack++;
				tcpstat.tcps_rcvackbyte += acked;
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;
				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap
				 * relative to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);
				ND6_HINT(tp); /* Some progress has been made. */

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.

#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				 */
				if (tp->snd_una == tp->snd_max)
					callout_stop(tp->tt_rexmt);
				else if (!callout_active(tp->tt_persist))
					callout_reset(tp->tt_rexmt,
					    tp->t_rxtcur,
					    tcp_timer_rexmt, tp);

				sowwakeup(so);
				if (so->so_snd.sb_cc)
					(void) tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    LIST_EMPTY(&tp->t_segq) &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			KASSERT(headlocked, ("%s: headlocked", __func__));
			INP_INFO_WUNLOCK(&tcbinfo);
			headlocked = 0;
			/*
			 * This is a pure, in-sequence data packet with
			 * nothing on the reassembly queue and we have
			 * enough buffer space to take it.
			 */
			/* Clean receiver SACK report if present. */
			if (tp->sack_enable && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			++tcpstat.tcps_preddat;
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			tcpstat.tcps_rcvpack++;
			tcpstat.tcps_rcvbyte += tlen;
			ND6_HINT(tp);	/* Some progress has been made. */
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
			/*
			 * Automatic sizing of receive socket buffer.  Often the send
			 * buffer size is not optimally adjusted to the actual network
			 * conditions at hand (delay bandwidth product).  Setting the
			 * buffer size too small limits throughput on links with high
			 * bandwidth and high delay (e.g. trans-continental/oceanic links).
			 *
			 * On the receive side the socket buffer memory is only rarely
			 * used to any significant extent.  This allows us to be much
			 * more aggressive in scaling the receive socket buffer.  For
			 * the case that the buffer space is actually used to a large
			 * extent and we run out of kernel memory we can simply drop
			 * the new segments; TCP on the sender will just retransmit it
			 * later.  Setting the buffer size too big may only consume too
			 * much kernel memory if the application doesn't read() from
			 * the socket or packet loss or reordering makes use of the
			 * reassembly queue.
			 *
			 * The criteria to step up the receive buffer one notch are:
			 *  1. the number of bytes received during the time it takes
			 *     one timestamp to be reflected back to us (the RTT);
			 *  2. received bytes per RTT is within seven eighths of the
			 *     current socket buffer size;
			 *  3. receive buffer size has not hit maximal automatic size;
			 *
			 * This algorithm does one step per RTT at most and only if
			 * we receive a bulk stream w/o packet losses or reorderings.
			 * Shrinking the buffer during idle times is not necessary as
			 * it doesn't consume any memory when idle.
			 *
			 * TODO: Only step up if the application is actually serving
			 * the buffer to better manage the socket buffer resources.
			 */
			if (tcp_do_autorcvbuf &&
			    to.to_tsecr &&
			    (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
				if (to.to_tsecr > tp->rfbuf_ts &&
				    to.to_tsecr - tp->rfbuf_ts < hz) {
					if (tp->rfbuf_cnt >
					    (so->so_rcv.sb_hiwat / 8 * 7) &&
					    so->so_rcv.sb_hiwat <
					    tcp_autorcvbuf_max) {
						newsize =
						    min(so->so_rcv.sb_hiwat +
						    tcp_autorcvbuf_inc,
						    tcp_autorcvbuf_max);
					}
					/* Start over with next RTT. */
					tp->rfbuf_ts = 0;
					tp->rfbuf_cnt = 0;
				} else
					tp->rfbuf_cnt += tlen;	/* add up */
			}
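
			/*
			 * Worked example of one auto-sizing step, assuming
			 * the default sysctls above (recvbuf_inc 16K,
			 * recvbuf_max 256K): with sb_hiwat at 64K, a
			 * receiver that accumulated rfbuf_cnt > 56K (7/8
			 * of 64K) within one reflected-timestamp RTT grows
			 * the buffer to min(64K + 16K, 256K) = 80K; the
			 * new size is applied below via sbreserve_locked().
			 */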

			/* Add data to socket buffer. */
			SOCKBUF_LOCK(&so->so_rcv);
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
				m_freem(m);
			} else {
				/*
				 * Set new socket buffer size.
				 * Give up when limit is reached.
				 */
				if (newsize)
					if (!sbreserve_locked(&so->so_rcv,
					    newsize, so, curthread))
						so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
				m_adj(m, drop_hdrlen);	/* delayed header drop */
				sbappendstream_locked(&so->so_rcv, m);
			}
			sorwakeup_locked(so);
			if (DELAY_ACK(tp)) {
				tp->t_flags |= TF_DELACK;
			} else {
				tp->t_flags |= TF_ACKNOW;
				tcp_output(tp);
			}
			goto check_delack;
		}
	}

	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	win = sbspace(&so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));

	/* Reset receive buffer auto scaling when not in bulk receive mode. */
	tp->rfbuf_ts = 0;
	tp->rfbuf_cnt = 0;

	switch (tp->t_state) {

	/*
	 * If the state is SYN_RECEIVED:
	 *	if seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	case TCPS_SYN_RECEIVED:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		break;
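
	/*
	 * Example of the SYN_RECEIVED ACK check above (illustrative): with
	 * iss = 1000 and a SYN,ACK consuming one sequence number, snd_una
	 * = 1000 and snd_max = 1001, so the only acceptable ACK value is
	 * 1001.  An ACK of 1000 (<= snd_una) or 1500 (> snd_max) is
	 * answered with a RST.
	 */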

	/*
	 * If the state is SYN_SENT:
	 *	if seg contains an ACK, but not for our SYN, drop the input.
	 *	if seg contains a RST, then drop the connection.
	 *	if seg does not contain SYN, then drop it.
	 * Otherwise this is an acceptable SYN segment
	 *	initialize tp->rcv_nxt and tp->irs
	 *	if seg contains ack then advance tp->snd_una
	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	 *	arrange for segment to be acked (eventually)
	 *	continue processing rest of data/controls, beginning with URG
	 */
	case TCPS_SYN_SENT:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->iss) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_UNLIMITED;
			goto dropwithreset;
		}
		if (thflags & TH_RST) {
			if (thflags & TH_ACK) {
				KASSERT(headlocked, ("%s: after_listen: "
				    "tcp_drop.2: head not locked", __func__));
				tp = tcp_drop(tp, ECONNREFUSED);
			}
			goto drop;
		}
		if ((thflags & TH_SYN) == 0)
			goto drop;

		tp->irs = th->th_seq;
		tcp_rcvseqinit(tp);
		if (thflags & TH_ACK) {
			tcpstat.tcps_connects++;
			soisconnected(so);
#ifdef MAC
			SOCK_LOCK(so);
			mac_set_socket_peer_from_mbuf(m, so);
			SOCK_UNLOCK(so);
#endif
			/* Do window scaling on this connection? */
			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
				tp->rcv_scale = tp->request_r_scale;
			}
			tp->rcv_adv += tp->rcv_wnd;
			tp->snd_una++;		/* SYN is acked */
			/*
			 * If there's data, delay ACK; if there's also a FIN
			 * ACKNOW will be turned on later.
			 */
			if (DELAY_ACK(tp) && tlen != 0)
				callout_reset(tp->tt_delack, tcp_delacktime,
				    tcp_timer_delack, tp);
			else
				tp->t_flags |= TF_ACKNOW;
			/*
			 * Received <SYN,ACK> in SYN_SENT[*] state.
			 * Transitions:
			 *	SYN_SENT  --> ESTABLISHED
			 *	SYN_SENT* --> FIN_WAIT_1
			 */
			tp->t_starttime = ticks;
			if (tp->t_flags & TF_NEEDFIN) {
				tp->t_state = TCPS_FIN_WAIT_1;
				tp->t_flags &= ~TF_NEEDFIN;
				thflags &= ~TH_SYN;
			} else {
				tp->t_state = TCPS_ESTABLISHED;
				callout_reset(tp->tt_keep, tcp_keepidle,
				    tcp_timer_keep, tp);
			}
		} else {
			/*
			 * Received initial SYN in SYN-SENT[*] state =>
			 * simultaneous open.  If segment contains CC option
			 * and there is a cached CC, apply TAO test.
			 * If it succeeds, connection is half-synchronized.
			 * Otherwise, do 3-way handshake:
			 *	SYN-SENT -> SYN-RECEIVED
			 *	SYN-SENT* -> SYN-RECEIVED*
			 * If there was no CC option, clear cached CC value.
			 */
			tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
			callout_stop(tp->tt_rexmt);
			tp->t_state = TCPS_SYN_RECEIVED;
		}

		KASSERT(headlocked, ("%s: trimthenstep6: head not locked",
		    __func__));
		INP_LOCK_ASSERT(tp->t_inpcb);

		/*
		 * Advance th->th_seq to correspond to first data byte.
		 * If data, trim to stay within window,
		 * dropping FIN if necessary.
		 */
		th->th_seq++;
		if (tlen > tp->rcv_wnd) {
			todrop = tlen - tp->rcv_wnd;
			m_adj(m, -todrop);
			tlen = tp->rcv_wnd;
			thflags &= ~TH_FIN;
			tcpstat.tcps_rcvpackafterwin++;
			tcpstat.tcps_rcvbyteafterwin += todrop;
		}
		tp->snd_wl1 = th->th_seq - 1;
		tp->rcv_up = th->th_seq;
		/*
		 * Client side of transaction: already sent SYN and data.
		 * If the remote host used T/TCP to validate the SYN,
		 * our data will be ACK'd; if so, enter normal data segment
		 * processing in the middle of step 5, ack processing.
		 * Otherwise, goto step 6.
		 */
		if (thflags & TH_ACK)
			goto process_ACK;

		goto step6;

	/*
	 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
	 *	do normal processing.
	 *
	 * NB: Leftover from RFC1644 T/TCP.  Cases to be reused later.
	 */
	case TCPS_LAST_ACK:
	case TCPS_CLOSING:
	case TCPS_TIME_WAIT:
		KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: timewait",
		    __func__));
		break;  /* continue normal processing */
	}

	/*
	 * States other than LISTEN or SYN_SENT.
	 * First check the RST flag and sequence number since reset segments
	 * are exempt from the timestamp and connection count tests.  This
	 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix
	 * below which allowed reset segments in half the sequence space
	 * to fall through and be processed (which gives forged reset
	 * segments with a random sequence number a 50 percent chance of
	 * killing a connection).
	 * Then check timestamp, if present.
	 * Then check the connection count, if present.
	 * Then check that at least some bytes of segment are within
	 * receive window.  If segment begins before rcv_nxt,
	 * drop leading data (and SYN); if nothing left, just ack.
	 *
	 * If the RST bit is set, check the sequence number to see
	 * if this is a valid reset segment.
	 * RFC 793 page 37:
	 *   In all states except SYN-SENT, all reset (RST) segments
	 *   are validated by checking their SEQ-fields.  A reset is
	 *   valid if its sequence number is in the window.
	 * Note: this does not take into account delayed ACKs, so
	 *   we should test against last_ack_sent instead of rcv_nxt.
	 *   The sequence number in the reset segment is normally an
	 *   echo of our outgoing acknowledgement numbers, but some hosts
	 *   send a reset with the sequence number at the rightmost edge
	 *   of our receive window, and we have to handle this case.
	 * Note 2: Paul Watson's paper "Slipping in the Window" has shown
	 *   that brute force RST attacks are possible.  To combat this,
	 *   we use a much stricter check while in the ESTABLISHED state,
	 *   only accepting RSTs where the sequence number is equal to
	 *   last_ack_sent.  In all other states (the states in which a
	 *   RST is more likely), the more permissive check is used.
	 * If we have multiple segments in flight, the initial reset
	 * segment sequence numbers will be to the left of last_ack_sent,
	 * but they will eventually catch up.
	 * In any case, it never made sense to trim reset segments to
	 * fit the receive window since RFC 1122 says:
	 *   4.2.2.12  RST Segment: RFC-793 Section 3.4
	 *
	 *     A TCP SHOULD allow a received RST segment to include data.
	 *
	 *     DISCUSSION
	 *          It has been suggested that a RST segment could contain
	 *          ASCII text that encoded and explained the cause of the
	 *          RST.  No standard has yet been established for such
	 *          data.
	 *
	 * If the reset segment passes the sequence number test examine
	 * the state:
	 *    SYN_RECEIVED STATE:
	 *	If passive open, return to LISTEN state.
	 *	If active open, inform user that connection was refused.
	 *    ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES:
	 *	Inform user that connection was reset, and close tcb.
	 *    CLOSING, LAST_ACK STATES:
	 *	Close the tcb.
	 *    TIME_WAIT STATE:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
	 */
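	/*
	 * Numeric sketch of the checks below: with last_ack_sent = 1000
	 * and rcv_wnd = 5000, any RST with seq in [999, 6000] is at least
	 * considered.  In ESTABLISHED (and with the default
	 * net.inet.tcp.insecure_rst=0) it must additionally hit rcv_nxt
	 * or last_ack_sent within +/-1, e.g. seq 1000, or it only bumps
	 * tcps_badrst and is dropped.
	 */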
	if (thflags & TH_RST) {
		if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) {
			switch (tp->t_state) {

			case TCPS_SYN_RECEIVED:
				so->so_error = ECONNREFUSED;
				goto close;

			case TCPS_ESTABLISHED:
				if (tcp_insecure_rst == 0 &&
				    !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) &&
				      SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) &&
				    !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) &&
				      SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) {
					tcpstat.tcps_badrst++;
					goto drop;
				}
			case TCPS_FIN_WAIT_1:
			case TCPS_FIN_WAIT_2:
			case TCPS_CLOSE_WAIT:
				so->so_error = ECONNRESET;
			close:
				tp->t_state = TCPS_CLOSED;
				tcpstat.tcps_drops++;
				KASSERT(headlocked, ("%s: trimthenstep6: "
				    "tcp_close: head not locked", __func__));
				tp = tcp_close(tp);
				break;

			case TCPS_CLOSING:
			case TCPS_LAST_ACK:
				KASSERT(headlocked, ("%s: trimthenstep6: "
				    "tcp_close.2: head not locked", __func__));
				tp = tcp_close(tp);
				break;

			case TCPS_TIME_WAIT:
				KASSERT(tp->t_state != TCPS_TIME_WAIT,
				    ("%s: timewait", __func__));
				break;
			}
		}
		goto drop;
	}

	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {

		/* Check to see if ts_recent is over 24 days old.  */
		if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) {
			/*
			 * Invalidate ts_recent.  If this segment updates
			 * ts_recent, the age will be reset later and ts_recent
			 * will get a valid value.  If it does not, setting
			 * ts_recent to zero will at least satisfy the
			 * requirement that zero be placed in the timestamp
			 * echo reply when ts_recent isn't valid.  The
			 * age isn't reset until we get a valid ts_recent
			 * because we don't want out-of-order segments to be
			 * dropped when ts_recent is old.
			 */
			tp->ts_recent = 0;
		} else {
			tcpstat.tcps_rcvduppack++;
			tcpstat.tcps_rcvdupbyte += tlen;
			tcpstat.tcps_pawsdrop++;
			if (tlen)
				goto dropafterack;
			goto drop;
		}
	}

	/*
	 * In the SYN-RECEIVED state, validate that the packet belongs to
	 * this connection before trimming the data to fit the receive
	 * window.  Check the sequence number versus IRS since we know
	 * the sequence numbers haven't wrapped.  This is a partial fix
	 * for the "LAND" DoS attack.
	 */
	if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) {
		rstreason = BANDLIM_RST_OPENPORT;
		goto dropwithreset;
	}

	todrop = tp->rcv_nxt - th->th_seq;
	if (todrop > 0) {
		if (thflags & TH_SYN) {
			thflags &= ~TH_SYN;
			th->th_seq++;
			if (th->th_urp > 1)
				th->th_urp--;
			else
				thflags &= ~TH_URG;
			todrop--;
		}
		/*
		 * Following if statement from Stevens, vol. 2, p. 960.
		 */
		if (todrop > tlen
		    || (todrop == tlen && (thflags & TH_FIN) == 0)) {
			/*
			 * Any valid FIN must be to the left of the window.
			 * At this point the FIN must be a duplicate or out
			 * of sequence; drop it.
			 */
			thflags &= ~TH_FIN;

			/*
			 * Send an ACK to resynchronize and drop any data.
			 * But keep on processing for RST or ACK.
			 */
			tp->t_flags |= TF_ACKNOW;
			todrop = tlen;
			tcpstat.tcps_rcvduppack++;
			tcpstat.tcps_rcvdupbyte += todrop;
		} else {
			tcpstat.tcps_rcvpartduppack++;
			tcpstat.tcps_rcvpartdupbyte += todrop;
		}
		drop_hdrlen += todrop;	/* drop from the top afterwards */
		th->th_seq += todrop;
		tlen -= todrop;
		if (th->th_urp > todrop)
			th->th_urp -= todrop;
		else {
			thflags &= ~TH_URG;
			th->th_urp = 0;
		}
	}
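
	/*
	 * Worked example of the duplicate-data trim above (illustrative):
	 * rcv_nxt = 1000 and a segment arrives with th_seq = 900 and
	 * tlen = 300.  todrop = 100, so the first 100 bytes are
	 * already-delivered duplicates: drop_hdrlen grows by 100 (the
	 * bytes are cut when the header is dropped), th_seq becomes 1000
	 * and tlen 200, leaving only new data.
	 */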

	/*
	 * If new data are received on a connection after the
	 * user processes are gone, then RST the other end.
	 */
	if ((so->so_state & SS_NOFDREF) &&
	    tp->t_state > TCPS_CLOSE_WAIT && tlen) {
		KASSERT(headlocked, ("%s: trimthenstep6: tcp_close.3: head "
		    "not locked", __func__));
		tp = tcp_close(tp);
		tcpstat.tcps_rcvafterclose++;
		rstreason = BANDLIM_UNLIMITED;
		goto dropwithreset;
	}

	/*
	 * If segment ends after window, drop trailing data
	 * (and PUSH and FIN); if nothing left, just ACK.
	 */
	todrop = (th->th_seq+tlen) - (tp->rcv_nxt+tp->rcv_wnd);
	if (todrop > 0) {
		tcpstat.tcps_rcvpackafterwin++;
		if (todrop >= tlen) {
			tcpstat.tcps_rcvbyteafterwin += tlen;
			/*
			 * If a new connection request is received
			 * while in TIME_WAIT, drop the old connection
			 * and start over if the sequence numbers
			 * are above the previous ones.
			 */
			KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: timewait",
			    __func__));
			if (thflags & TH_SYN &&
			    tp->t_state == TCPS_TIME_WAIT &&
			    SEQ_GT(th->th_seq, tp->rcv_nxt)) {
				KASSERT(headlocked, ("%s: trimthenstep6: "
				    "tcp_close.4: head not locked", __func__));
				tp = tcp_close(tp);
				/* XXX: Shouldn't be possible. */
				return (1);
			}
			/*
			 * If window is closed can only take segments at
			 * window edge, and have to drop data and PUSH from
			 * incoming segments.  Continue processing, but
			 * remember to ack.  Otherwise, drop segment
			 * and ack.
			 */
			if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) {
				tp->t_flags |= TF_ACKNOW;
				tcpstat.tcps_rcvwinprobe++;
			} else
				goto dropafterack;
		} else
			tcpstat.tcps_rcvbyteafterwin += todrop;
		m_adj(m, -todrop);
		tlen -= todrop;
		thflags &= ~(TH_PUSH|TH_FIN);
	}

	/*
	 * If last ACK falls within this segment's sequence numbers,
	 * record its timestamp.
	 * NOTE:
	 * 1) That the test incorporates suggestions from the latest
	 *    proposal of the tcplw@cray.com list (Braden 1993/04/26).
	 * 2) That updating only on newer timestamps interferes with
	 *    our earlier PAWS tests, so this check should be solely
	 *    predicated on the sequence space of this segment.
	 * 3) That we modify the segment boundary check to be
	 *        Last.ACK.Sent <= SEG.SEQ + SEG.Len
	 *    instead of RFC1323's
	 *        Last.ACK.Sent < SEG.SEQ + SEG.Len,
	 *    This modified check allows us to overcome RFC1323's
	 *    limitations as described in Stevens TCP/IP Illustrated
	 *    Vol. 2 p.869.  In such cases, we can still calculate the
	 *    RTT correctly when RCV.NXT == Last.ACK.Sent.
	 */
	if ((to.to_flags & TOF_TS) != 0 &&
	    SEQ_LEQ(th->th_seq, tp->last_ack_sent) &&
	    SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen +
		((thflags & (TH_SYN|TH_FIN)) != 0))) {
		tp->ts_recent_age = ticks;
		tp->ts_recent = to.to_tsval;
	}

	/*
	 * If a SYN is in the window, then this is an
	 * error and we send an RST and drop the connection.
	 */
	if (thflags & TH_SYN) {
		KASSERT(headlocked, ("%s: tcp_drop: trimthenstep6: "
		    "head not locked", __func__));
		tp = tcp_drop(tp, ECONNRESET);
		rstreason = BANDLIM_UNLIMITED;
		goto drop;
	}

	/*
	 * If the ACK bit is off:  if in SYN-RECEIVED state or SENDSYN
	 * flag is on (half-synchronized state), then queue data for
	 * later processing; else drop segment and return.
	 */
	if ((thflags & TH_ACK) == 0) {
		if (tp->t_state == TCPS_SYN_RECEIVED ||
		    (tp->t_flags & TF_NEEDSYN))
			goto step6;
		else if (tp->t_flags & TF_ACKNOW)
			goto dropafterack;
		else
			goto drop;
	}

	/*
	 * Ack processing.
	 */
	switch (tp->t_state) {

	/*
	 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
	 * ESTABLISHED state and continue processing.
	 * The ACK was checked above.
	 */
	case TCPS_SYN_RECEIVED:

		tcpstat.tcps_connects++;
		soisconnected(so);
		/* Do window scaling? */
		if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
		    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
			tp->rcv_scale = tp->request_r_scale;
			tp->snd_wnd = tiwin;
		}
		/*
		 * Make transitions:
		 *      SYN-RECEIVED  -> ESTABLISHED
		 *      SYN-RECEIVED* -> FIN-WAIT-1
		 */
		tp->t_starttime = ticks;
		if (tp->t_flags & TF_NEEDFIN) {
			tp->t_state = TCPS_FIN_WAIT_1;
			tp->t_flags &= ~TF_NEEDFIN;
		} else {
			tp->t_state = TCPS_ESTABLISHED;
			callout_reset(tp->tt_keep, tcp_keepidle,
			    tcp_timer_keep, tp);
		}
		/*
		 * If segment contains data or ACK, will call tcp_reass()
		 * later; if not, do so now to pass queued data to user.
		 */
		if (tlen == 0 && (thflags & TH_FIN) == 0)
			(void) tcp_reass(tp, (struct tcphdr *)0, 0,
			    (struct mbuf *)0);
		tp->snd_wl1 = th->th_seq - 1;
		/* FALLTHROUGH */
1891 */ 1892 case TCPS_ESTABLISHED: 1893 case TCPS_FIN_WAIT_1: 1894 case TCPS_FIN_WAIT_2: 1895 case TCPS_CLOSE_WAIT: 1896 case TCPS_CLOSING: 1897 case TCPS_LAST_ACK: 1898 case TCPS_TIME_WAIT: 1899 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: timewait", 1900 __func__)); 1901 if (SEQ_GT(th->th_ack, tp->snd_max)) { 1902 tcpstat.tcps_rcvacktoomuch++; 1903 goto dropafterack; 1904 } 1905 if (tp->sack_enable && 1906 ((to.to_flags & TOF_SACK) || 1907 !TAILQ_EMPTY(&tp->snd_holes))) 1908 tcp_sack_doack(tp, &to, th->th_ack); 1909 if (SEQ_LEQ(th->th_ack, tp->snd_una)) { 1910 if (tlen == 0 && tiwin == tp->snd_wnd) { 1911 tcpstat.tcps_rcvdupack++; 1912 /* 1913 * If we have outstanding data (other than 1914 * a window probe), this is a completely 1915 * duplicate ack (i.e., window info didn't 1916 * change), the ack is the biggest we've 1917 * seen and we've seen exactly our rexmt 1918 * threshold of them, assume a packet 1919 * has been dropped and retransmit it. 1920 * Kludge snd_nxt & the congestion 1921 * window so we send only this one 1922 * packet. 1923 * 1924 * We know we're losing at the current 1925 * window size so do congestion avoidance 1926 * (set ssthresh to half the current window 1927 * and pull our congestion window back to 1928 * the new ssthresh). 1929 * 1930 * Dup acks mean that packets have left the 1931 * network (they're now cached at the receiver) 1932 * so bump cwnd by the amount in the receiver 1933 * to keep a constant cwnd's worth of packets in 1934 * the network. 1935 */ 1936 if (!callout_active(tp->tt_rexmt) || 1937 th->th_ack != tp->snd_una) 1938 tp->t_dupacks = 0; 1939 else if (++tp->t_dupacks > tcprexmtthresh || 1940 ((tcp_do_newreno || tp->sack_enable) && 1941 IN_FASTRECOVERY(tp))) { 1942 if (tp->sack_enable && IN_FASTRECOVERY(tp)) { 1943 int awnd; 1944 1945 /* 1946 * Compute the amount of data in flight first. 1947 * We can inject new data into the pipe iff 1948 * we have less than 1/2 the original window's 1949 * worth of data in flight. 1950 */ 1951 awnd = (tp->snd_nxt - tp->snd_fack) + 1952 tp->sackhint.sack_bytes_rexmit; 1953 if (awnd < tp->snd_ssthresh) { 1954 tp->snd_cwnd += tp->t_maxseg; 1955 if (tp->snd_cwnd > tp->snd_ssthresh) 1956 tp->snd_cwnd = tp->snd_ssthresh; 1957 } 1958 } else 1959 tp->snd_cwnd += tp->t_maxseg; 1960 (void) tcp_output(tp); 1961 goto drop; 1962 } else if (tp->t_dupacks == tcprexmtthresh) { 1963 tcp_seq onxt = tp->snd_nxt; 1964 u_int win; 1965 1966 /* 1967 * If we're doing sack, check to 1968 * see if we're already in sack 1969 * recovery. If we're not doing sack, 1970 * check to see if we're in newreno 1971 * recovery.
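 * Worked example (editorial illustration, values invented): on the third duplicate ACK with snd_wnd = snd_cwnd = 32768 and t_maxseg = 1460, the code below computes win = 32768 / 2 / 1460 = 11, so snd_ssthresh becomes 11 * 1460 = 16060 and snd_recover is pegged at snd_max before the lost segment is retransmitted.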
1972 */ 1973 if (tp->sack_enable) { 1974 if (IN_FASTRECOVERY(tp)) { 1975 tp->t_dupacks = 0; 1976 break; 1977 } 1978 } else if (tcp_do_newreno) { 1979 if (SEQ_LEQ(th->th_ack, 1980 tp->snd_recover)) { 1981 tp->t_dupacks = 0; 1982 break; 1983 } 1984 } 1985 win = min(tp->snd_wnd, tp->snd_cwnd) / 1986 2 / tp->t_maxseg; 1987 if (win < 2) 1988 win = 2; 1989 tp->snd_ssthresh = win * tp->t_maxseg; 1990 ENTER_FASTRECOVERY(tp); 1991 tp->snd_recover = tp->snd_max; 1992 callout_stop(tp->tt_rexmt); 1993 tp->t_rtttime = 0; 1994 if (tp->sack_enable) { 1995 tcpstat.tcps_sack_recovery_episode++; 1996 tp->sack_newdata = tp->snd_nxt; 1997 tp->snd_cwnd = tp->t_maxseg; 1998 (void) tcp_output(tp); 1999 goto drop; 2000 } 2001 tp->snd_nxt = th->th_ack; 2002 tp->snd_cwnd = tp->t_maxseg; 2003 (void) tcp_output(tp); 2004 KASSERT(tp->snd_limited <= 2, 2005 ("%s: tp->snd_limited too big", 2006 __func__)); 2007 tp->snd_cwnd = tp->snd_ssthresh + 2008 tp->t_maxseg * 2009 (tp->t_dupacks - tp->snd_limited); 2010 if (SEQ_GT(onxt, tp->snd_nxt)) 2011 tp->snd_nxt = onxt; 2012 goto drop; 2013 } else if (tcp_do_rfc3042) { 2014 u_long oldcwnd = tp->snd_cwnd; 2015 tcp_seq oldsndmax = tp->snd_max; 2016 u_int sent; 2017 2018 KASSERT(tp->t_dupacks == 1 || 2019 tp->t_dupacks == 2, 2020 ("%s: dupacks not 1 or 2", 2021 __func__)); 2022 if (tp->t_dupacks == 1) 2023 tp->snd_limited = 0; 2024 tp->snd_cwnd = 2025 (tp->snd_nxt - tp->snd_una) + 2026 (tp->t_dupacks - tp->snd_limited) * 2027 tp->t_maxseg; 2028 (void) tcp_output(tp); 2029 sent = tp->snd_max - oldsndmax; 2030 if (sent > tp->t_maxseg) { 2031 KASSERT((tp->t_dupacks == 2 && 2032 tp->snd_limited == 0) || 2033 (sent == tp->t_maxseg + 1 && 2034 tp->t_flags & TF_SENTFIN), 2035 ("%s: sent too much", 2036 __func__)); 2037 tp->snd_limited = 2; 2038 } else if (sent > 0) 2039 ++tp->snd_limited; 2040 tp->snd_cwnd = oldcwnd; 2041 goto drop; 2042 } 2043 } else 2044 tp->t_dupacks = 0; 2045 break; 2046 } 2047 2048 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2049 ("%s: th_ack <= snd_una", __func__)); 2050 2051 /* 2052 * If the congestion window was inflated to account 2053 * for the other side's cached packets, retract it. 2054 */ 2055 if (tcp_do_newreno || tp->sack_enable) { 2056 if (IN_FASTRECOVERY(tp)) { 2057 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2058 if (tp->sack_enable) 2059 tcp_sack_partialack(tp, th); 2060 else 2061 tcp_newreno_partial_ack(tp, th); 2062 } else { 2063 /* 2064 * Out of fast recovery. 2065 * Window inflation should have left us 2066 * with approximately snd_ssthresh 2067 * outstanding data. 2068 * But in case we would be inclined to 2069 * send a burst, better to do it via 2070 * the slow start mechanism. 2071 */ 2072 if (SEQ_GT(th->th_ack + 2073 tp->snd_ssthresh, 2074 tp->snd_max)) 2075 tp->snd_cwnd = tp->snd_max - 2076 th->th_ack + 2077 tp->t_maxseg; 2078 else 2079 tp->snd_cwnd = tp->snd_ssthresh; 2080 } 2081 } 2082 } else { 2083 if (tp->t_dupacks >= tcprexmtthresh && 2084 tp->snd_cwnd > tp->snd_ssthresh) 2085 tp->snd_cwnd = tp->snd_ssthresh; 2086 } 2087 tp->t_dupacks = 0; 2088 /* 2089 * If we reach this point, ACK is not a duplicate, 2090 * i.e., it ACKs something we sent. 2091 */ 2092 if (tp->t_flags & TF_NEEDSYN) { 2093 /* 2094 * T/TCP: Connection was half-synchronized, and our 2095 * SYN has been ACK'd (so connection is now fully 2096 * synchronized). Go to non-starred state, 2097 * increment snd_una for ACK of SYN, and check if 2098 * we can do window scaling. 2099 */ 2100 tp->t_flags &= ~TF_NEEDSYN; 2101 tp->snd_una++; 2102 /* Do window scaling? 
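 * Worked example (editorial illustration, values invented): if both sides exchanged the window scale option and request_r_scale is 3, rcv_scale becomes 3 and a 16-bit window field of 8192 now advertises 8192 << 3 = 65536 bytes.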
*/ 2103 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2104 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2105 tp->rcv_scale = tp->request_r_scale; 2106 /* Send window already scaled. */ 2107 } 2108 } 2109 2110process_ACK: 2111 KASSERT(headlocked, ("%s: process_ACK: head not locked", 2112 __func__)); 2113 INP_LOCK_ASSERT(tp->t_inpcb); 2114 2115 acked = th->th_ack - tp->snd_una; 2116 tcpstat.tcps_rcvackpack++; 2117 tcpstat.tcps_rcvackbyte += acked; 2118 2119 /* 2120 * If we just performed our first retransmit, and the ACK 2121 * arrives within our recovery window, then it was a mistake 2122 * to do the retransmit in the first place. Recover our 2123 * original cwnd and ssthresh, and proceed to transmit where 2124 * we left off. 2125 */ 2126 if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) { 2127 ++tcpstat.tcps_sndrexmitbad; 2128 tp->snd_cwnd = tp->snd_cwnd_prev; 2129 tp->snd_ssthresh = tp->snd_ssthresh_prev; 2130 tp->snd_recover = tp->snd_recover_prev; 2131 if (tp->t_flags & TF_WASFRECOVERY) 2132 ENTER_FASTRECOVERY(tp); 2133 tp->snd_nxt = tp->snd_max; 2134 tp->t_badrxtwin = 0; /* XXX probably not required */ 2135 } 2136 2137 /* 2138 * If we have a timestamp reply, update smoothed 2139 * round trip time. If no timestamp is present but 2140 * transmit timer is running and timed sequence 2141 * number was acked, update smoothed round trip time. 2142 * Since we now have an rtt measurement, cancel the 2143 * timer backoff (cf., Phil Karn's retransmit alg.). 2144 * Recompute the initial retransmit timer. 2145 * 2146 * Some boxes send broken timestamp replies 2147 * during the SYN+ACK phase; ignore 2148 * timestamps of 0, or we could calculate a 2149 * huge RTT and blow up the retransmit timer. 2150 */ 2151 if ((to.to_flags & TOF_TS) != 0 && 2152 to.to_tsecr) { 2153 if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr) 2154 tp->t_rttlow = ticks - to.to_tsecr; 2155 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1); 2156 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2157 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2158 tp->t_rttlow = ticks - tp->t_rtttime; 2159 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2160 } 2161 tcp_xmit_bandwidth_limit(tp, th->th_ack); 2162 2163 /* 2164 * If all outstanding data is acked, stop retransmit 2165 * timer and remember to restart (more output or persist). 2166 * If there is more data to be acked, restart retransmit 2167 * timer, using current (possibly backed-off) value. 2168 */ 2169 if (th->th_ack == tp->snd_max) { 2170 callout_stop(tp->tt_rexmt); 2171 needoutput = 1; 2172 } else if (!callout_active(tp->tt_persist)) 2173 callout_reset(tp->tt_rexmt, tp->t_rxtcur, 2174 tcp_timer_rexmt, tp); 2175 2176 /* 2177 * If no data (only SYN) was ACK'd, 2178 * skip rest of ACK processing. 2179 */ 2180 if (acked == 0) 2181 goto step6; 2182 2183 /* 2184 * When new data is acked, open the congestion window. 2185 * If the window gives us less than ssthresh packets 2186 * in flight, open exponentially (maxseg per packet). 2187 * Otherwise open linearly: maxseg per window 2188 * (maxseg^2 / cwnd per packet).
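 * Worked example (editorial illustration, values invented): with t_maxseg = 1460, while snd_cwnd = 11680 is below snd_ssthresh each ACK below adds a full 1460 (slow start); once snd_cwnd = 29200 exceeds ssthresh each ACK adds only 1460 * 1460 / 29200 = 73 bytes, i.e. roughly one maxseg per window of ACKs.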
2189 */ 2190 if ((!tcp_do_newreno && !tp->sack_enable) || 2191 !IN_FASTRECOVERY(tp)) { 2192 u_int cw = tp->snd_cwnd; 2193 u_int incr = tp->t_maxseg; 2194 if (cw > tp->snd_ssthresh) 2195 incr = incr * incr / cw; 2196 tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale); 2197 } 2198 SOCKBUF_LOCK(&so->so_snd); 2199 if (acked > so->so_snd.sb_cc) { 2200 tp->snd_wnd -= so->so_snd.sb_cc; 2201 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc); 2202 ourfinisacked = 1; 2203 } else { 2204 sbdrop_locked(&so->so_snd, acked); 2205 tp->snd_wnd -= acked; 2206 ourfinisacked = 0; 2207 } 2208 sowwakeup_locked(so); 2209 /* detect una wraparound */ 2210 if ((tcp_do_newreno || tp->sack_enable) && 2211 !IN_FASTRECOVERY(tp) && 2212 SEQ_GT(tp->snd_una, tp->snd_recover) && 2213 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2214 tp->snd_recover = th->th_ack - 1; 2215 if ((tcp_do_newreno || tp->sack_enable) && 2216 IN_FASTRECOVERY(tp) && 2217 SEQ_GEQ(th->th_ack, tp->snd_recover)) 2218 EXIT_FASTRECOVERY(tp); 2219 tp->snd_una = th->th_ack; 2220 if (tp->sack_enable) { 2221 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2222 tp->snd_recover = tp->snd_una; 2223 } 2224 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2225 tp->snd_nxt = tp->snd_una; 2226 2227 switch (tp->t_state) { 2228 2229 /* 2230 * In FIN_WAIT_1 STATE in addition to the processing 2231 * for the ESTABLISHED state if our FIN is now acknowledged 2232 * then enter FIN_WAIT_2. 2233 */ 2234 case TCPS_FIN_WAIT_1: 2235 if (ourfinisacked) { 2236 /* 2237 * If we can't receive any more 2238 * data, then closing user can proceed. 2239 * Starting the timer is contrary to the 2240 * specification, but if we don't get a FIN 2241 * we'll hang forever. 2242 */ 2243 /* XXXjl 2244 * we should release the tp also, and use a 2245 * compressed state. 2246 */ 2247 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2248 int timeout; 2249 2250 soisdisconnected(so); 2251 timeout = (tcp_fast_finwait2_recycle) ? 2252 tcp_finwait2_timeout : tcp_maxidle; 2253 callout_reset(tp->tt_2msl, timeout, 2254 tcp_timer_2msl, tp); 2255 } 2256 tp->t_state = TCPS_FIN_WAIT_2; 2257 } 2258 break; 2259 2260 /* 2261 * In CLOSING STATE in addition to the processing for 2262 * the ESTABLISHED state if the ACK acknowledges our FIN 2263 * then enter the TIME-WAIT state, otherwise ignore 2264 * the segment. 2265 */ 2266 case TCPS_CLOSING: 2267 if (ourfinisacked) { 2268 KASSERT(headlocked, ("%s: process_ACK: " 2269 "head not locked", __func__)); 2270 tcp_twstart(tp); 2271 INP_INFO_WUNLOCK(&tcbinfo); 2272 headlocked = 0; 2273 m_freem(m); 2274 return (0); 2275 } 2276 break; 2277 2278 /* 2279 * In LAST_ACK, we may still be waiting for data to drain 2280 * and/or to be acked, as well as for the ack of our FIN. 2281 * If our FIN is now acknowledged, delete the TCB, 2282 * enter the closed state and return. 2283 */ 2284 case TCPS_LAST_ACK: 2285 if (ourfinisacked) { 2286 KASSERT(headlocked, ("%s: process_ACK: " 2287 "tcp_close: head not locked", __func__)); 2288 tp = tcp_close(tp); 2289 goto drop; 2290 } 2291 break; 2292 2293 /* 2294 * In TIME_WAIT state the only thing that should arrive 2295 * is a retransmission of the remote FIN. Acknowledge 2296 * it and restart the finack timer. 
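 * Editorial note (illustrative, assuming the stock TCPTV_MSL of 30 seconds): the callout_reset() below rearms the timer for 2 * tcp_msl, so a retransmitted FIN buys the connection another 60 seconds in TIME_WAIT.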
2297 */ 2298 case TCPS_TIME_WAIT: 2299 KASSERT(tp->t_state != TCPS_TIME_WAIT, 2300 ("%s: timewait", __func__)); 2301 callout_reset(tp->tt_2msl, 2 * tcp_msl, 2302 tcp_timer_2msl, tp); 2303 goto dropafterack; 2304 } 2305 } 2306 2307step6: 2308 KASSERT(headlocked, ("%s: step6: head not locked", __func__)); 2309 INP_LOCK_ASSERT(tp->t_inpcb); 2310 2311 /* 2312 * Update window information. 2313 * Don't look at window if no ACK: TACs send garbage on first SYN. 2314 */ 2315 if ((thflags & TH_ACK) && 2316 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2317 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2318 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2319 /* keep track of pure window updates */ 2320 if (tlen == 0 && 2321 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2322 tcpstat.tcps_rcvwinupd++; 2323 tp->snd_wnd = tiwin; 2324 tp->snd_wl1 = th->th_seq; 2325 tp->snd_wl2 = th->th_ack; 2326 if (tp->snd_wnd > tp->max_sndwnd) 2327 tp->max_sndwnd = tp->snd_wnd; 2328 needoutput = 1; 2329 } 2330 2331 /* 2332 * Process segments with URG. 2333 */ 2334 if ((thflags & TH_URG) && th->th_urp && 2335 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2336 /* 2337 * This is a kludge, but if we receive and accept 2338 * random urgent pointers, we'll crash in 2339 * soreceive. It's hard to imagine someone 2340 * actually wanting to send this much urgent data. 2341 */ 2342 SOCKBUF_LOCK(&so->so_rcv); 2343 if (th->th_urp + so->so_rcv.sb_cc > sb_max) { 2344 th->th_urp = 0; /* XXX */ 2345 thflags &= ~TH_URG; /* XXX */ 2346 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2347 goto dodata; /* XXX */ 2348 } 2349 /* 2350 * If this segment advances the known urgent pointer, 2351 * then mark the data stream. This should not happen 2352 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2353 * a FIN has been received from the remote side. 2354 * In these states we ignore the URG. 2355 * 2356 * According to RFC961 (Assigned Protocols), 2357 * the urgent pointer points to the last octet 2358 * of urgent data. We continue, however, 2359 * to consider it to indicate the first octet 2360 * of data past the urgent section as the original 2361 * spec states (in one of two places). 2362 */ 2363 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2364 tp->rcv_up = th->th_seq + th->th_urp; 2365 so->so_oobmark = so->so_rcv.sb_cc + 2366 (tp->rcv_up - tp->rcv_nxt) - 1; 2367 if (so->so_oobmark == 0) 2368 so->so_rcv.sb_state |= SBS_RCVATMARK; 2369 sohasoutofband(so); 2370 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2371 } 2372 SOCKBUF_UNLOCK(&so->so_rcv); 2373 /* 2374 * Remove out of band data so it doesn't get presented to the user. 2375 * This can happen independent of advancing the URG pointer, 2376 * but if two URGs are pending at once, some out-of-band 2377 * data may creep in... ick. 2378 */ 2379 if (th->th_urp <= (u_long)tlen && 2380 !(so->so_options & SO_OOBINLINE)) { 2381 /* hdr drop is delayed */ 2382 tcp_pulloutofband(so, th, m, drop_hdrlen); 2383 } 2384 } else { 2385 /* 2386 * If no out of band data is expected, 2387 * pull receive urgent pointer along 2388 * with the receive window. 2389 */ 2390 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2391 tp->rcv_up = tp->rcv_nxt; 2392 } 2393dodata: /* XXX */ 2394 KASSERT(headlocked, ("%s: dodata: head not locked", __func__)); 2395 INP_LOCK_ASSERT(tp->t_inpcb); 2396 2397 /* 2398 * Process the segment text, merging it into the TCP sequencing queue, 2399 * and arranging for acknowledgment of receipt if necessary.
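 * Worked example (editorial illustration, values invented): with rcv_nxt = 1000 and an empty reassembly queue, an in-order segment at seq 1000 is appended directly to the socket buffer and merely schedules a delayed ACK; a segment arriving at seq 2000 instead goes through tcp_reass() and sets TF_ACKNOW, so the immediate duplicate ACK can drive the sender's fast retransmit.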
2400 * This process logically involves adjusting tp->rcv_wnd as data 2401 * is presented to the user (this happens in tcp_usrreq.c, 2402 * case PRU_RCVD). If a FIN has already been received on this 2403 * connection then we just ignore the text. 2404 */ 2405 if ((tlen || (thflags & TH_FIN)) && 2406 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2407 tcp_seq save_start = th->th_seq; 2408 tcp_seq save_end = th->th_seq + tlen; 2409 m_adj(m, drop_hdrlen); /* delayed header drop */ 2410 /* 2411 * Insert segment which includes th into TCP reassembly queue 2412 * with control block tp. Set thflags to whether reassembly now 2413 * includes a segment with FIN. This handles the common case 2414 * inline (segment is the next to be received on an established 2415 * connection, and the queue is empty), avoiding linkage into 2416 * and removal from the queue and repetition of various 2417 * conversions. 2418 * Set DELACK for segments received in order, but ack 2419 * immediately when segments are out of order (so 2420 * fast retransmit can work). 2421 */ 2422 if (th->th_seq == tp->rcv_nxt && 2423 LIST_EMPTY(&tp->t_segq) && 2424 TCPS_HAVEESTABLISHED(tp->t_state)) { 2425 if (DELAY_ACK(tp)) 2426 tp->t_flags |= TF_DELACK; 2427 else 2428 tp->t_flags |= TF_ACKNOW; 2429 tp->rcv_nxt += tlen; 2430 thflags = th->th_flags & TH_FIN; 2431 tcpstat.tcps_rcvpack++; 2432 tcpstat.tcps_rcvbyte += tlen; 2433 ND6_HINT(tp); 2434 SOCKBUF_LOCK(&so->so_rcv); 2435 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 2436 m_freem(m); 2437 else 2438 sbappendstream_locked(&so->so_rcv, m); 2439 sorwakeup_locked(so); 2440 } else { 2441 thflags = tcp_reass(tp, th, &tlen, m); 2442 tp->t_flags |= TF_ACKNOW; 2443 } 2444 if (tlen > 0 && tp->sack_enable) 2445 tcp_update_sack_list(tp, save_start, save_end); 2446#if 0 2447 /* 2448 * Note the amount of data that peer has sent into 2449 * our window, in order to estimate the sender's 2450 * buffer size. 2451 * XXX: Unused. 2452 */ 2453 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 2454#endif 2455 } else { 2456 m_freem(m); 2457 thflags &= ~TH_FIN; 2458 } 2459 2460 /* 2461 * If FIN is received ACK the FIN and let the user know 2462 * that the connection is closing. 2463 */ 2464 if (thflags & TH_FIN) { 2465 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2466 socantrcvmore(so); 2467 /* 2468 * If connection is half-synchronized 2469 * (ie NEEDSYN flag on) then delay ACK, 2470 * so it may be piggybacked when SYN is sent. 2471 * Otherwise, since we received a FIN then no 2472 * more input can be expected, send ACK now. 2473 */ 2474 if (tp->t_flags & TF_NEEDSYN) 2475 tp->t_flags |= TF_DELACK; 2476 else 2477 tp->t_flags |= TF_ACKNOW; 2478 tp->rcv_nxt++; 2479 } 2480 switch (tp->t_state) { 2481 2482 /* 2483 * In SYN_RECEIVED and ESTABLISHED STATES 2484 * enter the CLOSE_WAIT state. 2485 */ 2486 case TCPS_SYN_RECEIVED: 2487 tp->t_starttime = ticks; 2488 /*FALLTHROUGH*/ 2489 case TCPS_ESTABLISHED: 2490 tp->t_state = TCPS_CLOSE_WAIT; 2491 break; 2492 2493 /* 2494 * If still in FIN_WAIT_1 STATE FIN has not been acked so 2495 * enter the CLOSING state. 2496 */ 2497 case TCPS_FIN_WAIT_1: 2498 tp->t_state = TCPS_CLOSING; 2499 break; 2500 2501 /* 2502 * In FIN_WAIT_2 state enter the TIME_WAIT state, 2503 * starting the time-wait timer, turning off the other 2504 * standard timers. 
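 * Editorial note (illustrative): the FIN itself consumes one sequence number, which is why rcv_nxt is incremented above; e.g. a FIN at seq 5000 carrying no data is acknowledged with ACK 5001.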
2505 */ 2506 case TCPS_FIN_WAIT_2: 2507 KASSERT(headlocked == 1, ("%s: dodata: " 2508 "TCP_FIN_WAIT_2: head not locked", __func__)); 2509 tcp_twstart(tp); 2510 INP_INFO_WUNLOCK(&tcbinfo); 2511 return (0); 2512 2513 /* 2514 * In TIME_WAIT state restart the 2 MSL time_wait timer. 2515 */ 2516 case TCPS_TIME_WAIT: 2517 KASSERT(tp->t_state != TCPS_TIME_WAIT, 2518 ("%s: timewait", __func__)); 2519 callout_reset(tp->tt_2msl, 2 * tcp_msl, 2520 tcp_timer_2msl, tp); 2521 break; 2522 } 2523 } 2524 INP_INFO_WUNLOCK(&tcbinfo); 2525 headlocked = 0; 2526#ifdef TCPDEBUG 2527 if (so->so_options & SO_DEBUG) 2528 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 2529 &tcp_savetcp, 0); 2530#endif 2531 2532 /* 2533 * Return any desired output. 2534 */ 2535 if (needoutput || (tp->t_flags & TF_ACKNOW)) 2536 (void) tcp_output(tp); 2537 2538check_delack: 2539 KASSERT(headlocked == 0, ("%s: check_delack: head locked", 2540 __func__)); 2541 INP_LOCK_ASSERT(tp->t_inpcb); 2542 if (tp->t_flags & TF_DELACK) { 2543 tp->t_flags &= ~TF_DELACK; 2544 callout_reset(tp->tt_delack, tcp_delacktime, 2545 tcp_timer_delack, tp); 2546 } 2547 INP_UNLOCK(tp->t_inpcb); 2548 return (0); 2549 2550dropafterack: 2551 KASSERT(headlocked, ("%s: dropafterack: head not locked", __func__)); 2552 /* 2553 * Generate an ACK dropping incoming segment if it occupies 2554 * sequence space, where the ACK reflects our state. 2555 * 2556 * We can now skip the test for the RST flag since all 2557 * paths to this code happen after packets containing 2558 * RST have been dropped. 2559 * 2560 * In the SYN-RECEIVED state, don't send an ACK unless the 2561 * segment we received passes the SYN-RECEIVED ACK test. 2562 * If it fails send a RST. This breaks the loop in the 2563 * "LAND" DoS attack, and also prevents an ACK storm 2564 * between two listening ports that have been sent forged 2565 * SYN segments, each with the source address of the other. 2566 */ 2567 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 2568 (SEQ_GT(tp->snd_una, th->th_ack) || 2569 SEQ_GT(th->th_ack, tp->snd_max)) ) { 2570 rstreason = BANDLIM_RST_OPENPORT; 2571 goto dropwithreset; 2572 } 2573#ifdef TCPDEBUG 2574 if (so->so_options & SO_DEBUG) 2575 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2576 &tcp_savetcp, 0); 2577#endif 2578 KASSERT(headlocked, ("%s: headlocked should be 1", __func__)); 2579 INP_INFO_WUNLOCK(&tcbinfo); 2580 tp->t_flags |= TF_ACKNOW; 2581 (void) tcp_output(tp); 2582 INP_UNLOCK(tp->t_inpcb); 2583 m_freem(m); 2584 return (0); 2585 2586dropwithreset: 2587 KASSERT(headlocked, ("%s: dropwithreset: head not locked", __func__)); 2588 2589 tcp_dropwithreset(m, th, tp, tlen, rstreason); 2590 2591 if (tp != NULL) 2592 INP_UNLOCK(tp->t_inpcb); 2593 if (headlocked) 2594 INP_INFO_WUNLOCK(&tcbinfo); 2595 return (0); 2596 2597drop: 2598 /* 2599 * Drop space held by incoming segment and return. 2600 */ 2601#ifdef TCPDEBUG 2602 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2603 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2604 &tcp_savetcp, 0); 2605#endif 2606 if (tp != NULL) 2607 INP_UNLOCK(tp->t_inpcb); 2608 if (headlocked) 2609 INP_INFO_WUNLOCK(&tcbinfo); 2610 m_freem(m); 2611 return (0); 2612} 2613 2614 2615/* 2616 * Issue RST on TCP segment. The mbuf must still include the original 2617 * packet header. 
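 * Worked example (editorial illustration, values invented): a SYN (seq 100, no ACK) arriving for a closed port is answered below with seq 0, ack 101 and TH_RST|TH_ACK -- the SYN counts as one octet, hence the tlen++ -- while a stray ACK segment draws a bare RST whose seq equals the offending th_ack.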
2618 */ 2619static void 2620tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 2621 int tlen, int rstreason) 2622{ 2623 struct ip *ip; 2624#ifdef INET6 2625 struct ip6_hdr *ip6; 2626#endif 2627 2628 /* 2629 * Generate a RST, dropping incoming segment. 2630 * Make ACK acceptable to originator of segment. 2631 * Don't bother to respond if destination was broadcast/multicast. 2632 */ 2633 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 2634 goto drop; 2635#ifdef INET6 2636 if (mtod(m, struct ip *)->ip_v == 6) { 2637 ip6 = mtod(m, struct ip6_hdr *); 2638 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 2639 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 2640 goto drop; 2641 /* IPv6 anycast check is done at tcp6_input() */ 2642 } else 2643#endif 2644 { 2645 ip = mtod(m, struct ip *); 2646 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 2647 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 2648 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 2649 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 2650 goto drop; 2651 } 2652 2653 /* Perform bandwidth limiting. */ 2654 if (badport_bandlim(rstreason) < 0) 2655 goto drop; 2656 2657 /* tcp_respond consumes the mbuf chain. */ 2658 if (th->th_flags & TH_ACK) { 2659 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 2660 th->th_ack, TH_RST); 2661 } else { 2662 if (th->th_flags & TH_SYN) 2663 tlen++; 2664 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 2665 (tcp_seq)0, TH_RST|TH_ACK); 2666 } 2667 return; 2668drop: 2669 m_freem(m); 2670 return; 2671} 2672 2673/* 2674 * Parse TCP options and place in tcpopt. 2675 */ 2676static void 2677tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 2678{ 2679 int opt, optlen; 2680 2681 to->to_flags = 0; 2682 for (; cnt > 0; cnt -= optlen, cp += optlen) { 2683 opt = cp[0]; 2684 if (opt == TCPOPT_EOL) 2685 break; 2686 if (opt == TCPOPT_NOP) 2687 optlen = 1; 2688 else { 2689 if (cnt < 2) 2690 break; 2691 optlen = cp[1]; 2692 if (optlen < 2 || optlen > cnt) 2693 break; 2694 } 2695 switch (opt) { 2696 case TCPOPT_MAXSEG: 2697 if (optlen != TCPOLEN_MAXSEG) 2698 continue; 2699 if (!(flags & TO_SYN)) 2700 continue; 2701 to->to_flags |= TOF_MSS; 2702 bcopy((char *)cp + 2, 2703 (char *)&to->to_mss, sizeof(to->to_mss)); 2704 to->to_mss = ntohs(to->to_mss); 2705 break; 2706 case TCPOPT_WINDOW: 2707 if (optlen != TCPOLEN_WINDOW) 2708 continue; 2709 if (!(flags & TO_SYN)) 2710 continue; 2711 to->to_flags |= TOF_SCALE; 2712 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 2713 break; 2714 case TCPOPT_TIMESTAMP: 2715 if (optlen != TCPOLEN_TIMESTAMP) 2716 continue; 2717 to->to_flags |= TOF_TS; 2718 bcopy((char *)cp + 2, 2719 (char *)&to->to_tsval, sizeof(to->to_tsval)); 2720 to->to_tsval = ntohl(to->to_tsval); 2721 bcopy((char *)cp + 6, 2722 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 2723 to->to_tsecr = ntohl(to->to_tsecr); 2724 break; 2725#ifdef TCP_SIGNATURE 2726 /* 2727 * XXX In order to reply to a host which has set the 2728 * TCP_SIGNATURE option in its initial SYN, we have to 2729 * record the fact that the option was observed here 2730 * for the syncache code to perform the correct response. 
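 * Worked example (editorial illustration, values invented): an MSS option arrives as the four octets 0x02 0x04 0x05 0xb4; opt = 2 and optlen = 4, and the bcopy()/ntohs() above yield to->to_mss = 0x05b4 = 1460.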
2731 */ 2732 case TCPOPT_SIGNATURE: 2733 if (optlen != TCPOLEN_SIGNATURE) 2734 continue; 2735 to->to_flags |= (TOF_SIGNATURE | TOF_SIGLEN); 2736 break; 2737#endif 2738 case TCPOPT_SACK_PERMITTED: 2739 if (optlen != TCPOLEN_SACK_PERMITTED) 2740 continue; 2741 if (!(flags & TO_SYN)) 2742 continue; 2743 if (!tcp_do_sack) 2744 continue; 2745 to->to_flags |= TOF_SACKPERM; 2746 break; 2747 case TCPOPT_SACK: 2748 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 2749 continue; 2750 if (flags & TO_SYN) 2751 continue; 2752 to->to_flags |= TOF_SACK; 2753 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 2754 to->to_sacks = cp + 2; 2755 tcpstat.tcps_sack_rcv_blocks++; 2756 break; 2757 default: 2758 continue; 2759 } 2760 } 2761} 2762 2763/* 2764 * Pull out of band byte out of a segment so 2765 * it doesn't appear in the user's data queue. 2766 * It is still reflected in the segment length for 2767 * sequencing purposes. 2768 */ 2769static void 2770tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 2771 int off) 2772{ 2773 int cnt = off + th->th_urp - 1; 2774 2775 while (cnt >= 0) { 2776 if (m->m_len > cnt) { 2777 char *cp = mtod(m, caddr_t) + cnt; 2778 struct tcpcb *tp = sototcpcb(so); 2779 2780 tp->t_iobc = *cp; 2781 tp->t_oobflags |= TCPOOB_HAVEDATA; 2782 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 2783 m->m_len--; 2784 if (m->m_flags & M_PKTHDR) 2785 m->m_pkthdr.len--; 2786 return; 2787 } 2788 cnt -= m->m_len; 2789 m = m->m_next; 2790 if (m == NULL) 2791 break; 2792 } 2793 panic("tcp_pulloutofband"); 2794} 2795 2796/* 2797 * Collect new round-trip time estimate 2798 * and update averages and current timeout. 2799 */ 2800static void 2801tcp_xmit_timer(struct tcpcb *tp, int rtt) 2802{ 2803 int delta; 2804 2805 INP_LOCK_ASSERT(tp->t_inpcb); 2806 2807 tcpstat.tcps_rttupdated++; 2808 tp->t_rttupdated++; 2809 if (tp->t_srtt != 0) { 2810 /* 2811 * srtt is stored as fixed point with 5 bits after the 2812 * binary point (i.e., scaled by 32). The following magic 2813 * is equivalent to the smoothing algorithm in rfc793 with 2814 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 2815 * point). Adjust rtt to origin 0. 2816 */ 2817 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 2818 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 2819 2820 if ((tp->t_srtt += delta) <= 0) 2821 tp->t_srtt = 1; 2822 2823 /* 2824 * We accumulate a smoothed rtt variance (actually, a 2825 * smoothed mean difference), then set the retransmit 2826 * timer to smoothed rtt + 4 times the smoothed variance. 2827 * rttvar is stored as fixed point with 4 bits after the 2828 * binary point (scaled by 16). The following is 2829 * equivalent to rfc793 smoothing with an alpha of .75 2830 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 2831 * rfc793's wired-in beta. 2832 */ 2833 if (delta < 0) 2834 delta = -delta; 2835 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 2836 if ((tp->t_rttvar += delta) <= 0) 2837 tp->t_rttvar = 1; 2838 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 2839 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2840 } else { 2841 /* 2842 * No rtt measurement yet - use the unsmoothed rtt. 2843 * Set the variance to half the rtt (so our first 2844 * retransmit happens at 3*rtt). 2845 */ 2846 tp->t_srtt = rtt << TCP_RTT_SHIFT; 2847 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 2848 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2849 } 2850 tp->t_rtttime = 0; 2851 tp->t_rxtshift = 0; 2852 2853 /* 2854 * The retransmit should happen at rtt + 4 * rttvar.
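 * Worked example (editorial illustration, values invented): on the very first sample with rtt = 10 ticks, the else-branch above sets t_srtt = 10 << 5 = 320 (scaled by 32) and t_rttvar = 10 << 3 = 80 (rtt/2, scaled by 16), so TCP_REXMTVAL() comes to srtt + 4 * rttvar = 10 + 4 * 5 = 30 ticks, the 3 * rtt first retransmit mentioned above.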
2855 * Because of the way we do the smoothing, srtt and rttvar 2856 * will each average +1/2 tick of bias. When we compute 2857 * the retransmit timer, we want 1/2 tick of rounding and 2858 * 1 extra tick because of +-1/2 tick uncertainty in the 2859 * firing of the timer. The bias will give us exactly the 2860 * 1.5 tick we need. But, because the bias is 2861 * statistical, we have to test that we don't drop below 2862 * the minimum feasible timer (which is 2 ticks). 2863 */ 2864 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 2865 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 2866 2867 /* 2868 * We received an ack for a packet that wasn't retransmitted; 2869 * it is probably safe to discard any error indications we've 2870 * received recently. This isn't quite right, but close enough 2871 * for now (a route might have failed after we sent a segment, 2872 * and the return path might not be symmetrical). 2873 */ 2874 tp->t_softerror = 0; 2875} 2876 2877/* 2878 * Determine a reasonable value for maxseg size. 2879 * If the route is known, check route for mtu. 2880 * If none, use an mss that can be handled on the outgoing 2881 * interface without forcing IP to fragment; if bigger than 2882 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES 2883 * to utilize large mbufs. If no route is found, route has no mtu, 2884 * or the destination isn't local, use a default, hopefully conservative 2885 * size (usually 512 or the default IP max size, but no more than the mtu 2886 * of the interface), as we can't discover anything about intervening 2887 * gateways or networks. We also initialize the congestion/slow start 2888 * window to be a single segment if the destination isn't local. 2889 * While looking at the routing entry, we also initialize other path-dependent 2890 * parameters from pre-set or cached values in the routing entry. 2891 * 2892 * Also take into account the space needed for options that we 2893 * send regularly. Make maxseg shorter by that amount to assure 2894 * that we can send maxseg amount of data even when the options 2895 * are present. Store the upper limit of the length of options plus 2896 * data in maxopd. 2897 * 2898 * 2899 * In case of T/TCP, we call this routine during implicit connection 2900 * setup as well (offer = -1), to initialize maxseg from the cached 2901 * MSS of our peer. 2902 * 2903 * NOTE that this routine is only called when we process an incoming 2904 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt(). 2905 */ 2906void 2907tcp_mss(struct tcpcb *tp, int offer) 2908{ 2909 int rtt, mss; 2910 u_long bufsize; 2911 u_long maxmtu; 2912 struct inpcb *inp = tp->t_inpcb; 2913 struct socket *so; 2914 struct hc_metrics_lite metrics; 2915 int origoffer = offer; 2916 int mtuflags = 0; 2917#ifdef INET6 2918 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 2919 size_t min_protoh = isipv6 ? 2920 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 2921 sizeof (struct tcpiphdr); 2922#else 2923 const size_t min_protoh = sizeof(struct tcpiphdr); 2924#endif 2925 2926 /* initialize */ 2927#ifdef INET6 2928 if (isipv6) { 2929 maxmtu = tcp_maxmtu6(&inp->inp_inc, &mtuflags); 2930 tp->t_maxopd = tp->t_maxseg = tcp_v6mssdflt; 2931 } else 2932#endif 2933 { 2934 maxmtu = tcp_maxmtu(&inp->inp_inc, &mtuflags); 2935 tp->t_maxopd = tp->t_maxseg = tcp_mssdflt; 2936 } 2937 so = inp->inp_socket; 2938 2939 /* 2940 * no route to sender, stay with default mss and return 2941 */ 2942 if (maxmtu == 0) 2943 return; 2944 2945 /* what have we got? 
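 * Worked example (editorial illustration, values invented): over IPv4 Ethernet, maxmtu = 1500 and min_protoh = 40 (IP plus TCP headers), giving mss = 1460; if timestamps are in use, TCPOLEN_TSTAMP_APPA (12) is subtracted further below, leaving t_maxseg = 1448 while t_maxopd stays 1460.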
*/ 2946 switch (offer) { 2947 case 0: 2948 /* 2949 * Offer == 0 means that there was no MSS on the SYN 2950 * segment; in this case we use tcp_mssdflt. 2951 */ 2952 offer = 2953#ifdef INET6 2954 isipv6 ? tcp_v6mssdflt : 2955#endif 2956 tcp_mssdflt; 2957 break; 2958 2959 case -1: 2960 /* 2961 * Offer == -1 means that we didn't receive SYN yet. 2962 */ 2963 /* FALLTHROUGH */ 2964 2965 default: 2966 /* 2967 * Prevent DoS attack with too small MSS. Round up 2968 * to at least minmss. 2969 */ 2970 offer = max(offer, tcp_minmss); 2971 /* 2972 * Sanity check: make sure that maxopd will be large 2973 * enough to allow some data on segments even if 2974 * all the option space is used (40 bytes). Otherwise 2975 * funny things may happen in tcp_output. 2976 */ 2977 offer = max(offer, 64); 2978 } 2979 2980 /* 2981 * rmx information is now retrieved from tcp_hostcache 2982 */ 2983 tcp_hc_get(&inp->inp_inc, &metrics); 2984 2985 /* 2986 * if there's a discovered mtu in the tcp hostcache, use it; 2987 * else, use the link mtu. 2988 */ 2989 if (metrics.rmx_mtu) 2990 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh; 2991 else { 2992#ifdef INET6 2993 if (isipv6) { 2994 mss = maxmtu - min_protoh; 2995 if (!path_mtu_discovery && 2996 !in6_localaddr(&inp->in6p_faddr)) 2997 mss = min(mss, tcp_v6mssdflt); 2998 } else 2999#endif 3000 { 3001 mss = maxmtu - min_protoh; 3002 if (!path_mtu_discovery && 3003 !in_localaddr(inp->inp_faddr)) 3004 mss = min(mss, tcp_mssdflt); 3005 } 3006 } 3007 mss = min(mss, offer); 3008 3009 /* 3010 * maxopd stores the maximum length of data AND options 3011 * in a segment; maxseg is the amount of data in a normal 3012 * segment. We need to store this value (maxopd) apart 3013 * from maxseg, because now every segment carries options 3014 * and thus we normally have somewhat less data in segments. 3015 */ 3016 tp->t_maxopd = mss; 3017 3018 /* 3019 * origoffer == -1 indicates that no segments were received yet. 3020 * In this case we just guess. 3021 */ 3022 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP && 3023 (origoffer == -1 || 3024 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)) 3025 mss -= TCPOLEN_TSTAMP_APPA; 3026 tp->t_maxseg = mss; 3027 3028#if (MCLBYTES & (MCLBYTES - 1)) == 0 3029 if (mss > MCLBYTES) 3030 mss &= ~(MCLBYTES-1); 3031#else 3032 if (mss > MCLBYTES) 3033 mss = mss / MCLBYTES * MCLBYTES; 3034#endif 3035 tp->t_maxseg = mss; 3036 3037 /* 3038 * If there's a pipesize, change the socket buffer to that size, 3039 * don't change if sb_hiwat is different than default (then it 3040 * has been changed on purpose with setsockopt). 3041 * Make the socket buffers an integral number of mss units; 3042 * if the mss is larger than the socket buffer, decrease the mss.
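 * Worked example (editorial illustration, values invented): with mss = 1448 and a default sb_hiwat of 32768, roundup(32768, 1448) = 33304, so the buffer is reserved up to 23 whole segments rather than 22 segments plus a fragment.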
3043 */ 3044 SOCKBUF_LOCK(&so->so_snd); 3045 if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe) 3046 bufsize = metrics.rmx_sendpipe; 3047 else 3048 bufsize = so->so_snd.sb_hiwat; 3049 if (bufsize < mss) 3050 mss = bufsize; 3051 else { 3052 bufsize = roundup(bufsize, mss); 3053 if (bufsize > sb_max) 3054 bufsize = sb_max; 3055 if (bufsize > so->so_snd.sb_hiwat) 3056 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL); 3057 } 3058 SOCKBUF_UNLOCK(&so->so_snd); 3059 tp->t_maxseg = mss; 3060 3061 SOCKBUF_LOCK(&so->so_rcv); 3062 if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe) 3063 bufsize = metrics.rmx_recvpipe; 3064 else 3065 bufsize = so->so_rcv.sb_hiwat; 3066 if (bufsize > mss) { 3067 bufsize = roundup(bufsize, mss); 3068 if (bufsize > sb_max) 3069 bufsize = sb_max; 3070 if (bufsize > so->so_rcv.sb_hiwat) 3071 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL); 3072 } 3073 SOCKBUF_UNLOCK(&so->so_rcv); 3074 /* 3075 * While we're here, check the others too 3076 */ 3077 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) { 3078 tp->t_srtt = rtt; 3079 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE; 3080 tcpstat.tcps_usedrtt++; 3081 if (metrics.rmx_rttvar) { 3082 tp->t_rttvar = metrics.rmx_rttvar; 3083 tcpstat.tcps_usedrttvar++; 3084 } else { 3085 /* default variation is +- 1 rtt */ 3086 tp->t_rttvar = 3087 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE; 3088 } 3089 TCPT_RANGESET(tp->t_rxtcur, 3090 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1, 3091 tp->t_rttmin, TCPTV_REXMTMAX); 3092 } 3093 if (metrics.rmx_ssthresh) { 3094 /* 3095 * There's some sort of gateway or interface 3096 * buffer limit on the path. Use this to set 3097 * the slow start threshold, but set the 3098 * threshold to no less than 2*mss. 3099 */ 3100 tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh); 3101 tcpstat.tcps_usedssthresh++; 3102 } 3103 if (metrics.rmx_bandwidth) 3104 tp->snd_bandwidth = metrics.rmx_bandwidth; 3105 3106 /* 3107 * Set the slow-start flight size depending on whether this 3108 * is a local network or not. 3109 * 3110 * Extend this so we cache the cwnd too and retrieve it here. 3111 * Make cwnd even bigger than RFC3390 suggests but only if we 3112 * have previous experience with the remote host. Be careful 3113 * not to make cwnd bigger than the remote receive window or our own 3114 * send socket buffer. Maybe put some additional upper bound 3115 * on the retrieved cwnd. Should do incremental updates to 3116 * hostcache when cwnd collapses so the next connection doesn't 3117 * overload the path again. 3118 * 3119 * RFC3390 says only do this if SYN or SYN/ACK didn't get lost. 3120 * We currently check only in syncache_socket for that. 3121 */ 3122#define TCP_METRICS_CWND 3123#ifdef TCP_METRICS_CWND 3124 if (metrics.rmx_cwnd) 3125 tp->snd_cwnd = max(mss, 3126 min(metrics.rmx_cwnd / 2, 3127 min(tp->snd_wnd, so->so_snd.sb_hiwat))); 3128 else 3129#endif 3130 if (tcp_do_rfc3390) 3131 tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380)); 3132#ifdef INET6 3133 else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) || 3134 (!isipv6 && in_localaddr(inp->inp_faddr))) 3135#else 3136 else if (in_localaddr(inp->inp_faddr)) 3137#endif 3138 tp->snd_cwnd = mss * ss_fltsz_local; 3139 else 3140 tp->snd_cwnd = mss * ss_fltsz; 3141 3142 /* Check the interface for TSO capabilities. */ 3143 if (mtuflags & CSUM_TSO) 3144 tp->t_flags |= TF_TSO; 3145} 3146 3147/* 3148 * Determine the MSS option to send on an outgoing SYN.
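 * Worked example (editorial illustration, values invented): if the interface yields maxmtu = 1500 and the hostcache has no entry (thcmtu = 0), the code below offers mss = 1500 - 40 = 1460; if the hostcache instead remembers a discovered path MTU of 1400, the smaller value wins and mss = 1400 - 40 = 1360.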
3149 */ 3150 int 3151 tcp_mssopt(struct in_conninfo *inc) 3152 { 3153 int mss = 0; 3154 u_long maxmtu = 0; 3155 u_long thcmtu = 0; 3156 size_t min_protoh; 3157#ifdef INET6 3158 int isipv6 = inc->inc_isipv6 ? 1 : 0; 3159#endif 3160 3161 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer")); 3162 3163#ifdef INET6 3164 if (isipv6) { 3165 mss = tcp_v6mssdflt; 3166 maxmtu = tcp_maxmtu6(inc, NULL); 3167 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3168 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 3169 } else 3170#endif 3171 { 3172 mss = tcp_mssdflt; 3173 maxmtu = tcp_maxmtu(inc, NULL); 3174 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 3175 min_protoh = sizeof(struct tcpiphdr); 3176 } 3177 if (maxmtu && thcmtu) 3178 mss = min(maxmtu, thcmtu) - min_protoh; 3179 else if (maxmtu || thcmtu) 3180 mss = max(maxmtu, thcmtu) - min_protoh; 3181 3182 return (mss); 3183} 3184 3185 3186/* 3187 * When a partial ack arrives, force the retransmission of the 3188 * next unacknowledged segment. Do not clear tp->t_dupacks. 3189 * By setting snd_nxt to th_ack, this forces the retransmission timer to 3190 * be started again. 3191 */ 3192static void 3193tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) 3194{ 3195 tcp_seq onxt = tp->snd_nxt; 3196 u_long ocwnd = tp->snd_cwnd; 3197 3198 callout_stop(tp->tt_rexmt); 3199 tp->t_rtttime = 0; 3200 tp->snd_nxt = th->th_ack; 3201 /* 3202 * Set snd_cwnd to one segment beyond acknowledged offset. 3203 * (tp->snd_una has not yet been updated when this function is called.) 3204 */ 3205 tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una); 3206 tp->t_flags |= TF_ACKNOW; 3207 (void) tcp_output(tp); 3208 tp->snd_cwnd = ocwnd; 3209 if (SEQ_GT(onxt, tp->snd_nxt)) 3210 tp->snd_nxt = onxt; 3211 /* 3212 * Partial window deflation. Relies on the fact that tp->snd_una 3213 * is not updated yet. 3214 */ 3215 if (tp->snd_cwnd > th->th_ack - tp->snd_una) 3216 tp->snd_cwnd -= th->th_ack - tp->snd_una; 3217 else 3218 tp->snd_cwnd = 0; 3219 tp->snd_cwnd += tp->t_maxseg; 3220} 3221 3222/* 3223 * Returns 1 if the TIME_WAIT state was killed and we should start over, 3224 * looking for a pcb in the listen state. Returns 0 otherwise. 3225 */ 3226static int 3227tcp_timewait(struct inpcb *inp, struct tcpopt *to, struct tcphdr *th, 3228 struct mbuf *m, int tlen) 3229{ 3230 struct tcptw *tw; 3231 int thflags; 3232 tcp_seq seq; 3233#ifdef INET6 3234 int isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0; 3235#else 3236 const int isipv6 = 0; 3237#endif 3238 3239 /* tcbinfo lock required for tcp_twclose(), tcp_timer_2msl_reset(). */ 3240 INP_INFO_WLOCK_ASSERT(&tcbinfo); 3241 INP_LOCK_ASSERT(inp); 3242 3243 /* 3244 * XXXRW: Time wait state for inpcb has been recycled, but inpcb is 3245 * still present. This is undesirable, but temporarily necessary 3246 * until we work out how to handle inpcbs whose timewait state has 3247 * been removed. 3248 */ 3249 tw = intotw(inp); 3250 if (tw == NULL) 3251 goto drop; 3252 3253 thflags = th->th_flags; 3254 3255 /* 3256 * NOTE: for FIN_WAIT_2 (to be added later), 3257 * must validate sequence number before accepting RST 3258 */ 3259 3260 /* 3261 * If the segment contains RST: 3262 * Drop the segment - see Stevens, vol. 2, p. 964 and 3263 * RFC 1337. 3264 */ 3265 if (thflags & TH_RST) 3266 goto drop; 3267 3268#if 0 3269/* PAWS not needed at the moment */ 3270 /* 3271 * RFC 1323 PAWS: If we have a timestamp reply on this segment 3272 * and it's less than ts_recent, drop it.
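 * Worked example (editorial illustration, values invented): if tp->ts_recent were 500 and a segment arrived echoing tsval = 400, PAWS would treat it as an old duplicate: dropped outright if it carries no ACK, otherwise acknowledged and discarded.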
3273 */ 3274 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 3275 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 3276 if ((thflags & TH_ACK) == 0) 3277 goto drop; 3278 goto ack; 3279 } 3280 /* 3281 * ts_recent is never updated because we never accept new segments. 3282 */ 3283#endif 3284 3285 /* 3286 * If a new connection request is received 3287 * while in TIME_WAIT, drop the old connection 3288 * and start over if the sequence numbers 3289 * are above the previous ones. 3290 */ 3291 if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) { 3292 tcp_twclose(tw, 0); 3293 return (1); 3294 } 3295 3296 /* 3297 * Drop the segment if it does not contain an ACK. 3298 */ 3299 if ((thflags & TH_ACK) == 0) 3300 goto drop; 3301 3302 /* 3303 * Reset the 2MSL timer if this is a duplicate FIN. 3304 */ 3305 if (thflags & TH_FIN) { 3306 seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0); 3307 if (seq + 1 == tw->rcv_nxt) 3308 tcp_timer_2msl_reset(tw, 1); 3309 } 3310 3311 /* 3312 * Acknowledge the segment if it has data or is not a duplicate ACK. 3313 */ 3314 if (thflags != TH_ACK || tlen != 0 || 3315 th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt) 3316 tcp_twrespond(tw, TH_ACK); 3317 goto drop; 3318 3319 /* 3320 * Generate a RST, dropping incoming segment. 3321 * Make ACK acceptable to originator of segment. 3322 * Don't bother to respond if destination was broadcast/multicast. 3323 */ 3324 if (m->m_flags & (M_BCAST|M_MCAST)) 3325 goto drop; 3326 if (isipv6) { 3327 struct ip6_hdr *ip6; 3328 3329 /* IPv6 anycast check is done at tcp6_input() */ 3330 ip6 = mtod(m, struct ip6_hdr *); 3331 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 3332 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 3333 goto drop; 3334 } else { 3335 struct ip *ip; 3336 3337 ip = mtod(m, struct ip *); 3338 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 3339 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 3340 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 3341 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 3342 goto drop; 3343 } 3344 if (thflags & TH_ACK) { 3345 tcp_respond(NULL, 3346 mtod(m, void *), th, m, 0, th->th_ack, TH_RST); 3347 } else { 3348 seq = th->th_seq + (thflags & TH_SYN ? 1 : 0); 3349 tcp_respond(NULL, 3350 mtod(m, void *), th, m, seq, 0, TH_RST|TH_ACK); 3351 } 3352 INP_UNLOCK(inp); 3353 return (0); 3354 3355drop: 3356 INP_UNLOCK(inp); 3357 m_freem(m); 3358 return (0); 3359} 3360