tcp_input.c revision 167989
/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 * $FreeBSD: head/sys/netinet/tcp_input.c 167989 2007-03-28 12:58:13Z andre $
 */

#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"
#include "opt_tcp_sack.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /*FAST_IPSEC*/

#ifdef IPSEC
#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netkey/key.h>
#endif /*IPSEC*/

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>
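/*
 * Number of duplicate ACKs that triggers the fast retransmit / fast
 * recovery path in the ACK processing below (see the t_dupacks handling
 * in tcp_do_segment()).
 */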
static const int tcprexmtthresh = 3;

struct tcpstat tcpstat;
SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
    &tcpstat , tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");

static int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0, "Log all incoming TCP segments to closed ports");

static int blackhole = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &blackhole, 0, "Do not send RST on segments to closed ports");

int tcp_delack_enabled = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &tcp_delack_enabled, 0,
    "Delay ACK to try and piggyback it onto a data packet");

static int drop_synfin = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");

static int tcp_do_rfc3042 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
    &tcp_do_rfc3042, 0, "Enable RFC 3042 (Limited Transmit)");

static int tcp_do_rfc3390 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

static int tcp_insecure_rst = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
    &tcp_insecure_rst, 0,
    "Follow the old (insecure) criteria for accepting RST packets");

SYSCTL_NODE(_net_inet_tcp, OID_AUTO, reass, CTLFLAG_RW, 0,
    "TCP Segment Reassembly Queue");

static int tcp_reass_maxseg = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxsegments, CTLFLAG_RDTUN,
    &tcp_reass_maxseg, 0,
    "Global maximum number of TCP Segments in Reassembly Queue");

int tcp_reass_qsize = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, cursegments, CTLFLAG_RD,
    &tcp_reass_qsize, 0,
    "Global number of TCP Segments currently in Reassembly Queue");

static int tcp_reass_maxqlen = 48;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, maxqlen, CTLFLAG_RW,
    &tcp_reass_maxqlen, 0,
    "Maximum number of TCP Segments per individual Reassembly Queue");

static int tcp_reass_overflows = 0;
SYSCTL_INT(_net_inet_tcp_reass, OID_AUTO, overflows, CTLFLAG_RD,
    &tcp_reass_overflows, 0,
    "Global number of TCP Segment Reassembly Queue Overflows");

int tcp_do_autorcvbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing");

int tcp_autorcvbuf_inc = 16*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &tcp_autorcvbuf_inc, 0,
    "Incrementor step size of automatic receive buffer");

int tcp_autorcvbuf_max = 256*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer");

struct inpcbhead tcb;
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
struct inpcbinfo tcbinfo;
struct mtx	*tcbinfo_mtx;

static void	 tcp_dooptions(struct tcpopt *, u_char *, int, int);
static int	 tcp_do_segment(struct mbuf *, struct tcphdr *,
		     struct socket *, struct tcpcb *, int, int);
static void	 tcp_dropwithreset(struct mbuf *, struct tcphdr *,
		     struct tcpcb *, int, int);
static void	 tcp_pulloutofband(struct socket *,
		     struct tcphdr *, struct mbuf *, int);
static int	 tcp_reass(struct tcpcb *, struct tcphdr *, int *,
		     struct mbuf *);
static void	 tcp_xmit_timer(struct tcpcb *, int);
static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);
static int	 tcp_timewait(struct inpcb *, struct tcpopt *,
		     struct tcphdr *, struct mbuf *, int);

/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
		nd6_nud_hint(NULL, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 *	- there is no delayed ack timer in progress and
 *	- our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window and
 *		- delayed acks are enabled or
 *		- this is a half-synchronized T/TCP connection.
 */
#define DELAY_ACK(tp)							\
	((!callout_active(tp->tt_delack) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))

/* Initialize TCP reassembly queue */
static void
tcp_reass_zone_change(void *tag)
{

	tcp_reass_maxseg = nmbclusters / 16;
	uma_zone_set_max(tcp_reass_zone, tcp_reass_maxseg);
}

uma_zone_t	tcp_reass_zone;
void
tcp_reass_init()
{
	tcp_reass_maxseg = nmbclusters / 16;
	TUNABLE_INT_FETCH("net.inet.tcp.reass.maxsegments",
	    &tcp_reass_maxseg);
	tcp_reass_zone = uma_zcreate("tcpreass", sizeof (struct tseg_qent),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcp_reass_zone, tcp_reass_maxseg);
	EVENTHANDLER_REGISTER(nmbclusters_change,
	    tcp_reass_zone_change, NULL, EVENTHANDLER_PRI_ANY);
}

static int
tcp_reass(struct tcpcb *tp, struct tcphdr *th, int *tlenp, struct mbuf *m)
{
	struct tseg_qent *q;
	struct tseg_qent *p = NULL;
	struct tseg_qent *nq;
	struct tseg_qent *te = NULL;
	struct socket *so = tp->t_inpcb->inp_socket;
	int flags;

	INP_LOCK_ASSERT(tp->t_inpcb);

	/*
	 * XXX: tcp_reass() is rather inefficient with its data structures
	 * and should be rewritten (see NetBSD for optimizations).  While
	 * doing that it should move to its own file, tcp_reass.c.
	 */

	/*
	 * Call with th==NULL after becoming established to
	 * force pre-ESTABLISHED data up to the user socket.
	 */
	if (th == NULL)
		goto present;

	/*
	 * Limit the number of segments in the reassembly queue to prevent
	 * holding on to too many segments (and thus running out of mbufs).
	 * Make sure to let the missing segment that caused this queue
	 * through.  Always keep one global queue entry spare to be able to
	 * process the missing segment.
	 */
	if (th->th_seq != tp->rcv_nxt &&
	    (tcp_reass_qsize + 1 >= tcp_reass_maxseg ||
	     tp->t_segqlen >= tcp_reass_maxqlen)) {
		tcp_reass_overflows++;
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		*tlenp = 0;
		return (0);
	}

	/*
	 * Allocate a new queue entry.  If we can't, or hit the zone limit,
	 * just drop the packet.
	 */
	te = uma_zalloc(tcp_reass_zone, M_NOWAIT);
	if (te == NULL) {
		tcpstat.tcps_rcvmemdrop++;
		m_freem(m);
		*tlenp = 0;
		return (0);
	}
	tp->t_segqlen++;
	tcp_reass_qsize++;
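	/*
	 * The reassembly queue is kept sorted by starting sequence number.
	 * The scan below leaves "p" pointing at the last entry that starts
	 * at or before this segment; after any overlap with its neighbours
	 * has been trimmed, the new entry is linked in after "p" (or at the
	 * head of the queue if there is no such predecessor).
	 */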
	/*
	 * Find a segment which begins after this one does.
	 */
	LIST_FOREACH(q, &tp->t_segq, tqe_q) {
		if (SEQ_GT(q->tqe_th->th_seq, th->th_seq))
			break;
		p = q;
	}

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		int i;
		/* conversion to int (in i) handles seq wraparound */
		i = p->tqe_th->th_seq + p->tqe_len - th->th_seq;
		if (i > 0) {
			if (i >= *tlenp) {
				tcpstat.tcps_rcvduppack++;
				tcpstat.tcps_rcvdupbyte += *tlenp;
				m_freem(m);
				uma_zfree(tcp_reass_zone, te);
				tp->t_segqlen--;
				tcp_reass_qsize--;
				/*
				 * Try to present any queued data
				 * at the left window edge to the user.
				 * This is needed after the 3-WHS
				 * completes.
				 */
				goto present;	/* ??? */
			}
			m_adj(m, i);
			*tlenp -= i;
			th->th_seq += i;
		}
	}
	tcpstat.tcps_rcvoopack++;
	tcpstat.tcps_rcvoobyte += *tlenp;

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (q) {
		int i = (th->th_seq + *tlenp) - q->tqe_th->th_seq;
		if (i <= 0)
			break;
		if (i < q->tqe_len) {
			q->tqe_th->th_seq += i;
			q->tqe_len -= i;
			m_adj(q->tqe_m, i);
			break;
		}

		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		uma_zfree(tcp_reass_zone, q);
		tp->t_segqlen--;
		tcp_reass_qsize--;
		q = nq;
	}

	/* Insert the new segment queue entry into place. */
	te->tqe_m = m;
	te->tqe_th = th;
	te->tqe_len = *tlenp;

	if (p == NULL) {
		LIST_INSERT_HEAD(&tp->t_segq, te, tqe_q);
	} else {
		LIST_INSERT_AFTER(p, te, tqe_q);
	}

present:
	/*
	 * Present data to user, advancing rcv_nxt through
	 * completed sequence space.
	 */
	if (!TCPS_HAVEESTABLISHED(tp->t_state))
		return (0);
	q = LIST_FIRST(&tp->t_segq);
	if (!q || q->tqe_th->th_seq != tp->rcv_nxt)
		return (0);
	SOCKBUF_LOCK(&so->so_rcv);
	do {
		tp->rcv_nxt += q->tqe_len;
		flags = q->tqe_th->th_flags & TH_FIN;
		nq = LIST_NEXT(q, tqe_q);
		LIST_REMOVE(q, tqe_q);
		if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
			m_freem(q->tqe_m);
		else
			sbappendstream_locked(&so->so_rcv, q->tqe_m);
		uma_zfree(tcp_reass_zone, q);
		tp->t_segqlen--;
		tcp_reass_qsize--;
		q = nq;
	} while (q && q->tqe_th->th_seq == tp->rcv_nxt);
	ND6_HINT(tp);
	sorwakeup_locked(so);
	return (flags);
}

/*
 * TCP input routine, follows pages 65-76 of the
 * protocol specification dated September, 1981 very closely.
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);
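	/*
	 * A TCP connection cannot usefully terminate at an anycast address,
	 * so a segment delivered to an anycast interface address is rejected
	 * here with an ICMPv6 "address unreachable" error rather than being
	 * passed on to tcp_input().
	 */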
	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return IPPROTO_DONE;
	}

	tcp_input(m, *offp);
	return IPPROTO_DONE;
}
#endif

void
tcp_input(struct mbuf *m, int off0)
{
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ipovly *ipov;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int optlen = 0;
	int len, tlen, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
#ifdef IPFIREWALL_FORWARD
	struct m_tag *fwd_tag;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
	char ip6buf[INET6_ADDRSTRLEN];
#else
	const int isipv6 = 0;
#endif
	struct tcpopt to;	/* options in this segment */

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	to.to_flags = 0;
	tcpstat.tcps_rcvtotal++;

	if (isipv6) {
#ifdef INET6
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input() */
		ip6 = mtod(m, struct ip6_hdr *);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		th = (struct tcphdr *)((caddr_t)ip6 + off0);

		/*
		 * Be proactive about unspecified IPv6 addresses in the
		 * source.  As we use the all-zeros address to indicate an
		 * unbound/unconnected pcb, an unspecified IPv6 source
		 * address can be used to confuse us.
		 *
		 * Note that packets with an unspecified IPv6 destination
		 * are already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
#else
		th = NULL;	/* XXX: avoid compiler warning */
#endif
	} else {
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m, (struct mbuf *)0);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				tcpstat.tcps_rcvshort++;
				return;
			}
		}
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ip->ip_len;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data +
					ip->ip_len + IPPROTO_TCP));
			th->th_sum ^= 0xffff;
#ifdef TCPDEBUG
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
#endif
		} else {
			/*
			 * Checksum extended TCP header and data.
544 */ 545 len = sizeof (struct ip) + tlen; 546 bzero(ipov->ih_x1, sizeof(ipov->ih_x1)); 547 ipov->ih_len = (u_short)tlen; 548 ipov->ih_len = htons(ipov->ih_len); 549 th->th_sum = in_cksum(m, len); 550 } 551 if (th->th_sum) { 552 tcpstat.tcps_rcvbadsum++; 553 goto drop; 554 } 555 /* Re-initialization for later version check */ 556 ip->ip_v = IPVERSION; 557 } 558 559 /* 560 * Check that TCP offset makes sense, 561 * pull out TCP options and adjust length. XXX 562 */ 563 off = th->th_off << 2; 564 if (off < sizeof (struct tcphdr) || off > tlen) { 565 tcpstat.tcps_rcvbadoff++; 566 goto drop; 567 } 568 tlen -= off; /* tlen is used instead of ti->ti_len */ 569 if (off > sizeof (struct tcphdr)) { 570 if (isipv6) { 571#ifdef INET6 572 IP6_EXTHDR_CHECK(m, off0, off, ); 573 ip6 = mtod(m, struct ip6_hdr *); 574 th = (struct tcphdr *)((caddr_t)ip6 + off0); 575#endif 576 } else { 577 if (m->m_len < sizeof(struct ip) + off) { 578 if ((m = m_pullup(m, sizeof (struct ip) + off)) 579 == NULL) { 580 tcpstat.tcps_rcvshort++; 581 return; 582 } 583 ip = mtod(m, struct ip *); 584 ipov = (struct ipovly *)ip; 585 th = (struct tcphdr *)((caddr_t)ip + off0); 586 } 587 } 588 optlen = off - sizeof (struct tcphdr); 589 optp = (u_char *)(th + 1); 590 } 591 thflags = th->th_flags; 592 593 /* 594 * If the drop_synfin option is enabled, drop all packets with 595 * both the SYN and FIN bits set. This prevents e.g. nmap from 596 * identifying the TCP/IP stack. 597 * 598 * This is a violation of the TCP specification. 599 */ 600 if (drop_synfin && (thflags & (TH_SYN|TH_FIN)) == (TH_SYN|TH_FIN)) 601 goto drop; 602 603 /* 604 * Convert TCP protocol specific fields to host format. 605 */ 606 th->th_seq = ntohl(th->th_seq); 607 th->th_ack = ntohl(th->th_ack); 608 th->th_win = ntohs(th->th_win); 609 th->th_urp = ntohs(th->th_urp); 610 611 /* 612 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options. 613 */ 614 drop_hdrlen = off0 + off; 615 616 /* 617 * Locate pcb for segment. 618 */ 619 INP_INFO_WLOCK(&tcbinfo); 620findpcb: 621 INP_INFO_WLOCK_ASSERT(&tcbinfo); 622#ifdef IPFIREWALL_FORWARD 623 /* Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain. */ 624 fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL); 625 626 if (fwd_tag != NULL && isipv6 == 0) { /* IPv6 support is not yet */ 627 struct sockaddr_in *next_hop; 628 629 next_hop = (struct sockaddr_in *)(fwd_tag+1); 630 /* 631 * Transparently forwarded. Pretend to be the destination. 632 * already got one like this? 633 */ 634 inp = in_pcblookup_hash(&tcbinfo, 635 ip->ip_src, th->th_sport, 636 ip->ip_dst, th->th_dport, 637 0, m->m_pkthdr.rcvif); 638 if (!inp) { 639 /* It's new. Try to find the ambushing socket. */ 640 inp = in_pcblookup_hash(&tcbinfo, 641 ip->ip_src, th->th_sport, 642 next_hop->sin_addr, 643 next_hop->sin_port ? 644 ntohs(next_hop->sin_port) : 645 th->th_dport, 646 INPLOOKUP_WILDCARD, 647 m->m_pkthdr.rcvif); 648 } 649 /* Remove the tag from the packet. We don't need it anymore. 
*/ 650 m_tag_delete(m, fwd_tag); 651 } else 652#endif /* IPFIREWALL_FORWARD */ 653 { 654 if (isipv6) { 655#ifdef INET6 656 inp = in6_pcblookup_hash(&tcbinfo, 657 &ip6->ip6_src, th->th_sport, 658 &ip6->ip6_dst, th->th_dport, 659 INPLOOKUP_WILDCARD, 660 m->m_pkthdr.rcvif); 661#endif 662 } else 663 inp = in_pcblookup_hash(&tcbinfo, 664 ip->ip_src, th->th_sport, 665 ip->ip_dst, th->th_dport, 666 INPLOOKUP_WILDCARD, 667 m->m_pkthdr.rcvif); 668 } 669 670#if defined(IPSEC) || defined(FAST_IPSEC) 671#ifdef INET6 672 if (isipv6 && inp != NULL && ipsec6_in_reject(m, inp)) { 673#ifdef IPSEC 674 ipsec6stat.in_polvio++; 675#endif 676 goto dropunlock; 677 } else 678#endif /* INET6 */ 679 if (inp != NULL && ipsec4_in_reject(m, inp)) { 680#ifdef IPSEC 681 ipsecstat.in_polvio++; 682#endif 683 goto dropunlock; 684 } 685#endif /*IPSEC || FAST_IPSEC*/ 686 687 /* 688 * If the INPCB does not exist then all data in the incoming 689 * segment is discarded and an appropriate RST is sent back. 690 */ 691 if (inp == NULL) { 692 /* 693 * Log communication attempts to ports that are not 694 * in use. 695 */ 696 if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) || 697 tcp_log_in_vain == 2) { 698#ifndef INET6 699 char dbuf[4*sizeof "123"], sbuf[4*sizeof "123"]; 700#else 701 char dbuf[INET6_ADDRSTRLEN+2], sbuf[INET6_ADDRSTRLEN+2]; 702 if (isipv6) { 703 strcpy(dbuf, "["); 704 strcat(dbuf, 705 ip6_sprintf(ip6buf, &ip6->ip6_dst)); 706 strcat(dbuf, "]"); 707 strcpy(sbuf, "["); 708 strcat(sbuf, 709 ip6_sprintf(ip6buf, &ip6->ip6_src)); 710 strcat(sbuf, "]"); 711 } else 712#endif /* INET6 */ 713 { 714 strcpy(dbuf, inet_ntoa(ip->ip_dst)); 715 strcpy(sbuf, inet_ntoa(ip->ip_src)); 716 } 717 log(LOG_INFO, 718 "Connection attempt to TCP %s:%d " 719 "from %s:%d flags:0x%02x\n", 720 dbuf, ntohs(th->th_dport), sbuf, 721 ntohs(th->th_sport), thflags); 722 } 723 /* 724 * When blackholing do not respond with a RST but 725 * completely ignore the segment and drop it. 726 */ 727 if ((blackhole == 1 && (thflags & TH_SYN)) || 728 blackhole == 2) 729 goto dropunlock; 730 731 rstreason = BANDLIM_RST_CLOSEDPORT; 732 goto dropwithreset; 733 } 734 INP_LOCK(inp); 735 736 /* Check the minimum TTL for socket. */ 737 if (inp->inp_ip_minttl != 0) { 738#ifdef INET6 739 if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim) 740 goto dropunlock; 741 else 742#endif 743 if (inp->inp_ip_minttl > ip->ip_ttl) 744 goto dropunlock; 745 } 746 747 /* 748 * A previous connection in TIMEWAIT state is supposed to catch 749 * stray or duplicate segments arriving late. If this segment 750 * was a legitimate new connection attempt the old INPCB gets 751 * removed and we can try again to find a listening socket. 752 */ 753 if (inp->inp_vflag & INP_TIMEWAIT) { 754 if (thflags & TH_SYN) 755 tcp_dooptions(&to, optp, optlen, TO_SYN); 756 if (tcp_timewait(inp, &to, th, m, tlen)) 757 goto findpcb; 758 /* tcp_timewait unlocks inp. */ 759 INP_INFO_WUNLOCK(&tcbinfo); 760 return; 761 } 762 /* 763 * The TCPCB may no longer exist if the connection is winding 764 * down or it is in the CLOSED state. Either way we drop the 765 * segment and send an appropriate response. 766 */ 767 tp = intotcpcb(inp); 768 if (tp == NULL) { 769 INP_UNLOCK(inp); 770 rstreason = BANDLIM_RST_CLOSEDPORT; 771 goto dropwithreset; 772 } 773 if (tp->t_state == TCPS_CLOSED) 774 goto dropunlock; /* XXX: dropwithreset??? 
*/ 775 776#ifdef MAC 777 INP_LOCK_ASSERT(inp); 778 if (mac_check_inpcb_deliver(inp, m)) 779 goto dropunlock; 780#endif 781 so = inp->inp_socket; 782 KASSERT(so != NULL, ("%s: so == NULL", __func__)); 783#ifdef TCPDEBUG 784 if (so->so_options & SO_DEBUG) { 785 ostate = tp->t_state; 786 if (isipv6) 787 bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6)); 788 else 789 bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip)); 790 tcp_savetcp = *th; 791 } 792#endif 793 /* 794 * When the socket is accepting connections (the INPCB is in LISTEN 795 * state) we look into the SYN cache if this is a new connection 796 * attempt or the completion of a previous one. 797 */ 798 if (so->so_options & SO_ACCEPTCONN) { 799 struct in_conninfo inc; 800 801 bzero(&inc, sizeof(inc)); 802 inc.inc_isipv6 = isipv6; 803#ifdef INET6 804 if (isipv6) { 805 inc.inc6_faddr = ip6->ip6_src; 806 inc.inc6_laddr = ip6->ip6_dst; 807 } else 808#endif 809 { 810 inc.inc_faddr = ip->ip_src; 811 inc.inc_laddr = ip->ip_dst; 812 } 813 inc.inc_fport = th->th_sport; 814 inc.inc_lport = th->th_dport; 815 816 /* 817 * If the state is LISTEN then ignore segment if it contains 818 * a RST. If the segment contains an ACK then it is bad and 819 * send a RST. If it does not contain a SYN then it is not 820 * interesting; drop it. 821 * 822 * If the state is SYN_RECEIVED (syncache) and seg contains 823 * an ACK, but not for our SYN/ACK, send a RST. If the seg 824 * contains a RST, check the sequence number to see if it 825 * is a valid reset segment. 826 */ 827 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) != TH_SYN) { 828 if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) { 829 /* 830 * Parse the TCP options here because 831 * syncookies need access to the reflected 832 * timestamp. 833 */ 834 tcp_dooptions(&to, optp, optlen, 0); 835 if (!syncache_expand(&inc, &to, th, &so, m)) { 836 /* 837 * No syncache entry, or ACK was not 838 * for our SYN/ACK. Send a RST. 839 */ 840 tcpstat.tcps_badsyn++; 841 rstreason = BANDLIM_RST_OPENPORT; 842 goto dropwithreset; 843 } 844 if (so == NULL) { 845 /* 846 * Could not complete 3-way handshake, 847 * connection is being closed down, and 848 * syncache has free'd mbuf. 849 */ 850 INP_UNLOCK(inp); 851 INP_INFO_WUNLOCK(&tcbinfo); 852 return; 853 } 854 /* 855 * Socket is created in state SYN_RECEIVED. 856 * Continue processing segment. 857 */ 858 INP_UNLOCK(inp); 859 inp = sotoinpcb(so); 860 INP_LOCK(inp); 861 tp = intotcpcb(inp); 862 /* 863 * This is what would have happened in 864 * tcp_output() when the SYN,ACK was sent. 865 */ 866 tp->snd_up = tp->snd_una; 867 tp->snd_max = tp->snd_nxt = tp->iss + 1; 868 tp->last_ack_sent = tp->rcv_nxt; 869 870 /* 871 * Process the segment and the data it 872 * contains. tcp_do_segment() consumes 873 * the mbuf chain and unlocks the inpcb. 874 * XXX: The potential return value of 875 * TIME_WAIT nuked is supposed to be 876 * handled above. 877 */ 878 if (tcp_do_segment(m, th, so, tp, 879 drop_hdrlen, tlen)) 880 goto findpcb; /* TIME_WAIT nuked */ 881 return; 882 } 883 if (thflags & TH_RST) { 884 syncache_chkrst(&inc, th); 885 goto dropunlock; 886 } 887 if (thflags & TH_ACK) { 888 syncache_badack(&inc); 889 tcpstat.tcps_badsyn++; 890 rstreason = BANDLIM_RST_OPENPORT; 891 goto dropwithreset; 892 } 893 goto dropunlock; 894 } 895 896 /* 897 * Segment's flags are (SYN) or (SYN|FIN). 
898 */ 899#ifdef INET6 900 /* 901 * If deprecated address is forbidden, 902 * we do not accept SYN to deprecated interface 903 * address to prevent any new inbound connection from 904 * getting established. 905 * When we do not accept SYN, we send a TCP RST, 906 * with deprecated source address (instead of dropping 907 * it). We compromise it as it is much better for peer 908 * to send a RST, and RST will be the final packet 909 * for the exchange. 910 * 911 * If we do not forbid deprecated addresses, we accept 912 * the SYN packet. RFC2462 does not suggest dropping 913 * SYN in this case. 914 * If we decipher RFC2462 5.5.4, it says like this: 915 * 1. use of deprecated addr with existing 916 * communication is okay - "SHOULD continue to be 917 * used" 918 * 2. use of it with new communication: 919 * (2a) "SHOULD NOT be used if alternate address 920 * with sufficient scope is available" 921 * (2b) nothing mentioned otherwise. 922 * Here we fall into (2b) case as we have no choice in 923 * our source address selection - we must obey the peer. 924 * 925 * The wording in RFC2462 is confusing, and there are 926 * multiple description text for deprecated address 927 * handling - worse, they are not exactly the same. 928 * I believe 5.5.4 is the best one, so we follow 5.5.4. 929 */ 930 if (isipv6 && !ip6_use_deprecated) { 931 struct in6_ifaddr *ia6; 932 933 if ((ia6 = ip6_getdstifaddr(m)) && 934 (ia6->ia6_flags & IN6_IFF_DEPRECATED)) { 935 INP_UNLOCK(inp); 936 tp = NULL; 937 rstreason = BANDLIM_RST_OPENPORT; 938 goto dropwithreset; 939 } 940 } 941#endif 942 /* 943 * Basic sanity checks on incoming SYN requests: 944 * 945 * Don't bother responding if the destination was a 946 * broadcast according to RFC1122 4.2.3.10, p. 104. 947 * 948 * If it is from this socket, drop it, it must be forged. 949 * 950 * Note that it is quite possible to receive unicast 951 * link-layer packets with a broadcast IP address. Use 952 * in_broadcast() to find them. 953 */ 954 if (m->m_flags & (M_BCAST|M_MCAST)) 955 goto dropunlock; 956 if (isipv6) { 957#ifdef INET6 958 if (th->th_dport == th->th_sport && 959 IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) 960 goto dropunlock; 961 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 962 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 963 goto dropunlock; 964#endif 965 } else { 966 if (th->th_dport == th->th_sport && 967 ip->ip_dst.s_addr == ip->ip_src.s_addr) 968 goto dropunlock; 969 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 970 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 971 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 972 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 973 goto dropunlock; 974 } 975 /* 976 * SYN appears to be valid. Create compressed TCP state 977 * for syncache. 978 */ 979 if (so->so_qlen <= so->so_qlimit) { 980#ifdef TCPDEBUG 981 if (so->so_options & SO_DEBUG) 982 tcp_trace(TA_INPUT, ostate, tp, 983 (void *)tcp_saveipgen, &tcp_savetcp, 0); 984#endif 985 tcp_dooptions(&to, optp, optlen, TO_SYN); 986 if (!syncache_add(&inc, &to, th, inp, &so, m)) 987 goto dropunlock; 988 /* 989 * Entry added to syncache, mbuf used to 990 * send SYN-ACK packet. Everything unlocked 991 * already. 992 */ 993 return; 994 } 995 /* Catch all. Everthing that makes it down here is junk. */ 996 goto dropunlock; 997 } 998 999 /* 1000 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or 1001 * later state. tcp_do_segment() always consumes the mbuf chain 1002 * and unlocks the inpcb. 
1003 */ 1004 if (tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen)) 1005 goto findpcb; /* XXX: TIME_WAIT was nuked. */ 1006 return; 1007 1008dropwithreset: 1009 tcp_dropwithreset(m, th, tp, tlen, rstreason); 1010 m = NULL; /* mbuf chain got consumed. */ 1011dropunlock: 1012 if (tp != NULL) 1013 INP_UNLOCK(inp); 1014 INP_INFO_WUNLOCK(&tcbinfo); 1015drop: 1016 if (m != NULL) 1017 m_freem(m); 1018 return; 1019} 1020 1021static int 1022tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so, 1023 struct tcpcb *tp, int drop_hdrlen, int tlen) 1024{ 1025 int thflags, acked, ourfinisacked, needoutput = 0; 1026 int headlocked = 1; 1027 int rstreason, todrop, win; 1028 u_long tiwin; 1029 struct tcpopt to; 1030 1031#ifdef TCPDEBUG 1032 /* 1033 * The size of tcp_saveipgen must be the size of the max ip header, 1034 * now IPv6. 1035 */ 1036 u_char tcp_saveipgen[IP6_HDR_LEN]; 1037 struct tcphdr tcp_savetcp; 1038 short ostate = 0; 1039#endif 1040 thflags = th->th_flags; 1041 1042 INP_INFO_WLOCK_ASSERT(&tcbinfo); 1043 INP_LOCK_ASSERT(tp->t_inpcb); 1044 KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN", __func__)); 1045 1046 /* 1047 * Segment received on connection. 1048 * Reset idle time and keep-alive timer. 1049 */ 1050 tp->t_rcvtime = ticks; 1051 if (TCPS_HAVEESTABLISHED(tp->t_state)) 1052 callout_reset(tp->tt_keep, tcp_keepidle, tcp_timer_keep, tp); 1053 1054 /* 1055 * Unscale the window into a 32-bit value. 1056 * This value is bogus for the TCPS_SYN_SENT state 1057 * and is overwritten later. 1058 */ 1059 tiwin = th->th_win << tp->snd_scale; 1060 1061 /* 1062 * Parse options on any incoming segment. 1063 */ 1064 tcp_dooptions(&to, (u_char *)(th + 1), 1065 (th->th_off << 2) - sizeof(struct tcphdr), 1066 (thflags & TH_SYN) ? TO_SYN : 0); 1067 1068 /* 1069 * If echoed timestamp is later than the current time, 1070 * fall back to non RFC1323 RTT calculation. Normalize 1071 * timestamp if syncookies were used when this connection 1072 * was established. 1073 */ 1074 if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) { 1075 to.to_tsecr -= tp->ts_offset; 1076 if (TSTMP_GT(to.to_tsecr, ticks)) 1077 to.to_tsecr = 0; 1078 } 1079 1080 /* 1081 * Process options only when we get SYN/ACK back. The SYN case 1082 * for incoming connections is handled in tcp_syncache. 1083 * XXX this is traditional behavior, may need to be cleaned up. 1084 */ 1085 if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) { 1086 if ((to.to_flags & TOF_SCALE) && 1087 (tp->t_flags & TF_REQ_SCALE)) { 1088 tp->t_flags |= TF_RCVD_SCALE; 1089 tp->snd_scale = to.to_wscale; 1090 tp->snd_wnd = th->th_win << tp->snd_scale; 1091 tiwin = tp->snd_wnd; 1092 } 1093 if (to.to_flags & TOF_TS) { 1094 tp->t_flags |= TF_RCVD_TSTMP; 1095 tp->ts_recent = to.to_tsval; 1096 tp->ts_recent_age = ticks; 1097 } 1098 /* Initial send window, already scaled. */ 1099 tp->snd_wnd = th->th_win; 1100 if (to.to_flags & TOF_MSS) 1101 tcp_mss(tp, to.to_mss); 1102 if (tp->sack_enable) { 1103 if (!(to.to_flags & TOF_SACKPERM)) 1104 tp->sack_enable = 0; 1105 else 1106 tp->t_flags |= TF_SACK_PERMIT; 1107 } 1108 1109 } 1110 1111 /* 1112 * Header prediction: check for the two common cases 1113 * of a uni-directional data xfer. If the packet has 1114 * no control flags, is in-sequence, the window didn't 1115 * change and we're not retransmitting, it's a 1116 * candidate. If the length is zero and the ack moved 1117 * forward, we're the sender side of the xfer. Just 1118 * free the data acked & wake any higher level process 1119 * that was blocked waiting for space. 
If the length 1120 * is non-zero and the ack didn't move, we're the 1121 * receiver side. If we're getting packets in-order 1122 * (the reassembly queue is empty), add the data to 1123 * the socket buffer and note that we need a delayed ack. 1124 * Make sure that the hidden state-flags are also off. 1125 * Since we check for TCPS_ESTABLISHED above, it can only 1126 * be TH_NEEDSYN. 1127 */ 1128 if (tp->t_state == TCPS_ESTABLISHED && 1129 (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK && 1130 ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) && 1131 ((to.to_flags & TOF_TS) == 0 || 1132 TSTMP_GEQ(to.to_tsval, tp->ts_recent)) && 1133 th->th_seq == tp->rcv_nxt && tiwin && tiwin == tp->snd_wnd && 1134 tp->snd_nxt == tp->snd_max) { 1135 1136 /* 1137 * If last ACK falls within this segment's sequence numbers, 1138 * record the timestamp. 1139 * NOTE that the test is modified according to the latest 1140 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1141 */ 1142 if ((to.to_flags & TOF_TS) != 0 && 1143 SEQ_LEQ(th->th_seq, tp->last_ack_sent)) { 1144 tp->ts_recent_age = ticks; 1145 tp->ts_recent = to.to_tsval; 1146 } 1147 1148 if (tlen == 0) { 1149 if (SEQ_GT(th->th_ack, tp->snd_una) && 1150 SEQ_LEQ(th->th_ack, tp->snd_max) && 1151 tp->snd_cwnd >= tp->snd_wnd && 1152 ((!tcp_do_newreno && !tp->sack_enable && 1153 tp->t_dupacks < tcprexmtthresh) || 1154 ((tcp_do_newreno || tp->sack_enable) && 1155 !IN_FASTRECOVERY(tp) && 1156 (to.to_flags & TOF_SACK) == 0 && 1157 TAILQ_EMPTY(&tp->snd_holes)))) { 1158 KASSERT(headlocked, 1159 ("%s: headlocked", __func__)); 1160 INP_INFO_WUNLOCK(&tcbinfo); 1161 headlocked = 0; 1162 /* 1163 * this is a pure ack for outstanding data. 1164 */ 1165 ++tcpstat.tcps_predack; 1166 /* 1167 * "bad retransmit" recovery 1168 */ 1169 if (tp->t_rxtshift == 1 && 1170 ticks < tp->t_badrxtwin) { 1171 ++tcpstat.tcps_sndrexmitbad; 1172 tp->snd_cwnd = tp->snd_cwnd_prev; 1173 tp->snd_ssthresh = 1174 tp->snd_ssthresh_prev; 1175 tp->snd_recover = tp->snd_recover_prev; 1176 if (tp->t_flags & TF_WASFRECOVERY) 1177 ENTER_FASTRECOVERY(tp); 1178 tp->snd_nxt = tp->snd_max; 1179 tp->t_badrxtwin = 0; 1180 } 1181 1182 /* 1183 * Recalculate the transmit timer / rtt. 1184 * 1185 * Some boxes send broken timestamp replies 1186 * during the SYN+ACK phase, ignore 1187 * timestamps of 0 or we could calculate a 1188 * huge RTT and blow up the retransmit timer. 1189 */ 1190 if ((to.to_flags & TOF_TS) != 0 && 1191 to.to_tsecr) { 1192 if (!tp->t_rttlow || 1193 tp->t_rttlow > ticks - to.to_tsecr) 1194 tp->t_rttlow = ticks - to.to_tsecr; 1195 tcp_xmit_timer(tp, 1196 ticks - to.to_tsecr + 1); 1197 } else if (tp->t_rtttime && 1198 SEQ_GT(th->th_ack, tp->t_rtseq)) { 1199 if (!tp->t_rttlow || 1200 tp->t_rttlow > ticks - tp->t_rtttime) 1201 tp->t_rttlow = ticks - tp->t_rtttime; 1202 tcp_xmit_timer(tp, 1203 ticks - tp->t_rtttime); 1204 } 1205 tcp_xmit_bandwidth_limit(tp, th->th_ack); 1206 acked = th->th_ack - tp->snd_una; 1207 tcpstat.tcps_rcvackpack++; 1208 tcpstat.tcps_rcvackbyte += acked; 1209 sbdrop(&so->so_snd, acked); 1210 if (SEQ_GT(tp->snd_una, tp->snd_recover) && 1211 SEQ_LEQ(th->th_ack, tp->snd_recover)) 1212 tp->snd_recover = th->th_ack - 1; 1213 tp->snd_una = th->th_ack; 1214 /* 1215 * pull snd_wl2 up to prevent seq wrap relative 1216 * to th_ack. 
1217 */ 1218 tp->snd_wl2 = th->th_ack; 1219 tp->t_dupacks = 0; 1220 m_freem(m); 1221 ND6_HINT(tp); /* some progress has been done */ 1222 1223 /* 1224 * If all outstanding data are acked, stop 1225 * retransmit timer, otherwise restart timer 1226 * using current (possibly backed-off) value. 1227 * If process is waiting for space, 1228 * wakeup/selwakeup/signal. If data 1229 * are ready to send, let tcp_output 1230 * decide between more output or persist. 1231 1232#ifdef TCPDEBUG 1233 if (so->so_options & SO_DEBUG) 1234 tcp_trace(TA_INPUT, ostate, tp, 1235 (void *)tcp_saveipgen, 1236 &tcp_savetcp, 0); 1237#endif 1238 */ 1239 if (tp->snd_una == tp->snd_max) 1240 callout_stop(tp->tt_rexmt); 1241 else if (!callout_active(tp->tt_persist)) 1242 callout_reset(tp->tt_rexmt, 1243 tp->t_rxtcur, 1244 tcp_timer_rexmt, tp); 1245 1246 sowwakeup(so); 1247 if (so->so_snd.sb_cc) 1248 (void) tcp_output(tp); 1249 goto check_delack; 1250 } 1251 } else if (th->th_ack == tp->snd_una && 1252 LIST_EMPTY(&tp->t_segq) && 1253 tlen <= sbspace(&so->so_rcv)) { 1254 int newsize = 0; /* automatic sockbuf scaling */ 1255 1256 KASSERT(headlocked, ("%s: headlocked", __func__)); 1257 INP_INFO_WUNLOCK(&tcbinfo); 1258 headlocked = 0; 1259 /* 1260 * this is a pure, in-sequence data packet 1261 * with nothing on the reassembly queue and 1262 * we have enough buffer space to take it. 1263 */ 1264 /* Clean receiver SACK report if present */ 1265 if (tp->sack_enable && tp->rcv_numsacks) 1266 tcp_clean_sackreport(tp); 1267 ++tcpstat.tcps_preddat; 1268 tp->rcv_nxt += tlen; 1269 /* 1270 * Pull snd_wl1 up to prevent seq wrap relative to 1271 * th_seq. 1272 */ 1273 tp->snd_wl1 = th->th_seq; 1274 /* 1275 * Pull rcv_up up to prevent seq wrap relative to 1276 * rcv_nxt. 1277 */ 1278 tp->rcv_up = tp->rcv_nxt; 1279 tcpstat.tcps_rcvpack++; 1280 tcpstat.tcps_rcvbyte += tlen; 1281 ND6_HINT(tp); /* some progress has been done */ 1282#ifdef TCPDEBUG 1283 if (so->so_options & SO_DEBUG) 1284 tcp_trace(TA_INPUT, ostate, tp, 1285 (void *)tcp_saveipgen, &tcp_savetcp, 0); 1286#endif 1287 /* 1288 * Automatic sizing of receive socket buffer. Often the send 1289 * buffer size is not optimally adjusted to the actual network 1290 * conditions at hand (delay bandwidth product). Setting the 1291 * buffer size too small limits throughput on links with high 1292 * bandwidth and high delay (eg. trans-continental/oceanic links). 1293 * 1294 * On the receive side the socket buffer memory is only rarely 1295 * used to any significant extent. This allows us to be much 1296 * more aggressive in scaling the receive socket buffer. For 1297 * the case that the buffer space is actually used to a large 1298 * extent and we run out of kernel memory we can simply drop 1299 * the new segments; TCP on the sender will just retransmit it 1300 * later. Setting the buffer size too big may only consume too 1301 * much kernel memory if the application doesn't read() from 1302 * the socket or packet loss or reordering makes use of the 1303 * reassembly queue. 1304 * 1305 * The criteria to step up the receive buffer one notch are: 1306 * 1. the number of bytes received during the time it takes 1307 * one timestamp to be reflected back to us (the RTT); 1308 * 2. received bytes per RTT is within seven eighth of the 1309 * current socket buffer size; 1310 * 3. receive buffer size has not hit maximal automatic size; 1311 * 1312 * This algorithm does one step per RTT at most and only if 1313 * we receive a bulk stream w/o packet losses or reorderings. 
1314 * Shrinking the buffer during idle times is not necessary as 1315 * it doesn't consume any memory when idle. 1316 * 1317 * TODO: Only step up if the application is actually serving 1318 * the buffer to better manage the socket buffer resources. 1319 */ 1320 if (tcp_do_autorcvbuf && 1321 to.to_tsecr && 1322 (so->so_rcv.sb_flags & SB_AUTOSIZE)) { 1323 if (to.to_tsecr > tp->rfbuf_ts && 1324 to.to_tsecr - tp->rfbuf_ts < hz) { 1325 if (tp->rfbuf_cnt > 1326 (so->so_rcv.sb_hiwat / 8 * 7) && 1327 so->so_rcv.sb_hiwat < 1328 tcp_autorcvbuf_max) { 1329 newsize = 1330 min(so->so_rcv.sb_hiwat + 1331 tcp_autorcvbuf_inc, 1332 tcp_autorcvbuf_max); 1333 } 1334 /* Start over with next RTT. */ 1335 tp->rfbuf_ts = 0; 1336 tp->rfbuf_cnt = 0; 1337 } else 1338 tp->rfbuf_cnt += tlen; /* add up */ 1339 } 1340 1341 /* Add data to socket buffer. */ 1342 SOCKBUF_LOCK(&so->so_rcv); 1343 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 1344 m_freem(m); 1345 } else { 1346 /* 1347 * Set new socket buffer size. 1348 * Give up when limit is reached. 1349 */ 1350 if (newsize) 1351 if (!sbreserve_locked(&so->so_rcv, 1352 newsize, so, curthread)) 1353 so->so_rcv.sb_flags &= ~SB_AUTOSIZE; 1354 m_adj(m, drop_hdrlen); /* delayed header drop */ 1355 sbappendstream_locked(&so->so_rcv, m); 1356 } 1357 sorwakeup_locked(so); 1358 if (DELAY_ACK(tp)) { 1359 tp->t_flags |= TF_DELACK; 1360 } else { 1361 tp->t_flags |= TF_ACKNOW; 1362 tcp_output(tp); 1363 } 1364 goto check_delack; 1365 } 1366 } 1367 1368 /* 1369 * Calculate amount of space in receive window, 1370 * and then do TCP input processing. 1371 * Receive window is amount of space in rcv queue, 1372 * but not less than advertised window. 1373 */ 1374 win = sbspace(&so->so_rcv); 1375 if (win < 0) 1376 win = 0; 1377 tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt)); 1378 1379 /* Reset receive buffer auto scaling when not in bulk receive mode. */ 1380 tp->rfbuf_ts = 0; 1381 tp->rfbuf_cnt = 0; 1382 1383 switch (tp->t_state) { 1384 1385 /* 1386 * If the state is SYN_RECEIVED: 1387 * if seg contains an ACK, but not for our SYN/ACK, send a RST. 1388 */ 1389 case TCPS_SYN_RECEIVED: 1390 if ((thflags & TH_ACK) && 1391 (SEQ_LEQ(th->th_ack, tp->snd_una) || 1392 SEQ_GT(th->th_ack, tp->snd_max))) { 1393 rstreason = BANDLIM_RST_OPENPORT; 1394 goto dropwithreset; 1395 } 1396 break; 1397 1398 /* 1399 * If the state is SYN_SENT: 1400 * if seg contains an ACK, but not for our SYN, drop the input. 1401 * if seg contains a RST, then drop the connection. 1402 * if seg does not contain SYN, then drop it. 
1403 * Otherwise this is an acceptable SYN segment 1404 * initialize tp->rcv_nxt and tp->irs 1405 * if seg contains ack then advance tp->snd_una 1406 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state 1407 * arrange for segment to be acked (eventually) 1408 * continue processing rest of data/controls, beginning with URG 1409 */ 1410 case TCPS_SYN_SENT: 1411 if ((thflags & TH_ACK) && 1412 (SEQ_LEQ(th->th_ack, tp->iss) || 1413 SEQ_GT(th->th_ack, tp->snd_max))) { 1414 rstreason = BANDLIM_UNLIMITED; 1415 goto dropwithreset; 1416 } 1417 if (thflags & TH_RST) { 1418 if (thflags & TH_ACK) { 1419 KASSERT(headlocked, ("%s: after_listen: " 1420 "tcp_drop.2: head not locked", __func__)); 1421 tp = tcp_drop(tp, ECONNREFUSED); 1422 } 1423 goto drop; 1424 } 1425 if ((thflags & TH_SYN) == 0) 1426 goto drop; 1427 1428 tp->irs = th->th_seq; 1429 tcp_rcvseqinit(tp); 1430 if (thflags & TH_ACK) { 1431 tcpstat.tcps_connects++; 1432 soisconnected(so); 1433#ifdef MAC 1434 SOCK_LOCK(so); 1435 mac_set_socket_peer_from_mbuf(m, so); 1436 SOCK_UNLOCK(so); 1437#endif 1438 /* Do window scaling on this connection? */ 1439 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1440 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1441 tp->rcv_scale = tp->request_r_scale; 1442 } 1443 tp->rcv_adv += tp->rcv_wnd; 1444 tp->snd_una++; /* SYN is acked */ 1445 /* 1446 * If there's data, delay ACK; if there's also a FIN 1447 * ACKNOW will be turned on later. 1448 */ 1449 if (DELAY_ACK(tp) && tlen != 0) 1450 callout_reset(tp->tt_delack, tcp_delacktime, 1451 tcp_timer_delack, tp); 1452 else 1453 tp->t_flags |= TF_ACKNOW; 1454 /* 1455 * Received <SYN,ACK> in SYN_SENT[*] state. 1456 * Transitions: 1457 * SYN_SENT --> ESTABLISHED 1458 * SYN_SENT* --> FIN_WAIT_1 1459 */ 1460 tp->t_starttime = ticks; 1461 if (tp->t_flags & TF_NEEDFIN) { 1462 tp->t_state = TCPS_FIN_WAIT_1; 1463 tp->t_flags &= ~TF_NEEDFIN; 1464 thflags &= ~TH_SYN; 1465 } else { 1466 tp->t_state = TCPS_ESTABLISHED; 1467 callout_reset(tp->tt_keep, tcp_keepidle, 1468 tcp_timer_keep, tp); 1469 } 1470 } else { 1471 /* 1472 * Received initial SYN in SYN-SENT[*] state => 1473 * simultaneous open. If segment contains CC option 1474 * and there is a cached CC, apply TAO test. 1475 * If it succeeds, connection is * half-synchronized. 1476 * Otherwise, do 3-way handshake: 1477 * SYN-SENT -> SYN-RECEIVED 1478 * SYN-SENT* -> SYN-RECEIVED* 1479 * If there was no CC option, clear cached CC value. 1480 */ 1481 tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN); 1482 callout_stop(tp->tt_rexmt); 1483 tp->t_state = TCPS_SYN_RECEIVED; 1484 } 1485 1486 KASSERT(headlocked, ("%s: trimthenstep6: head not locked", 1487 __func__)); 1488 INP_LOCK_ASSERT(tp->t_inpcb); 1489 1490 /* 1491 * Advance th->th_seq to correspond to first data byte. 1492 * If data, trim to stay within window, 1493 * dropping FIN if necessary. 1494 */ 1495 th->th_seq++; 1496 if (tlen > tp->rcv_wnd) { 1497 todrop = tlen - tp->rcv_wnd; 1498 m_adj(m, -todrop); 1499 tlen = tp->rcv_wnd; 1500 thflags &= ~TH_FIN; 1501 tcpstat.tcps_rcvpackafterwin++; 1502 tcpstat.tcps_rcvbyteafterwin += todrop; 1503 } 1504 tp->snd_wl1 = th->th_seq - 1; 1505 tp->rcv_up = th->th_seq; 1506 /* 1507 * Client side of transaction: already sent SYN and data. 1508 * If the remote host used T/TCP to validate the SYN, 1509 * our data will be ACK'd; if so, enter normal data segment 1510 * processing in the middle of step 5, ack processing. 1511 * Otherwise, goto step 6. 
1512 */ 1513 if (thflags & TH_ACK) 1514 goto process_ACK; 1515 1516 goto step6; 1517 1518 /* 1519 * If the state is LAST_ACK or CLOSING or TIME_WAIT: 1520 * do normal processing. 1521 * 1522 * NB: Leftover from RFC1644 T/TCP. Cases to be reused later. 1523 */ 1524 case TCPS_LAST_ACK: 1525 case TCPS_CLOSING: 1526 case TCPS_TIME_WAIT: 1527 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: timewait", 1528 __func__)); 1529 break; /* continue normal processing */ 1530 } 1531 1532 /* 1533 * States other than LISTEN or SYN_SENT. 1534 * First check the RST flag and sequence number since reset segments 1535 * are exempt from the timestamp and connection count tests. This 1536 * fixes a bug introduced by the Stevens, vol. 2, p. 960 bugfix 1537 * below which allowed reset segments in half the sequence space 1538 * to fall though and be processed (which gives forged reset 1539 * segments with a random sequence number a 50 percent chance of 1540 * killing a connection). 1541 * Then check timestamp, if present. 1542 * Then check the connection count, if present. 1543 * Then check that at least some bytes of segment are within 1544 * receive window. If segment begins before rcv_nxt, 1545 * drop leading data (and SYN); if nothing left, just ack. 1546 * 1547 * 1548 * If the RST bit is set, check the sequence number to see 1549 * if this is a valid reset segment. 1550 * RFC 793 page 37: 1551 * In all states except SYN-SENT, all reset (RST) segments 1552 * are validated by checking their SEQ-fields. A reset is 1553 * valid if its sequence number is in the window. 1554 * Note: this does not take into account delayed ACKs, so 1555 * we should test against last_ack_sent instead of rcv_nxt. 1556 * The sequence number in the reset segment is normally an 1557 * echo of our outgoing acknowlegement numbers, but some hosts 1558 * send a reset with the sequence number at the rightmost edge 1559 * of our receive window, and we have to handle this case. 1560 * Note 2: Paul Watson's paper "Slipping in the Window" has shown 1561 * that brute force RST attacks are possible. To combat this, 1562 * we use a much stricter check while in the ESTABLISHED state, 1563 * only accepting RSTs where the sequence number is equal to 1564 * last_ack_sent. In all other states (the states in which a 1565 * RST is more likely), the more permissive check is used. 1566 * If we have multiple segments in flight, the intial reset 1567 * segment sequence numbers will be to the left of last_ack_sent, 1568 * but they will eventually catch up. 1569 * In any case, it never made sense to trim reset segments to 1570 * fit the receive window since RFC 1122 says: 1571 * 4.2.2.12 RST Segment: RFC-793 Section 3.4 1572 * 1573 * A TCP SHOULD allow a received RST segment to include data. 1574 * 1575 * DISCUSSION 1576 * It has been suggested that a RST segment could contain 1577 * ASCII text that encoded and explained the cause of the 1578 * RST. No standard has yet been established for such 1579 * data. 1580 * 1581 * If the reset segment passes the sequence number test examine 1582 * the state: 1583 * SYN_RECEIVED STATE: 1584 * If passive open, return to LISTEN state. 1585 * If active open, inform user that connection was refused. 1586 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES: 1587 * Inform user that connection was reset, and close tcb. 1588 * CLOSING, LAST_ACK STATES: 1589 * Close the tcb. 1590 * TIME_WAIT STATE: 1591 * Drop the segment - see Stevens, vol. 2, p. 964 and 1592 * RFC 1337. 
1593 */ 1594 if (thflags & TH_RST) { 1595 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) && 1596 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 1597 switch (tp->t_state) { 1598 1599 case TCPS_SYN_RECEIVED: 1600 so->so_error = ECONNREFUSED; 1601 goto close; 1602 1603 case TCPS_ESTABLISHED: 1604 if (tcp_insecure_rst == 0 && 1605 !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) && 1606 SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) && 1607 !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) && 1608 SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) { 1609 tcpstat.tcps_badrst++; 1610 goto drop; 1611 } 1612 case TCPS_FIN_WAIT_1: 1613 case TCPS_FIN_WAIT_2: 1614 case TCPS_CLOSE_WAIT: 1615 so->so_error = ECONNRESET; 1616 close: 1617 tp->t_state = TCPS_CLOSED; 1618 tcpstat.tcps_drops++; 1619 KASSERT(headlocked, ("%s: trimthenstep6: " 1620 "tcp_close: head not locked", __func__)); 1621 tp = tcp_close(tp); 1622 break; 1623 1624 case TCPS_CLOSING: 1625 case TCPS_LAST_ACK: 1626 KASSERT(headlocked, ("%s: trimthenstep6: " 1627 "tcp_close.2: head not locked", __func__)); 1628 tp = tcp_close(tp); 1629 break; 1630 1631 case TCPS_TIME_WAIT: 1632 KASSERT(tp->t_state != TCPS_TIME_WAIT, 1633 ("%s: timewait", __func__)); 1634 break; 1635 } 1636 } 1637 goto drop; 1638 } 1639 1640 /* 1641 * RFC 1323 PAWS: If we have a timestamp reply on this segment 1642 * and it's less than ts_recent, drop it. 1643 */ 1644 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 1645 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 1646 1647 /* Check to see if ts_recent is over 24 days old. */ 1648 if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) { 1649 /* 1650 * Invalidate ts_recent. If this segment updates 1651 * ts_recent, the age will be reset later and ts_recent 1652 * will get a valid value. If it does not, setting 1653 * ts_recent to zero will at least satisfy the 1654 * requirement that zero be placed in the timestamp 1655 * echo reply when ts_recent isn't valid. The 1656 * age isn't reset until we get a valid ts_recent 1657 * because we don't want out-of-order segments to be 1658 * dropped when ts_recent is old. 1659 */ 1660 tp->ts_recent = 0; 1661 } else { 1662 tcpstat.tcps_rcvduppack++; 1663 tcpstat.tcps_rcvdupbyte += tlen; 1664 tcpstat.tcps_pawsdrop++; 1665 if (tlen) 1666 goto dropafterack; 1667 goto drop; 1668 } 1669 } 1670 1671 /* 1672 * In the SYN-RECEIVED state, validate that the packet belongs to 1673 * this connection before trimming the data to fit the receive 1674 * window. Check the sequence number versus IRS since we know 1675 * the sequence numbers haven't wrapped. This is a partial fix 1676 * for the "LAND" DoS attack. 1677 */ 1678 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 1679 rstreason = BANDLIM_RST_OPENPORT; 1680 goto dropwithreset; 1681 } 1682 1683 todrop = tp->rcv_nxt - th->th_seq; 1684 if (todrop > 0) { 1685 if (thflags & TH_SYN) { 1686 thflags &= ~TH_SYN; 1687 th->th_seq++; 1688 if (th->th_urp > 1) 1689 th->th_urp--; 1690 else 1691 thflags &= ~TH_URG; 1692 todrop--; 1693 } 1694 /* 1695 * Following if statement from Stevens, vol. 2, p. 960. 1696 */ 1697 if (todrop > tlen 1698 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 1699 /* 1700 * Any valid FIN must be to the left of the window. 1701 * At this point the FIN must be a duplicate or out 1702 * of sequence; drop it. 1703 */ 1704 thflags &= ~TH_FIN; 1705 1706 /* 1707 * Send an ACK to resynchronize and drop any data. 1708 * But keep on processing for RST or ACK. 
1709 */ 1710 tp->t_flags |= TF_ACKNOW; 1711 todrop = tlen; 1712 tcpstat.tcps_rcvduppack++; 1713 tcpstat.tcps_rcvdupbyte += todrop; 1714 } else { 1715 tcpstat.tcps_rcvpartduppack++; 1716 tcpstat.tcps_rcvpartdupbyte += todrop; 1717 } 1718 drop_hdrlen += todrop; /* drop from the top afterwards */ 1719 th->th_seq += todrop; 1720 tlen -= todrop; 1721 if (th->th_urp > todrop) 1722 th->th_urp -= todrop; 1723 else { 1724 thflags &= ~TH_URG; 1725 th->th_urp = 0; 1726 } 1727 } 1728 1729 /* 1730 * If new data are received on a connection after the 1731 * user processes are gone, then RST the other end. 1732 */ 1733 if ((so->so_state & SS_NOFDREF) && 1734 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 1735 KASSERT(headlocked, ("%s: trimthenstep6: tcp_close.3: head " 1736 "not locked", __func__)); 1737 tp = tcp_close(tp); 1738 tcpstat.tcps_rcvafterclose++; 1739 rstreason = BANDLIM_UNLIMITED; 1740 goto dropwithreset; 1741 } 1742 1743 /* 1744 * If segment ends after window, drop trailing data 1745 * (and PUSH and FIN); if nothing left, just ACK. 1746 */ 1747 todrop = (th->th_seq+tlen) - (tp->rcv_nxt+tp->rcv_wnd); 1748 if (todrop > 0) { 1749 tcpstat.tcps_rcvpackafterwin++; 1750 if (todrop >= tlen) { 1751 tcpstat.tcps_rcvbyteafterwin += tlen; 1752 /* 1753 * If a new connection request is received 1754 * while in TIME_WAIT, drop the old connection 1755 * and start over if the sequence numbers 1756 * are above the previous ones. 1757 */ 1758 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: timewait", 1759 __func__)); 1760 if (thflags & TH_SYN && 1761 tp->t_state == TCPS_TIME_WAIT && 1762 SEQ_GT(th->th_seq, tp->rcv_nxt)) { 1763 KASSERT(headlocked, ("%s: trimthenstep6: " 1764 "tcp_close.4: head not locked", __func__)); 1765 tp = tcp_close(tp); 1766 /* XXX: Shouldn't be possible. */ 1767 return (1); 1768 } 1769 /* 1770 * If window is closed can only take segments at 1771 * window edge, and have to drop data and PUSH from 1772 * incoming segments. Continue processing, but 1773 * remember to ack. Otherwise, drop segment 1774 * and ack. 1775 */ 1776 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 1777 tp->t_flags |= TF_ACKNOW; 1778 tcpstat.tcps_rcvwinprobe++; 1779 } else 1780 goto dropafterack; 1781 } else 1782 tcpstat.tcps_rcvbyteafterwin += todrop; 1783 m_adj(m, -todrop); 1784 tlen -= todrop; 1785 thflags &= ~(TH_PUSH|TH_FIN); 1786 } 1787 1788 /* 1789 * If last ACK falls within this segment's sequence numbers, 1790 * record its timestamp. 1791 * NOTE: 1792 * 1) That the test incorporates suggestions from the latest 1793 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1794 * 2) That updating only on newer timestamps interferes with 1795 * our earlier PAWS tests, so this check should be solely 1796 * predicated on the sequence space of this segment. 1797 * 3) That we modify the segment boundary check to be 1798 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 1799 * instead of RFC1323's 1800 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 1801 * This modified check allows us to overcome RFC1323's 1802 * limitations as described in Stevens TCP/IP Illustrated 1803 * Vol. 2 p.869. In such cases, we can still calculate the 1804 * RTT correctly when RCV.NXT == Last.ACK.Sent. 
1805 */ 1806 if ((to.to_flags & TOF_TS) != 0 && 1807 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 1808 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 1809 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 1810 tp->ts_recent_age = ticks; 1811 tp->ts_recent = to.to_tsval; 1812 } 1813 1814 /* 1815 * If a SYN is in the window, then this is an 1816 * error and we send an RST and drop the connection. 1817 */ 1818 if (thflags & TH_SYN) { 1819 KASSERT(headlocked, ("%s: tcp_drop: trimthenstep6: " 1820 "head not locked", __func__)); 1821 tp = tcp_drop(tp, ECONNRESET); 1822 rstreason = BANDLIM_UNLIMITED; 1823 goto drop; 1824 } 1825 1826 /* 1827 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 1828 * flag is on (half-synchronized state), then queue data for 1829 * later processing; else drop segment and return. 1830 */ 1831 if ((thflags & TH_ACK) == 0) { 1832 if (tp->t_state == TCPS_SYN_RECEIVED || 1833 (tp->t_flags & TF_NEEDSYN)) 1834 goto step6; 1835 else if (tp->t_flags & TF_ACKNOW) 1836 goto dropafterack; 1837 else 1838 goto drop; 1839 } 1840 1841 /* 1842 * Ack processing. 1843 */ 1844 switch (tp->t_state) { 1845 1846 /* 1847 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 1848 * ESTABLISHED state and continue processing. 1849 * The ACK was checked above. 1850 */ 1851 case TCPS_SYN_RECEIVED: 1852 1853 tcpstat.tcps_connects++; 1854 soisconnected(so); 1855 /* Do window scaling? */ 1856 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1857 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1858 tp->rcv_scale = tp->request_r_scale; 1859 tp->snd_wnd = tiwin; 1860 } 1861 /* 1862 * Make transitions: 1863 * SYN-RECEIVED -> ESTABLISHED 1864 * SYN-RECEIVED* -> FIN-WAIT-1 1865 */ 1866 tp->t_starttime = ticks; 1867 if (tp->t_flags & TF_NEEDFIN) { 1868 tp->t_state = TCPS_FIN_WAIT_1; 1869 tp->t_flags &= ~TF_NEEDFIN; 1870 } else { 1871 tp->t_state = TCPS_ESTABLISHED; 1872 callout_reset(tp->tt_keep, tcp_keepidle, 1873 tcp_timer_keep, tp); 1874 } 1875 /* 1876 * If segment contains data or ACK, will call tcp_reass() 1877 * later; if not, do so now to pass queued data to user. 1878 */ 1879 if (tlen == 0 && (thflags & TH_FIN) == 0) 1880 (void) tcp_reass(tp, (struct tcphdr *)0, 0, 1881 (struct mbuf *)0); 1882 tp->snd_wl1 = th->th_seq - 1; 1883 /* FALLTHROUGH */ 1884 1885 /* 1886 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range 1887 * ACKs. If the ack is in the range 1888 * tp->snd_una < th->th_ack <= tp->snd_max 1889 * then advance tp->snd_una to th->th_ack and drop 1890 * data from the retransmission queue. If this ACK reflects 1891 * more up to date window information we update our window information. 
1892 */
1893 case TCPS_ESTABLISHED:
1894 case TCPS_FIN_WAIT_1:
1895 case TCPS_FIN_WAIT_2:
1896 case TCPS_CLOSE_WAIT:
1897 case TCPS_CLOSING:
1898 case TCPS_LAST_ACK:
1899 case TCPS_TIME_WAIT:
1900 KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: timewait",
1901 __func__));
1902 if (SEQ_GT(th->th_ack, tp->snd_max)) {
1903 tcpstat.tcps_rcvacktoomuch++;
1904 goto dropafterack;
1905 }
1906 if (tp->sack_enable &&
1907 ((to.to_flags & TOF_SACK) ||
1908 !TAILQ_EMPTY(&tp->snd_holes)))
1909 tcp_sack_doack(tp, &to, th->th_ack);
1910 if (SEQ_LEQ(th->th_ack, tp->snd_una)) {
1911 if (tlen == 0 && tiwin == tp->snd_wnd) {
1912 tcpstat.tcps_rcvdupack++;
1913 /*
1914 * If we have outstanding data (other than
1915 * a window probe), this is a completely
1916 * duplicate ack (i.e., window info didn't
1917 * change), the ack is the biggest we've
1918 * seen and we've seen exactly our rexmt
1919 * threshold of them, assume a packet
1920 * has been dropped and retransmit it.
1921 * Kludge snd_nxt & the congestion
1922 * window so we send only this one
1923 * packet.
1924 *
1925 * We know we're losing at the current
1926 * window size so do congestion avoidance
1927 * (set ssthresh to half the current window
1928 * and pull our congestion window back to
1929 * the new ssthresh).
1930 *
1931 * Dup acks mean that packets have left the
1932 * network (they're now cached at the receiver)
1933 * so bump cwnd by the amount in the receiver
1934 * to keep a constant cwnd packets in the
1935 * network.
1936 */
1937 if (!callout_active(tp->tt_rexmt) ||
1938 th->th_ack != tp->snd_una)
1939 tp->t_dupacks = 0;
1940 else if (++tp->t_dupacks > tcprexmtthresh ||
1941 ((tcp_do_newreno || tp->sack_enable) &&
1942 IN_FASTRECOVERY(tp))) {
1943 if (tp->sack_enable && IN_FASTRECOVERY(tp)) {
1944 int awnd;
1945
1946 /*
1947 * Compute the amount of data in flight first.
1948 * We can inject new data into the pipe iff
1949 * we have less than 1/2 the original window's
1950 * worth of data in flight.
1951 */
1952 awnd = (tp->snd_nxt - tp->snd_fack) +
1953 tp->sackhint.sack_bytes_rexmit;
1954 if (awnd < tp->snd_ssthresh) {
1955 tp->snd_cwnd += tp->t_maxseg;
1956 if (tp->snd_cwnd > tp->snd_ssthresh)
1957 tp->snd_cwnd = tp->snd_ssthresh;
1958 }
1959 } else
1960 tp->snd_cwnd += tp->t_maxseg;
1961 (void) tcp_output(tp);
1962 goto drop;
1963 } else if (tp->t_dupacks == tcprexmtthresh) {
1964 tcp_seq onxt = tp->snd_nxt;
1965 u_int win;
1966
1967 /*
1968 * If we're doing sack, check to
1969 * see if we're already in sack
1970 * recovery. If we're not doing sack,
1971 * check to see if we're in newreno
1972 * recovery.
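 * With the default tcprexmtthresh of 3, the third consecutive
 * duplicate ACK is the one that reaches this block and enters
 * recovery; duplicates beyond the threshold are handled in the
 * branch above, which inflates snd_cwnd so that new data can
 * continue to be clocked out.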
1973 */ 1974 if (tp->sack_enable) { 1975 if (IN_FASTRECOVERY(tp)) { 1976 tp->t_dupacks = 0; 1977 break; 1978 } 1979 } else if (tcp_do_newreno) { 1980 if (SEQ_LEQ(th->th_ack, 1981 tp->snd_recover)) { 1982 tp->t_dupacks = 0; 1983 break; 1984 } 1985 } 1986 win = min(tp->snd_wnd, tp->snd_cwnd) / 1987 2 / tp->t_maxseg; 1988 if (win < 2) 1989 win = 2; 1990 tp->snd_ssthresh = win * tp->t_maxseg; 1991 ENTER_FASTRECOVERY(tp); 1992 tp->snd_recover = tp->snd_max; 1993 callout_stop(tp->tt_rexmt); 1994 tp->t_rtttime = 0; 1995 if (tp->sack_enable) { 1996 tcpstat.tcps_sack_recovery_episode++; 1997 tp->sack_newdata = tp->snd_nxt; 1998 tp->snd_cwnd = tp->t_maxseg; 1999 (void) tcp_output(tp); 2000 goto drop; 2001 } 2002 tp->snd_nxt = th->th_ack; 2003 tp->snd_cwnd = tp->t_maxseg; 2004 (void) tcp_output(tp); 2005 KASSERT(tp->snd_limited <= 2, 2006 ("%s: tp->snd_limited too big", 2007 __func__)); 2008 tp->snd_cwnd = tp->snd_ssthresh + 2009 tp->t_maxseg * 2010 (tp->t_dupacks - tp->snd_limited); 2011 if (SEQ_GT(onxt, tp->snd_nxt)) 2012 tp->snd_nxt = onxt; 2013 goto drop; 2014 } else if (tcp_do_rfc3042) { 2015 u_long oldcwnd = tp->snd_cwnd; 2016 tcp_seq oldsndmax = tp->snd_max; 2017 u_int sent; 2018 2019 KASSERT(tp->t_dupacks == 1 || 2020 tp->t_dupacks == 2, 2021 ("%s: dupacks not 1 or 2", 2022 __func__)); 2023 if (tp->t_dupacks == 1) 2024 tp->snd_limited = 0; 2025 tp->snd_cwnd = 2026 (tp->snd_nxt - tp->snd_una) + 2027 (tp->t_dupacks - tp->snd_limited) * 2028 tp->t_maxseg; 2029 (void) tcp_output(tp); 2030 sent = tp->snd_max - oldsndmax; 2031 if (sent > tp->t_maxseg) { 2032 KASSERT((tp->t_dupacks == 2 && 2033 tp->snd_limited == 0) || 2034 (sent == tp->t_maxseg + 1 && 2035 tp->t_flags & TF_SENTFIN), 2036 ("%s: sent too much", 2037 __func__)); 2038 tp->snd_limited = 2; 2039 } else if (sent > 0) 2040 ++tp->snd_limited; 2041 tp->snd_cwnd = oldcwnd; 2042 goto drop; 2043 } 2044 } else 2045 tp->t_dupacks = 0; 2046 break; 2047 } 2048 2049 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 2050 ("%s: th_ack <= snd_una", __func__)); 2051 2052 /* 2053 * If the congestion window was inflated to account 2054 * for the other side's cached packets, retract it. 2055 */ 2056 if (tcp_do_newreno || tp->sack_enable) { 2057 if (IN_FASTRECOVERY(tp)) { 2058 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 2059 if (tp->sack_enable) 2060 tcp_sack_partialack(tp, th); 2061 else 2062 tcp_newreno_partial_ack(tp, th); 2063 } else { 2064 /* 2065 * Out of fast recovery. 2066 * Window inflation should have left us 2067 * with approximately snd_ssthresh 2068 * outstanding data. 2069 * But in case we would be inclined to 2070 * send a burst, better to do it via 2071 * the slow start mechanism. 2072 */ 2073 if (SEQ_GT(th->th_ack + 2074 tp->snd_ssthresh, 2075 tp->snd_max)) 2076 tp->snd_cwnd = tp->snd_max - 2077 th->th_ack + 2078 tp->t_maxseg; 2079 else 2080 tp->snd_cwnd = tp->snd_ssthresh; 2081 } 2082 } 2083 } else { 2084 if (tp->t_dupacks >= tcprexmtthresh && 2085 tp->snd_cwnd > tp->snd_ssthresh) 2086 tp->snd_cwnd = tp->snd_ssthresh; 2087 } 2088 tp->t_dupacks = 0; 2089 /* 2090 * If we reach this point, ACK is not a duplicate, 2091 * i.e., it ACKs something we sent. 2092 */ 2093 if (tp->t_flags & TF_NEEDSYN) { 2094 /* 2095 * T/TCP: Connection was half-synchronized, and our 2096 * SYN has been ACK'd (so connection is now fully 2097 * synchronized). Go to non-starred state, 2098 * increment snd_una for ACK of SYN, and check if 2099 * we can do window scaling. 2100 */ 2101 tp->t_flags &= ~TF_NEEDSYN; 2102 tp->snd_una++; 2103 /* Do window scaling? 
*/ 2104 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 2105 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 2106 tp->rcv_scale = tp->request_r_scale; 2107 /* Send window already scaled. */ 2108 } 2109 } 2110 2111process_ACK: 2112 KASSERT(headlocked, ("%s: process_ACK: head not locked", 2113 __func__)); 2114 INP_LOCK_ASSERT(tp->t_inpcb); 2115 2116 acked = th->th_ack - tp->snd_una; 2117 tcpstat.tcps_rcvackpack++; 2118 tcpstat.tcps_rcvackbyte += acked; 2119 2120 /* 2121 * If we just performed our first retransmit, and the ACK 2122 * arrives within our recovery window, then it was a mistake 2123 * to do the retransmit in the first place. Recover our 2124 * original cwnd and ssthresh, and proceed to transmit where 2125 * we left off. 2126 */ 2127 if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) { 2128 ++tcpstat.tcps_sndrexmitbad; 2129 tp->snd_cwnd = tp->snd_cwnd_prev; 2130 tp->snd_ssthresh = tp->snd_ssthresh_prev; 2131 tp->snd_recover = tp->snd_recover_prev; 2132 if (tp->t_flags & TF_WASFRECOVERY) 2133 ENTER_FASTRECOVERY(tp); 2134 tp->snd_nxt = tp->snd_max; 2135 tp->t_badrxtwin = 0; /* XXX probably not required */ 2136 } 2137 2138 /* 2139 * If we have a timestamp reply, update smoothed 2140 * round trip time. If no timestamp is present but 2141 * transmit timer is running and timed sequence 2142 * number was acked, update smoothed round trip time. 2143 * Since we now have an rtt measurement, cancel the 2144 * timer backoff (cf., Phil Karn's retransmit alg.). 2145 * Recompute the initial retransmit timer. 2146 * 2147 * Some boxes send broken timestamp replies 2148 * during the SYN+ACK phase, ignore 2149 * timestamps of 0 or we could calculate a 2150 * huge RTT and blow up the retransmit timer. 2151 */ 2152 if ((to.to_flags & TOF_TS) != 0 && 2153 to.to_tsecr) { 2154 if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr) 2155 tp->t_rttlow = ticks - to.to_tsecr; 2156 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1); 2157 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 2158 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 2159 tp->t_rttlow = ticks - tp->t_rtttime; 2160 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 2161 } 2162 tcp_xmit_bandwidth_limit(tp, th->th_ack); 2163 2164 /* 2165 * If all outstanding data is acked, stop retransmit 2166 * timer and remember to restart (more output or persist). 2167 * If there is more data to be acked, restart retransmit 2168 * timer, using current (possibly backed-off) value. 2169 */ 2170 if (th->th_ack == tp->snd_max) { 2171 callout_stop(tp->tt_rexmt); 2172 needoutput = 1; 2173 } else if (!callout_active(tp->tt_persist)) 2174 callout_reset(tp->tt_rexmt, tp->t_rxtcur, 2175 tcp_timer_rexmt, tp); 2176 2177 /* 2178 * If no data (only SYN) was ACK'd, 2179 * skip rest of ACK processing. 2180 */ 2181 if (acked == 0) 2182 goto step6; 2183 2184 /* 2185 * When new data is acked, open the congestion window. 2186 * If the window gives us less than ssthresh packets 2187 * in flight, open exponentially (maxseg per packet). 2188 * Otherwise open linearly: maxseg per window 2189 * (maxseg^2 / cwnd per packet). 
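 * E.g. with t_maxseg = 1460 and snd_cwnd = 14600 (above ssthresh),
 * each ACK adds 1460 * 1460 / 14600 = 146 bytes, i.e. roughly one
 * maxseg per window's worth of data acknowledged; below ssthresh
 * each ACK adds a full 1460 bytes.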
2190 */ 2191 if ((!tcp_do_newreno && !tp->sack_enable) || 2192 !IN_FASTRECOVERY(tp)) { 2193 u_int cw = tp->snd_cwnd; 2194 u_int incr = tp->t_maxseg; 2195 if (cw > tp->snd_ssthresh) 2196 incr = incr * incr / cw; 2197 tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale); 2198 } 2199 SOCKBUF_LOCK(&so->so_snd); 2200 if (acked > so->so_snd.sb_cc) { 2201 tp->snd_wnd -= so->so_snd.sb_cc; 2202 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc); 2203 ourfinisacked = 1; 2204 } else { 2205 sbdrop_locked(&so->so_snd, acked); 2206 tp->snd_wnd -= acked; 2207 ourfinisacked = 0; 2208 } 2209 sowwakeup_locked(so); 2210 /* detect una wraparound */ 2211 if ((tcp_do_newreno || tp->sack_enable) && 2212 !IN_FASTRECOVERY(tp) && 2213 SEQ_GT(tp->snd_una, tp->snd_recover) && 2214 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2215 tp->snd_recover = th->th_ack - 1; 2216 if ((tcp_do_newreno || tp->sack_enable) && 2217 IN_FASTRECOVERY(tp) && 2218 SEQ_GEQ(th->th_ack, tp->snd_recover)) 2219 EXIT_FASTRECOVERY(tp); 2220 tp->snd_una = th->th_ack; 2221 if (tp->sack_enable) { 2222 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2223 tp->snd_recover = tp->snd_una; 2224 } 2225 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2226 tp->snd_nxt = tp->snd_una; 2227 2228 switch (tp->t_state) { 2229 2230 /* 2231 * In FIN_WAIT_1 STATE in addition to the processing 2232 * for the ESTABLISHED state if our FIN is now acknowledged 2233 * then enter FIN_WAIT_2. 2234 */ 2235 case TCPS_FIN_WAIT_1: 2236 if (ourfinisacked) { 2237 /* 2238 * If we can't receive any more 2239 * data, then closing user can proceed. 2240 * Starting the timer is contrary to the 2241 * specification, but if we don't get a FIN 2242 * we'll hang forever. 2243 */ 2244 /* XXXjl 2245 * we should release the tp also, and use a 2246 * compressed state. 2247 */ 2248 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2249 int timeout; 2250 2251 soisdisconnected(so); 2252 timeout = (tcp_fast_finwait2_recycle) ? 2253 tcp_finwait2_timeout : tcp_maxidle; 2254 callout_reset(tp->tt_2msl, timeout, 2255 tcp_timer_2msl, tp); 2256 } 2257 tp->t_state = TCPS_FIN_WAIT_2; 2258 } 2259 break; 2260 2261 /* 2262 * In CLOSING STATE in addition to the processing for 2263 * the ESTABLISHED state if the ACK acknowledges our FIN 2264 * then enter the TIME-WAIT state, otherwise ignore 2265 * the segment. 2266 */ 2267 case TCPS_CLOSING: 2268 if (ourfinisacked) { 2269 KASSERT(headlocked, ("%s: process_ACK: " 2270 "head not locked", __func__)); 2271 tcp_twstart(tp); 2272 INP_INFO_WUNLOCK(&tcbinfo); 2273 headlocked = 0; 2274 m_freem(m); 2275 return (0); 2276 } 2277 break; 2278 2279 /* 2280 * In LAST_ACK, we may still be waiting for data to drain 2281 * and/or to be acked, as well as for the ack of our FIN. 2282 * If our FIN is now acknowledged, delete the TCB, 2283 * enter the closed state and return. 2284 */ 2285 case TCPS_LAST_ACK: 2286 if (ourfinisacked) { 2287 KASSERT(headlocked, ("%s: process_ACK: " 2288 "tcp_close: head not locked", __func__)); 2289 tp = tcp_close(tp); 2290 goto drop; 2291 } 2292 break; 2293 2294 /* 2295 * In TIME_WAIT state the only thing that should arrive 2296 * is a retransmission of the remote FIN. Acknowledge 2297 * it and restart the finack timer. 
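 * (TIME_WAIT connections are handled by the compressed tcptw state
 * via tcp_timewait(), so this case should no longer be reachable;
 * the KASSERT below enforces that.)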
2298 */ 2299 case TCPS_TIME_WAIT: 2300 KASSERT(tp->t_state != TCPS_TIME_WAIT, 2301 ("%s: timewait", __func__)); 2302 callout_reset(tp->tt_2msl, 2 * tcp_msl, 2303 tcp_timer_2msl, tp); 2304 goto dropafterack; 2305 } 2306 } 2307 2308step6: 2309 KASSERT(headlocked, ("%s: step6: head not locked", __func__)); 2310 INP_LOCK_ASSERT(tp->t_inpcb); 2311 2312 /* 2313 * Update window information. 2314 * Don't look at window if no ACK: TAC's send garbage on first SYN. 2315 */ 2316 if ((thflags & TH_ACK) && 2317 (SEQ_LT(tp->snd_wl1, th->th_seq) || 2318 (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) || 2319 (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) { 2320 /* keep track of pure window updates */ 2321 if (tlen == 0 && 2322 tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd) 2323 tcpstat.tcps_rcvwinupd++; 2324 tp->snd_wnd = tiwin; 2325 tp->snd_wl1 = th->th_seq; 2326 tp->snd_wl2 = th->th_ack; 2327 if (tp->snd_wnd > tp->max_sndwnd) 2328 tp->max_sndwnd = tp->snd_wnd; 2329 needoutput = 1; 2330 } 2331 2332 /* 2333 * Process segments with URG. 2334 */ 2335 if ((thflags & TH_URG) && th->th_urp && 2336 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2337 /* 2338 * This is a kludge, but if we receive and accept 2339 * random urgent pointers, we'll crash in 2340 * soreceive. It's hard to imagine someone 2341 * actually wanting to send this much urgent data. 2342 */ 2343 SOCKBUF_LOCK(&so->so_rcv); 2344 if (th->th_urp + so->so_rcv.sb_cc > sb_max) { 2345 th->th_urp = 0; /* XXX */ 2346 thflags &= ~TH_URG; /* XXX */ 2347 SOCKBUF_UNLOCK(&so->so_rcv); /* XXX */ 2348 goto dodata; /* XXX */ 2349 } 2350 /* 2351 * If this segment advances the known urgent pointer, 2352 * then mark the data stream. This should not happen 2353 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since 2354 * a FIN has been received from the remote side. 2355 * In these states we ignore the URG. 2356 * 2357 * According to RFC961 (Assigned Protocols), 2358 * the urgent pointer points to the last octet 2359 * of urgent data. We continue, however, 2360 * to consider it to indicate the first octet 2361 * of data past the urgent section as the original 2362 * spec states (in one of two places). 2363 */ 2364 if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) { 2365 tp->rcv_up = th->th_seq + th->th_urp; 2366 so->so_oobmark = so->so_rcv.sb_cc + 2367 (tp->rcv_up - tp->rcv_nxt) - 1; 2368 if (so->so_oobmark == 0) 2369 so->so_rcv.sb_state |= SBS_RCVATMARK; 2370 sohasoutofband(so); 2371 tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA); 2372 } 2373 SOCKBUF_UNLOCK(&so->so_rcv); 2374 /* 2375 * Remove out of band data so doesn't get presented to user. 2376 * This can happen independent of advancing the URG pointer, 2377 * but if two URG's are pending at once, some out-of-band 2378 * data may creep in... ick. 2379 */ 2380 if (th->th_urp <= (u_long)tlen && 2381 !(so->so_options & SO_OOBINLINE)) { 2382 /* hdr drop is delayed */ 2383 tcp_pulloutofband(so, th, m, drop_hdrlen); 2384 } 2385 } else { 2386 /* 2387 * If no out of band data is expected, 2388 * pull receive urgent pointer along 2389 * with the receive window. 2390 */ 2391 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up)) 2392 tp->rcv_up = tp->rcv_nxt; 2393 } 2394dodata: /* XXX */ 2395 KASSERT(headlocked, ("%s: dodata: head not locked", __func__)); 2396 INP_LOCK_ASSERT(tp->t_inpcb); 2397 2398 /* 2399 * Process the segment text, merging it into the TCP sequencing queue, 2400 * and arranging for acknowledgment of receipt if necessary. 
2401 * This process logically involves adjusting tp->rcv_wnd as data 2402 * is presented to the user (this happens in tcp_usrreq.c, 2403 * case PRU_RCVD). If a FIN has already been received on this 2404 * connection then we just ignore the text. 2405 */ 2406 if ((tlen || (thflags & TH_FIN)) && 2407 TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2408 tcp_seq save_start = th->th_seq; 2409 tcp_seq save_end = th->th_seq + tlen; 2410 m_adj(m, drop_hdrlen); /* delayed header drop */ 2411 /* 2412 * Insert segment which includes th into TCP reassembly queue 2413 * with control block tp. Set thflags to whether reassembly now 2414 * includes a segment with FIN. This handles the common case 2415 * inline (segment is the next to be received on an established 2416 * connection, and the queue is empty), avoiding linkage into 2417 * and removal from the queue and repetition of various 2418 * conversions. 2419 * Set DELACK for segments received in order, but ack 2420 * immediately when segments are out of order (so 2421 * fast retransmit can work). 2422 */ 2423 if (th->th_seq == tp->rcv_nxt && 2424 LIST_EMPTY(&tp->t_segq) && 2425 TCPS_HAVEESTABLISHED(tp->t_state)) { 2426 if (DELAY_ACK(tp)) 2427 tp->t_flags |= TF_DELACK; 2428 else 2429 tp->t_flags |= TF_ACKNOW; 2430 tp->rcv_nxt += tlen; 2431 thflags = th->th_flags & TH_FIN; 2432 tcpstat.tcps_rcvpack++; 2433 tcpstat.tcps_rcvbyte += tlen; 2434 ND6_HINT(tp); 2435 SOCKBUF_LOCK(&so->so_rcv); 2436 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) 2437 m_freem(m); 2438 else 2439 sbappendstream_locked(&so->so_rcv, m); 2440 sorwakeup_locked(so); 2441 } else { 2442 thflags = tcp_reass(tp, th, &tlen, m); 2443 tp->t_flags |= TF_ACKNOW; 2444 } 2445 if (tlen > 0 && tp->sack_enable) 2446 tcp_update_sack_list(tp, save_start, save_end); 2447#if 0 2448 /* 2449 * Note the amount of data that peer has sent into 2450 * our window, in order to estimate the sender's 2451 * buffer size. 2452 * XXX: Unused. 2453 */ 2454 len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt); 2455#endif 2456 } else { 2457 m_freem(m); 2458 thflags &= ~TH_FIN; 2459 } 2460 2461 /* 2462 * If FIN is received ACK the FIN and let the user know 2463 * that the connection is closing. 2464 */ 2465 if (thflags & TH_FIN) { 2466 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) { 2467 socantrcvmore(so); 2468 /* 2469 * If connection is half-synchronized 2470 * (ie NEEDSYN flag on) then delay ACK, 2471 * so it may be piggybacked when SYN is sent. 2472 * Otherwise, since we received a FIN then no 2473 * more input can be expected, send ACK now. 2474 */ 2475 if (tp->t_flags & TF_NEEDSYN) 2476 tp->t_flags |= TF_DELACK; 2477 else 2478 tp->t_flags |= TF_ACKNOW; 2479 tp->rcv_nxt++; 2480 } 2481 switch (tp->t_state) { 2482 2483 /* 2484 * In SYN_RECEIVED and ESTABLISHED STATES 2485 * enter the CLOSE_WAIT state. 2486 */ 2487 case TCPS_SYN_RECEIVED: 2488 tp->t_starttime = ticks; 2489 /*FALLTHROUGH*/ 2490 case TCPS_ESTABLISHED: 2491 tp->t_state = TCPS_CLOSE_WAIT; 2492 break; 2493 2494 /* 2495 * If still in FIN_WAIT_1 STATE FIN has not been acked so 2496 * enter the CLOSING state. 2497 */ 2498 case TCPS_FIN_WAIT_1: 2499 tp->t_state = TCPS_CLOSING; 2500 break; 2501 2502 /* 2503 * In FIN_WAIT_2 state enter the TIME_WAIT state, 2504 * starting the time-wait timer, turning off the other 2505 * standard timers. 
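 * (tcp_twstart() below discards the full tcpcb in favor of a
 * compressed tcptw and arms the 2*MSL timer; with the default
 * tcp_msl of 30 seconds the endpoint lingers for 60 seconds.)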
2506 */ 2507 case TCPS_FIN_WAIT_2: 2508 KASSERT(headlocked == 1, ("%s: dodata: " 2509 "TCP_FIN_WAIT_2: head not locked", __func__)); 2510 tcp_twstart(tp); 2511 INP_INFO_WUNLOCK(&tcbinfo); 2512 return (0); 2513 2514 /* 2515 * In TIME_WAIT state restart the 2 MSL time_wait timer. 2516 */ 2517 case TCPS_TIME_WAIT: 2518 KASSERT(tp->t_state != TCPS_TIME_WAIT, 2519 ("%s: timewait", __func__)); 2520 callout_reset(tp->tt_2msl, 2 * tcp_msl, 2521 tcp_timer_2msl, tp); 2522 break; 2523 } 2524 } 2525 INP_INFO_WUNLOCK(&tcbinfo); 2526 headlocked = 0; 2527#ifdef TCPDEBUG 2528 if (so->so_options & SO_DEBUG) 2529 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 2530 &tcp_savetcp, 0); 2531#endif 2532 2533 /* 2534 * Return any desired output. 2535 */ 2536 if (needoutput || (tp->t_flags & TF_ACKNOW)) 2537 (void) tcp_output(tp); 2538 2539check_delack: 2540 KASSERT(headlocked == 0, ("%s: check_delack: head locked", 2541 __func__)); 2542 INP_LOCK_ASSERT(tp->t_inpcb); 2543 if (tp->t_flags & TF_DELACK) { 2544 tp->t_flags &= ~TF_DELACK; 2545 callout_reset(tp->tt_delack, tcp_delacktime, 2546 tcp_timer_delack, tp); 2547 } 2548 INP_UNLOCK(tp->t_inpcb); 2549 return (0); 2550 2551dropafterack: 2552 KASSERT(headlocked, ("%s: dropafterack: head not locked", __func__)); 2553 /* 2554 * Generate an ACK dropping incoming segment if it occupies 2555 * sequence space, where the ACK reflects our state. 2556 * 2557 * We can now skip the test for the RST flag since all 2558 * paths to this code happen after packets containing 2559 * RST have been dropped. 2560 * 2561 * In the SYN-RECEIVED state, don't send an ACK unless the 2562 * segment we received passes the SYN-RECEIVED ACK test. 2563 * If it fails send a RST. This breaks the loop in the 2564 * "LAND" DoS attack, and also prevents an ACK storm 2565 * between two listening ports that have been sent forged 2566 * SYN segments, each with the source address of the other. 2567 */ 2568 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 2569 (SEQ_GT(tp->snd_una, th->th_ack) || 2570 SEQ_GT(th->th_ack, tp->snd_max)) ) { 2571 rstreason = BANDLIM_RST_OPENPORT; 2572 goto dropwithreset; 2573 } 2574#ifdef TCPDEBUG 2575 if (so->so_options & SO_DEBUG) 2576 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2577 &tcp_savetcp, 0); 2578#endif 2579 KASSERT(headlocked, ("%s: headlocked should be 1", __func__)); 2580 INP_INFO_WUNLOCK(&tcbinfo); 2581 tp->t_flags |= TF_ACKNOW; 2582 (void) tcp_output(tp); 2583 INP_UNLOCK(tp->t_inpcb); 2584 m_freem(m); 2585 return (0); 2586 2587dropwithreset: 2588 KASSERT(headlocked, ("%s: dropwithreset: head not locked", __func__)); 2589 2590 tcp_dropwithreset(m, th, tp, tlen, rstreason); 2591 2592 if (tp != NULL) 2593 INP_UNLOCK(tp->t_inpcb); 2594 if (headlocked) 2595 INP_INFO_WUNLOCK(&tcbinfo); 2596 return (0); 2597 2598drop: 2599 /* 2600 * Drop space held by incoming segment and return. 2601 */ 2602#ifdef TCPDEBUG 2603 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2604 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2605 &tcp_savetcp, 0); 2606#endif 2607 if (tp != NULL) 2608 INP_UNLOCK(tp->t_inpcb); 2609 if (headlocked) 2610 INP_INFO_WUNLOCK(&tcbinfo); 2611 m_freem(m); 2612 return (0); 2613} 2614 2615 2616/* 2617 * Issue RST on TCP segment. The mbuf must still include the original 2618 * packet header. 
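 * If the offending segment carried an ACK, the RST is sent with that
 * ACK value as its sequence number and no ACK of its own; otherwise
 * the RST acknowledges SEG.SEQ + SEG.LEN (+1 if SYN was set) so that
 * the originator will accept it.  Broadcast and multicast sources or
 * destinations are never answered.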
2619 */ 2620static void 2621tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 2622 int tlen, int rstreason) 2623{ 2624 struct ip *ip; 2625#ifdef INET6 2626 struct ip6_hdr *ip6; 2627#endif 2628 2629 /* 2630 * Generate a RST, dropping incoming segment. 2631 * Make ACK acceptable to originator of segment. 2632 * Don't bother to respond if destination was broadcast/multicast. 2633 */ 2634 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 2635 goto drop; 2636#ifdef INET6 2637 if (mtod(m, struct ip *)->ip_v == 6) { 2638 ip6 = mtod(m, struct ip6_hdr *); 2639 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 2640 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 2641 goto drop; 2642 /* IPv6 anycast check is done at tcp6_input() */ 2643 } else 2644#endif 2645 { 2646 ip = mtod(m, struct ip *); 2647 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 2648 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 2649 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 2650 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 2651 goto drop; 2652 } 2653 2654 /* Perform bandwidth limiting. */ 2655 if (badport_bandlim(rstreason) < 0) 2656 goto drop; 2657 2658 /* tcp_respond consumes the mbuf chain. */ 2659 if (th->th_flags & TH_ACK) { 2660 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 2661 th->th_ack, TH_RST); 2662 } else { 2663 if (th->th_flags & TH_SYN) 2664 tlen++; 2665 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 2666 (tcp_seq)0, TH_RST|TH_ACK); 2667 } 2668 return; 2669drop: 2670 m_freem(m); 2671 return; 2672} 2673 2674/* 2675 * Parse TCP options and place in tcpopt. 2676 */ 2677static void 2678tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 2679{ 2680 int opt, optlen; 2681 2682 to->to_flags = 0; 2683 for (; cnt > 0; cnt -= optlen, cp += optlen) { 2684 opt = cp[0]; 2685 if (opt == TCPOPT_EOL) 2686 break; 2687 if (opt == TCPOPT_NOP) 2688 optlen = 1; 2689 else { 2690 if (cnt < 2) 2691 break; 2692 optlen = cp[1]; 2693 if (optlen < 2 || optlen > cnt) 2694 break; 2695 } 2696 switch (opt) { 2697 case TCPOPT_MAXSEG: 2698 if (optlen != TCPOLEN_MAXSEG) 2699 continue; 2700 if (!(flags & TO_SYN)) 2701 continue; 2702 to->to_flags |= TOF_MSS; 2703 bcopy((char *)cp + 2, 2704 (char *)&to->to_mss, sizeof(to->to_mss)); 2705 to->to_mss = ntohs(to->to_mss); 2706 break; 2707 case TCPOPT_WINDOW: 2708 if (optlen != TCPOLEN_WINDOW) 2709 continue; 2710 if (!(flags & TO_SYN)) 2711 continue; 2712 to->to_flags |= TOF_SCALE; 2713 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 2714 break; 2715 case TCPOPT_TIMESTAMP: 2716 if (optlen != TCPOLEN_TIMESTAMP) 2717 continue; 2718 to->to_flags |= TOF_TS; 2719 bcopy((char *)cp + 2, 2720 (char *)&to->to_tsval, sizeof(to->to_tsval)); 2721 to->to_tsval = ntohl(to->to_tsval); 2722 bcopy((char *)cp + 6, 2723 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 2724 to->to_tsecr = ntohl(to->to_tsecr); 2725 break; 2726#ifdef TCP_SIGNATURE 2727 /* 2728 * XXX In order to reply to a host which has set the 2729 * TCP_SIGNATURE option in its initial SYN, we have to 2730 * record the fact that the option was observed here 2731 * for the syncache code to perform the correct response. 
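 * Only the presence of the option is noted here (TOF_SIGNATURE);
 * the 16-byte MD5 digest itself is not extracted by this parser.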
2732 */ 2733 case TCPOPT_SIGNATURE: 2734 if (optlen != TCPOLEN_SIGNATURE) 2735 continue; 2736 to->to_flags |= (TOF_SIGNATURE | TOF_SIGLEN); 2737 break; 2738#endif 2739 case TCPOPT_SACK_PERMITTED: 2740 if (optlen != TCPOLEN_SACK_PERMITTED) 2741 continue; 2742 if (!(flags & TO_SYN)) 2743 continue; 2744 if (!tcp_do_sack) 2745 continue; 2746 to->to_flags |= TOF_SACKPERM; 2747 break; 2748 case TCPOPT_SACK: 2749 if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0) 2750 continue; 2751 to->to_flags |= TOF_SACK; 2752 to->to_nsacks = (optlen - 2) / TCPOLEN_SACK; 2753 to->to_sacks = cp + 2; 2754 tcpstat.tcps_sack_rcv_blocks++; 2755 break; 2756 default: 2757 continue; 2758 } 2759 } 2760} 2761 2762/* 2763 * Pull out of band byte out of a segment so 2764 * it doesn't appear in the user's data queue. 2765 * It is still reflected in the segment length for 2766 * sequencing purposes. 2767 */ 2768static void 2769tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m, 2770 int off) 2771{ 2772 int cnt = off + th->th_urp - 1; 2773 2774 while (cnt >= 0) { 2775 if (m->m_len > cnt) { 2776 char *cp = mtod(m, caddr_t) + cnt; 2777 struct tcpcb *tp = sototcpcb(so); 2778 2779 tp->t_iobc = *cp; 2780 tp->t_oobflags |= TCPOOB_HAVEDATA; 2781 bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1)); 2782 m->m_len--; 2783 if (m->m_flags & M_PKTHDR) 2784 m->m_pkthdr.len--; 2785 return; 2786 } 2787 cnt -= m->m_len; 2788 m = m->m_next; 2789 if (m == NULL) 2790 break; 2791 } 2792 panic("tcp_pulloutofband"); 2793} 2794 2795/* 2796 * Collect new round-trip time estimate 2797 * and update averages and current timeout. 2798 */ 2799static void 2800tcp_xmit_timer(struct tcpcb *tp, int rtt) 2801{ 2802 int delta; 2803 2804 INP_LOCK_ASSERT(tp->t_inpcb); 2805 2806 tcpstat.tcps_rttupdated++; 2807 tp->t_rttupdated++; 2808 if (tp->t_srtt != 0) { 2809 /* 2810 * srtt is stored as fixed point with 5 bits after the 2811 * binary point (i.e., scaled by 8). The following magic 2812 * is equivalent to the smoothing algorithm in rfc793 with 2813 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed 2814 * point). Adjust rtt to origin 0. 2815 */ 2816 delta = ((rtt - 1) << TCP_DELTA_SHIFT) 2817 - (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT)); 2818 2819 if ((tp->t_srtt += delta) <= 0) 2820 tp->t_srtt = 1; 2821 2822 /* 2823 * We accumulate a smoothed rtt variance (actually, a 2824 * smoothed mean difference), then set the retransmit 2825 * timer to smoothed rtt + 4 times the smoothed variance. 2826 * rttvar is stored as fixed point with 4 bits after the 2827 * binary point (scaled by 16). The following is 2828 * equivalent to rfc793 smoothing with an alpha of .75 2829 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces 2830 * rfc793's wired-in beta. 2831 */ 2832 if (delta < 0) 2833 delta = -delta; 2834 delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT); 2835 if ((tp->t_rttvar += delta) <= 0) 2836 tp->t_rttvar = 1; 2837 if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar) 2838 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2839 } else { 2840 /* 2841 * No rtt measurement yet - use the unsmoothed rtt. 2842 * Set the variance to half the rtt (so our first 2843 * retransmit happens at 3*rtt). 2844 */ 2845 tp->t_srtt = rtt << TCP_RTT_SHIFT; 2846 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 2847 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2848 } 2849 tp->t_rtttime = 0; 2850 tp->t_rxtshift = 0; 2851 2852 /* 2853 * the retransmit should happen at rtt + 4 * rttvar. 
2854 * Because of the way we do the smoothing, srtt and rttvar 2855 * will each average +1/2 tick of bias. When we compute 2856 * the retransmit timer, we want 1/2 tick of rounding and 2857 * 1 extra tick because of +-1/2 tick uncertainty in the 2858 * firing of the timer. The bias will give us exactly the 2859 * 1.5 tick we need. But, because the bias is 2860 * statistical, we have to test that we don't drop below 2861 * the minimum feasible timer (which is 2 ticks). 2862 */ 2863 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 2864 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 2865 2866 /* 2867 * We received an ack for a packet that wasn't retransmitted; 2868 * it is probably safe to discard any error indications we've 2869 * received recently. This isn't quite right, but close enough 2870 * for now (a route might have failed after we sent a segment, 2871 * and the return path might not be symmetrical). 2872 */ 2873 tp->t_softerror = 0; 2874} 2875 2876/* 2877 * Determine a reasonable value for maxseg size. 2878 * If the route is known, check route for mtu. 2879 * If none, use an mss that can be handled on the outgoing 2880 * interface without forcing IP to fragment; if bigger than 2881 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES 2882 * to utilize large mbufs. If no route is found, route has no mtu, 2883 * or the destination isn't local, use a default, hopefully conservative 2884 * size (usually 512 or the default IP max size, but no more than the mtu 2885 * of the interface), as we can't discover anything about intervening 2886 * gateways or networks. We also initialize the congestion/slow start 2887 * window to be a single segment if the destination isn't local. 2888 * While looking at the routing entry, we also initialize other path-dependent 2889 * parameters from pre-set or cached values in the routing entry. 2890 * 2891 * Also take into account the space needed for options that we 2892 * send regularly. Make maxseg shorter by that amount to assure 2893 * that we can send maxseg amount of data even when the options 2894 * are present. Store the upper limit of the length of options plus 2895 * data in maxopd. 2896 * 2897 * 2898 * In case of T/TCP, we call this routine during implicit connection 2899 * setup as well (offer = -1), to initialize maxseg from the cached 2900 * MSS of our peer. 2901 * 2902 * NOTE that this routine is only called when we process an incoming 2903 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt(). 2904 */ 2905void 2906tcp_mss(struct tcpcb *tp, int offer) 2907{ 2908 int rtt, mss; 2909 u_long bufsize; 2910 u_long maxmtu; 2911 struct inpcb *inp = tp->t_inpcb; 2912 struct socket *so; 2913 struct hc_metrics_lite metrics; 2914 int origoffer = offer; 2915 int mtuflags = 0; 2916#ifdef INET6 2917 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 2918 size_t min_protoh = isipv6 ? 2919 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 2920 sizeof (struct tcpiphdr); 2921#else 2922 const size_t min_protoh = sizeof(struct tcpiphdr); 2923#endif 2924 2925 /* initialize */ 2926#ifdef INET6 2927 if (isipv6) { 2928 maxmtu = tcp_maxmtu6(&inp->inp_inc, &mtuflags); 2929 tp->t_maxopd = tp->t_maxseg = tcp_v6mssdflt; 2930 } else 2931#endif 2932 { 2933 maxmtu = tcp_maxmtu(&inp->inp_inc, &mtuflags); 2934 tp->t_maxopd = tp->t_maxseg = tcp_mssdflt; 2935 } 2936 so = inp->inp_socket; 2937 2938 /* 2939 * no route to sender, stay with default mss and return 2940 */ 2941 if (maxmtu == 0) 2942 return; 2943 2944 /* what have we got? 
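 * offer holds the MSS option from the peer's SYN: a positive value is
 * the advertised MSS, 0 means the SYN carried no MSS option, and -1
 * means no SYN has been received yet (implicit T/TCP setup).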
 */
2945 switch (offer) {
2946 case 0:
2947 /*
2948 * Offer == 0 means that there was no MSS on the SYN
2949 * segment; in this case we use tcp_mssdflt.
2950 */
2951 offer =
2952 #ifdef INET6
2953 isipv6 ? tcp_v6mssdflt :
2954 #endif
2955 tcp_mssdflt;
2956 break;
2957
2958 case -1:
2959 /*
2960 * Offer == -1 means that we didn't receive SYN yet.
2961 */
2962 /* FALLTHROUGH */
2963
2964 default:
2965 /*
2966 * Prevent DoS attack with too small MSS. Round up
2967 * to at least minmss.
2968 */
2969 offer = max(offer, tcp_minmss);
2970 /*
2971 * Sanity check: make sure that maxopd will be large
2972 * enough to allow some data on segments even if
2973 * all the option space is used (40 bytes). Otherwise
2974 * funny things may happen in tcp_output.
2975 */
2976 offer = max(offer, 64);
2977 }
2978
2979 /*
2980 * rmx information is now retrieved from tcp_hostcache
2981 */
2982 tcp_hc_get(&inp->inp_inc, &metrics);
2983
2984 /*
2985 * if there's a discovered mtu in the tcp hostcache, use it;
2986 * else use the link mtu.
2987 */
2988 if (metrics.rmx_mtu)
2989 mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
2990 else {
2991 #ifdef INET6
2992 if (isipv6) {
2993 mss = maxmtu - min_protoh;
2994 if (!path_mtu_discovery &&
2995 !in6_localaddr(&inp->in6p_faddr))
2996 mss = min(mss, tcp_v6mssdflt);
2997 } else
2998 #endif
2999 {
3000 mss = maxmtu - min_protoh;
3001 if (!path_mtu_discovery &&
3002 !in_localaddr(inp->inp_faddr))
3003 mss = min(mss, tcp_mssdflt);
3004 }
3005 }
3006 mss = min(mss, offer);
3007
3008 /*
3009 * maxopd stores the maximum length of data AND options
3010 * in a segment; maxseg is the amount of data in a normal
3011 * segment. We need to store this value (maxopd) apart
3012 * from maxseg, because now every segment carries options
3013 * and thus we normally have somewhat less data in segments.
3014 */
3015 tp->t_maxopd = mss;
3016
3017 /*
3018 * origoffer == -1 indicates that no segments were received yet.
3019 * In this case we just guess.
3020 */
3021 if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
3022 (origoffer == -1 ||
3023 (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
3024 mss -= TCPOLEN_TSTAMP_APPA;
3025 tp->t_maxseg = mss;
3026
3027 #if (MCLBYTES & (MCLBYTES - 1)) == 0
3028 if (mss > MCLBYTES)
3029 mss &= ~(MCLBYTES-1);
3030 #else
3031 if (mss > MCLBYTES)
3032 mss = mss / MCLBYTES * MCLBYTES;
3033 #endif
3034 tp->t_maxseg = mss;
3035
3036 /*
3037 * If there's a pipesize, change the socket buffer to that size;
3038 * don't change it if sb_hiwat differs from the default (then it
3039 * has been changed on purpose with setsockopt).
3040 * Make the socket buffers an integral number of mss units;
3041 * if the mss is larger than the socket buffer, decrease the mss.
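 * E.g. a 32768 byte send buffer with a 1460 byte mss is rounded up to
 * 33580 bytes (23 * 1460), subject to the sb_max limit; a 1024 byte
 * buffer would instead pull the mss down to 1024.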
3042 */
3043 SOCKBUF_LOCK(&so->so_snd);
3044 if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe)
3045 bufsize = metrics.rmx_sendpipe;
3046 else
3047 bufsize = so->so_snd.sb_hiwat;
3048 if (bufsize < mss)
3049 mss = bufsize;
3050 else {
3051 bufsize = roundup(bufsize, mss);
3052 if (bufsize > sb_max)
3053 bufsize = sb_max;
3054 if (bufsize > so->so_snd.sb_hiwat)
3055 (void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
3056 }
3057 SOCKBUF_UNLOCK(&so->so_snd);
3058 tp->t_maxseg = mss;
3059
3060 SOCKBUF_LOCK(&so->so_rcv);
3061 if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe)
3062 bufsize = metrics.rmx_recvpipe;
3063 else
3064 bufsize = so->so_rcv.sb_hiwat;
3065 if (bufsize > mss) {
3066 bufsize = roundup(bufsize, mss);
3067 if (bufsize > sb_max)
3068 bufsize = sb_max;
3069 if (bufsize > so->so_rcv.sb_hiwat)
3070 (void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
3071 }
3072 SOCKBUF_UNLOCK(&so->so_rcv);
3073 /*
3074 * While we're here, check the others too
3075 */
3076 if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
3077 tp->t_srtt = rtt;
3078 tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
3079 tcpstat.tcps_usedrtt++;
3080 if (metrics.rmx_rttvar) {
3081 tp->t_rttvar = metrics.rmx_rttvar;
3082 tcpstat.tcps_usedrttvar++;
3083 } else {
3084 /* default variation is +- 1 rtt */
3085 tp->t_rttvar =
3086 tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
3087 }
3088 TCPT_RANGESET(tp->t_rxtcur,
3089 ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
3090 tp->t_rttmin, TCPTV_REXMTMAX);
3091 }
3092 if (metrics.rmx_ssthresh) {
3093 /*
3094 * There's some sort of gateway or interface
3095 * buffer limit on the path. Use this to set
3096 * the slow start threshold, but set the
3097 * threshold to no less than 2*mss.
3098 */
3099 tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh);
3100 tcpstat.tcps_usedssthresh++;
3101 }
3102 if (metrics.rmx_bandwidth)
3103 tp->snd_bandwidth = metrics.rmx_bandwidth;
3104
3105 /*
3106 * Set the slow-start flight size depending on whether this
3107 * is a local network or not.
3108 *
3109 * Extend this so we cache the cwnd too and retrieve it here.
3110 * Make cwnd even bigger than RFC3390 suggests but only if we
3111 * have previous experience with the remote host. Be careful
3112 * not to make cwnd bigger than the remote receive window or our
3113 * own send socket buffer. Maybe put some additional upper bound
3114 * on the retrieved cwnd. Should do incremental updates to
3115 * hostcache when cwnd collapses so the next connection doesn't
3116 * overload the path again.
3117 *
3118 * RFC3390 says only do this if the SYN or SYN/ACK didn't get lost.
3119 * We currently check only in syncache_socket for that.
3120 */
3121 #define TCP_METRICS_CWND
3122 #ifdef TCP_METRICS_CWND
3123 if (metrics.rmx_cwnd)
3124 tp->snd_cwnd = max(mss,
3125 min(metrics.rmx_cwnd / 2,
3126 min(tp->snd_wnd, so->so_snd.sb_hiwat)));
3127 else
3128 #endif
3129 if (tcp_do_rfc3390)
3130 tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380));
3131 #ifdef INET6
3132 else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) ||
3133 (!isipv6 && in_localaddr(inp->inp_faddr)))
3134 #else
3135 else if (in_localaddr(inp->inp_faddr))
3136 #endif
3137 tp->snd_cwnd = mss * ss_fltsz_local;
3138 else
3139 tp->snd_cwnd = mss * ss_fltsz;
3140
3141 /* Check the interface for TSO capabilities. */
3142 if (mtuflags & CSUM_TSO)
3143 tp->t_flags |= TF_TSO;
3144 }
3145
3146 /*
3147 * Determine the MSS option to send on an outgoing SYN.
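 * The value is derived from the outgoing interface MTU and any cached
 * path MTU for the destination, minus the fixed IP+TCP header size,
 * falling back to tcp_mssdflt / tcp_v6mssdflt when neither is known;
 * no room is reserved here for TCP options.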
3148 */
3149 int
3150 tcp_mssopt(struct in_conninfo *inc)
3151 {
3152 int mss = 0;
3153 u_long maxmtu = 0;
3154 u_long thcmtu = 0;
3155 size_t min_protoh;
3156 #ifdef INET6
3157 int isipv6 = inc->inc_isipv6 ? 1 : 0;
3158 #endif
3159
3160 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer"));
3161
3162 #ifdef INET6
3163 if (isipv6) {
3164 mss = tcp_v6mssdflt;
3165 maxmtu = tcp_maxmtu6(inc, NULL);
3166 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3167 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
3168 } else
3169 #endif
3170 {
3171 mss = tcp_mssdflt;
3172 maxmtu = tcp_maxmtu(inc, NULL);
3173 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */
3174 min_protoh = sizeof(struct tcpiphdr);
3175 }
3176 if (maxmtu && thcmtu)
3177 mss = min(maxmtu, thcmtu) - min_protoh;
3178 else if (maxmtu || thcmtu)
3179 mss = max(maxmtu, thcmtu) - min_protoh;
3180
3181 return (mss);
3182 }
3183
3184
3185 /*
3186 * When a partial ack arrives, force the retransmission of the
3187 * next unacknowledged segment. Do not clear tp->t_dupacks.
3188 * By setting snd_nxt to th_ack, this forces the retransmission timer
3189 * to be started again.
3190 */
3191 static void
3192 tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th)
3193 {
3194 tcp_seq onxt = tp->snd_nxt;
3195 u_long ocwnd = tp->snd_cwnd;
3196
3197 callout_stop(tp->tt_rexmt);
3198 tp->t_rtttime = 0;
3199 tp->snd_nxt = th->th_ack;
3200 /*
3201 * Set snd_cwnd to one segment beyond acknowledged offset.
3202 * (tp->snd_una has not yet been updated when this function is called.)
3203 */
3204 tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una);
3205 tp->t_flags |= TF_ACKNOW;
3206 (void) tcp_output(tp);
3207 tp->snd_cwnd = ocwnd;
3208 if (SEQ_GT(onxt, tp->snd_nxt))
3209 tp->snd_nxt = onxt;
3210 /*
3211 * Partial window deflation. Relies on fact that tp->snd_una
3212 * not updated yet.
3213 */
3214 if (tp->snd_cwnd > th->th_ack - tp->snd_una)
3215 tp->snd_cwnd -= th->th_ack - tp->snd_una;
3216 else
3217 tp->snd_cwnd = 0;
3218 tp->snd_cwnd += tp->t_maxseg;
3219 }
3220
3221 /*
3222 * Returns 1 if the TIME_WAIT state was killed and we should start over,
3223 * looking for a pcb in the listen state. Returns 0 otherwise.
3224 */
3225 static int
3226 tcp_timewait(struct inpcb *inp, struct tcpopt *to, struct tcphdr *th,
3227 struct mbuf *m, int tlen)
3228 {
3229 struct tcptw *tw;
3230 int thflags;
3231 tcp_seq seq;
3232 #ifdef INET6
3233 int isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
3234 #else
3235 const int isipv6 = 0;
3236 #endif
3237
3238 /* tcbinfo lock required for tcp_twclose(), tcp_timer_2msl_reset(). */
3239 INP_INFO_WLOCK_ASSERT(&tcbinfo);
3240 INP_LOCK_ASSERT(inp);
3241
3242 /*
3243 * XXXRW: Time wait state for inpcb has been recycled, but inpcb is
3244 * still present. This is undesirable, but temporarily necessary
3245 * until we work out how to handle inpcbs whose timewait state has
3246 * been removed.
3247 */
3248 tw = intotw(inp);
3249 if (tw == NULL)
3250 goto drop;
3251
3252 thflags = th->th_flags;
3253
3254 /*
3255 * NOTE: for FIN_WAIT_2 (to be added later),
3256 * must validate sequence number before accepting RST
3257 */
3258
3259 /*
3260 * If the segment contains RST:
3261 * Drop the segment - see Stevens, vol. 2, p. 964 and
3262 * RFC 1337.
3263 */
3264 if (thflags & TH_RST)
3265 goto drop;
3266
3267 #if 0
3268 /* PAWS not needed at the moment */
3269 /*
3270 * RFC 1323 PAWS: If we have a timestamp reply on this segment
3271 * and it's less than ts_recent, drop it.
3272 */
3273 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
3274 TSTMP_LT(to.to_tsval, tp->ts_recent)) {
3275 if ((thflags & TH_ACK) == 0)
3276 goto drop;
3277 goto ack;
3278 }
3279 /*
3280 * ts_recent is never updated because we never accept new segments.
3281 */
3282 #endif
3283
3284 /*
3285 * If a new connection request is received
3286 * while in TIME_WAIT, drop the old connection
3287 * and start over if the sequence numbers
3288 * are above the previous ones.
3289 */
3290 if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
3291 tcp_twclose(tw, 0);
3292 return (1);
3293 }
3294
3295 /*
3296 * Drop the segment if it does not contain an ACK.
3297 */
3298 if ((thflags & TH_ACK) == 0)
3299 goto drop;
3300
3301 /*
3302 * Reset the 2MSL timer if this is a duplicate FIN.
3303 */
3304 if (thflags & TH_FIN) {
3305 seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
3306 if (seq + 1 == tw->rcv_nxt)
3307 tcp_timer_2msl_reset(tw, 1);
3308 }
3309
3310 /*
3311 * Acknowledge the segment if it has data or is not a duplicate ACK.
3312 */
3313 if (thflags != TH_ACK || tlen != 0 ||
3314 th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
3315 tcp_twrespond(tw, TH_ACK);
3316 goto drop;
3317
3318 /*
3319 * Generate a RST, dropping incoming segment.
3320 * Make ACK acceptable to originator of segment.
3321 * Don't bother to respond if destination was broadcast/multicast.
3322 */
3323 if (m->m_flags & (M_BCAST|M_MCAST))
3324 goto drop;
3325 if (isipv6) {
3326 struct ip6_hdr *ip6;
3327
3328 /* IPv6 anycast check is done at tcp6_input() */
3329 ip6 = mtod(m, struct ip6_hdr *);
3330 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
3331 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src))
3332 goto drop;
3333 } else {
3334 struct ip *ip;
3335
3336 ip = mtod(m, struct ip *);
3337 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
3338 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
3339 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
3340 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif))
3341 goto drop;
3342 }
3343 if (thflags & TH_ACK) {
3344 tcp_respond(NULL,
3345 mtod(m, void *), th, m, 0, th->th_ack, TH_RST);
3346 } else {
3347 seq = th->th_seq + (thflags & TH_SYN ? 1 : 0);
3348 tcp_respond(NULL,
3349 mtod(m, void *), th, m, seq, 0, TH_RST|TH_ACK);
3350 }
3351 INP_UNLOCK(inp);
3352 return (0);
3353
3354 drop:
3355 INP_UNLOCK(inp);
3356 m_freem(m);
3357 return (0);
3358 }
3359