/*-
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_input.c	8.12 (Berkeley) 5/24/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/netinet/tcp_input.c 178888 2008-05-09 23:03:00Z julian $");

#include "opt_ipfw.h"		/* for ipfw_fwd */
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>		/* for proc0 declaration */
#include <sys/protosw.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/systm.h>

#include <machine/cpu.h>	/* before tcp_seq.h, for tcp_random18() */

#include <vm/uma.h>

#include <net/if.h>
#include <net/route.h>

#define TCPSTATES		/* for logging */

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>	/* required for icmp_var.h */
#include <netinet/icmp_var.h>	/* for ICMP_BANDLIM */
#include <netinet/ip_var.h>
#include <netinet/ip_options.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet6/tcp6_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_syncache.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif /* TCPDEBUG */

#ifdef IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/ipsec6.h>
#endif /*IPSEC*/

#include <machine/in_cksum.h>

#include <security/mac/mac_framework.h>

static const int tcprexmtthresh = 3;

struct	tcpstat tcpstat;
SYSCTL_STRUCT(_net_inet_tcp, TCPCTL_STATS, stats, CTLFLAG_RW,
    &tcpstat, tcpstat, "TCP statistics (struct tcpstat, netinet/tcp_var.h)");

int tcp_log_in_vain = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, log_in_vain, CTLFLAG_RW,
    &tcp_log_in_vain, 0, "Log all incoming TCP segments to closed ports");

static int blackhole = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, blackhole, CTLFLAG_RW,
    &blackhole, 0, "Do not send RST on segments to closed ports");

int tcp_delack_enabled = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, delayed_ack, CTLFLAG_RW,
    &tcp_delack_enabled, 0,
    "Delay ACK to try and piggyback it onto a data packet");
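
/*
 * Example (illustrative, not part of the original source): because the
 * knobs above are declared CTLFLAG_RW under the net.inet.tcp tree, they
 * can be tuned at runtime with sysctl(8), e.g.:
 *
 *	sysctl net.inet.tcp.delayed_ack=0	# ACK every segment
 *	sysctl net.inet.tcp.blackhole=2		# never RST closed ports
 *	sysctl net.inet.tcp.log_in_vain=1	# log SYNs to closed ports
 */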

static int drop_synfin = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, drop_synfin, CTLFLAG_RW,
    &drop_synfin, 0, "Drop TCP packets with SYN+FIN set");

static int tcp_do_rfc3042 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3042, CTLFLAG_RW,
    &tcp_do_rfc3042, 0, "Enable RFC 3042 (Limited Transmit)");

static int tcp_do_rfc3390 = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, rfc3390, CTLFLAG_RW,
    &tcp_do_rfc3390, 0,
    "Enable RFC 3390 (Increasing TCP's Initial Congestion Window)");

static int tcp_insecure_rst = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, insecure_rst, CTLFLAG_RW,
    &tcp_insecure_rst, 0,
    "Follow the old (insecure) criteria for accepting RST packets");

int	tcp_do_autorcvbuf = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_auto, CTLFLAG_RW,
    &tcp_do_autorcvbuf, 0, "Enable automatic receive buffer sizing");

int	tcp_autorcvbuf_inc = 16*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_inc, CTLFLAG_RW,
    &tcp_autorcvbuf_inc, 0,
    "Incrementor step size of automatic receive buffer");

int	tcp_autorcvbuf_max = 256*1024;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, recvbuf_max, CTLFLAG_RW,
    &tcp_autorcvbuf_max, 0, "Max size of automatic receive buffer");

struct inpcbhead tcb;
#define	tcb6	tcb  /* for KAME src sync over BSD*'s */
struct inpcbinfo tcbinfo;

static void	 tcp_dooptions(struct tcpopt *, u_char *, int, int);
static void	 tcp_do_segment(struct mbuf *, struct tcphdr *,
		     struct socket *, struct tcpcb *, int, int);
static void	 tcp_dropwithreset(struct mbuf *, struct tcphdr *,
		     struct tcpcb *, int, int);
static void	 tcp_pulloutofband(struct socket *,
		     struct tcphdr *, struct mbuf *, int);
static void	 tcp_xmit_timer(struct tcpcb *, int);
static void	 tcp_newreno_partial_ack(struct tcpcb *, struct tcphdr *);

/* Neighbor Discovery, Neighbor Unreachability Detection Upper layer hint. */
#ifdef INET6
#define ND6_HINT(tp) \
do { \
	if ((tp) && (tp)->t_inpcb && \
	    ((tp)->t_inpcb->inp_vflag & INP_IPV6) != 0) \
		nd6_nud_hint(NULL, NULL, 0); \
} while (0)
#else
#define ND6_HINT(tp)
#endif

/*
 * Indicate whether this ack should be delayed.  We can delay the ack if
 *	- there is no delayed ack timer in progress and
 *	- our last ack wasn't a 0-sized window.  We never want to delay
 *	  the ack that opens up a 0-sized window and
 *		- delayed acks are enabled or
 *		- this is a half-synchronized T/TCP connection.
 */
#define DELAY_ACK(tp)							\
	((!tcp_timer_active(tp, TT_DELACK) &&				\
	    (tp->t_flags & TF_RXWIN0SENT) == 0) &&			\
	    (tcp_delack_enabled || (tp->t_flags & TF_NEEDSYN)))
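
/*
 * Illustrative walk-through of DELAY_ACK() for a bulk receiver with
 * tcp_delack_enabled=1 (not part of the original source): the first
 * in-order data segment finds no TT_DELACK timer pending, so DELAY_ACK()
 * is true and only TF_DELACK is scheduled.  Once the delayed-ACK timer
 * has been armed, the next segment finds it active, DELAY_ACK() is
 * false, TF_ACKNOW is set and an ACK goes out immediately - the classic
 * ack-every-other-segment behavior.  TF_RXWIN0SENT guards the case
 * where the last advertised window was zero: the ACK that reopens a
 * closed window must never be delayed.
 */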

/*
 * TCP input handling is split into multiple parts:
 *   tcp6_input is a thin wrapper around tcp_input for the extended
 *	ip6_protox[] call format in ip6_input
 *   tcp_input handles primary segment validation, inpcb lookup and
 *	SYN processing on listen sockets
 *   tcp_do_segment processes the ACK and text of the segment for
 *	establishing, established and closing connections
 */
#ifdef INET6
int
tcp6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp;
	struct in6_ifaddr *ia6;

	IP6_EXTHDR_CHECK(m, *offp, sizeof(struct tcphdr), IPPROTO_DONE);

	/*
	 * draft-itojun-ipv6-tcp-to-anycast
	 * better place to put this in?
	 */
	ia6 = ip6_getdstifaddr(m);
	if (ia6 && (ia6->ia6_flags & IN6_IFF_ANYCAST)) {
		struct ip6_hdr *ip6;

		ip6 = mtod(m, struct ip6_hdr *);
		icmp6_error(m, ICMP6_DST_UNREACH, ICMP6_DST_UNREACH_ADDR,
		    (caddr_t)&ip6->ip6_dst - (caddr_t)ip6);
		return IPPROTO_DONE;
	}

	tcp_input(m, *offp);
	return IPPROTO_DONE;
}
#endif

void
tcp_input(struct mbuf *m, int off0)
{
	struct tcphdr *th;
	struct ip *ip = NULL;
	struct ipovly *ipov;
	struct inpcb *inp = NULL;
	struct tcpcb *tp = NULL;
	struct socket *so = NULL;
	u_char *optp = NULL;
	int optlen = 0;
	int len, tlen, off;
	int drop_hdrlen;
	int thflags;
	int rstreason = 0;	/* For badport_bandlim accounting purposes */
#ifdef IPFIREWALL_FORWARD
	struct m_tag *fwd_tag;
#endif
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6;
#else
	const void *ip6 = NULL;
	const int isipv6 = 0;
#endif
	struct tcpopt to;		/* options in this segment */
	char *s = NULL;			/* address and port logging */

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif

#ifdef INET6
	isipv6 = (mtod(m, struct ip *)->ip_v == 6) ? 1 : 0;
#endif

	to.to_flags = 0;
	tcpstat.tcps_rcvtotal++;

	if (isipv6) {
#ifdef INET6
		/* IP6_EXTHDR_CHECK() is already done at tcp6_input(). */
		ip6 = mtod(m, struct ip6_hdr *);
		tlen = sizeof(*ip6) + ntohs(ip6->ip6_plen) - off0;
		if (in6_cksum(m, IPPROTO_TCP, off0, tlen)) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		th = (struct tcphdr *)((caddr_t)ip6 + off0);

		/*
		 * Be proactive about unspecified IPv6 address in source.
		 * As we use all-zero to indicate an unbound/unconnected pcb,
		 * an unspecified IPv6 address can be used to confuse us.
		 *
		 * Note that packets with an unspecified IPv6 destination are
		 * already dropped in ip6_input.
		 */
		if (IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			/* XXX stat */
			goto drop;
		}
#else
		th = NULL;		/* XXX: Avoid compiler warning. */
#endif
	} else {
		/*
		 * Get IP and TCP header together in first mbuf.
		 * Note: IP leaves IP header in first mbuf.
		 */
		if (off0 > sizeof (struct ip)) {
			ip_stripoptions(m, (struct mbuf *)0);
			off0 = sizeof(struct ip);
		}
		if (m->m_len < sizeof (struct tcpiphdr)) {
			if ((m = m_pullup(m, sizeof (struct tcpiphdr)))
			    == NULL) {
				tcpstat.tcps_rcvshort++;
				return;
			}
		}
		ip = mtod(m, struct ip *);
		ipov = (struct ipovly *)ip;
		th = (struct tcphdr *)((caddr_t)ip + off0);
		tlen = ip->ip_len;

		if (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) {
			if (m->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR)
				th->th_sum = m->m_pkthdr.csum_data;
			else
				th->th_sum = in_pseudo(ip->ip_src.s_addr,
				    ip->ip_dst.s_addr,
				    htonl(m->m_pkthdr.csum_data +
				    ip->ip_len +
				    IPPROTO_TCP));
			th->th_sum ^= 0xffff;
#ifdef TCPDEBUG
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
#endif
		} else {
			/*
			 * Checksum extended TCP header and data.
			 */
			len = sizeof (struct ip) + tlen;
			bzero(ipov->ih_x1, sizeof(ipov->ih_x1));
			ipov->ih_len = (u_short)tlen;
			ipov->ih_len = htons(ipov->ih_len);
			th->th_sum = in_cksum(m, len);
		}
		if (th->th_sum) {
			tcpstat.tcps_rcvbadsum++;
			goto drop;
		}
		/* Re-initialization for later version check */
		ip->ip_v = IPVERSION;
	}
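
	/*
	 * Worked example of the offload arithmetic above (illustrative):
	 * in the CSUM_DATA_VALID-without-CSUM_PSEUDO_HDR case the driver
	 * leaves the one's complement sum of the TCP segment in csum_data
	 * and in_pseudo() folds in the pseudo-header fields (source and
	 * destination address, length, IPPROTO_TCP).  For an intact
	 * segment the total one's complement sum is 0xffff, so after
	 * "th->th_sum ^= 0xffff" the software test "if (th->th_sum)"
	 * sees zero; any nonzero remainder means corruption and bumps
	 * tcps_rcvbadsum.
	 */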

	/*
	 * Check that TCP offset makes sense,
	 * pull out TCP options and adjust length.		XXX
	 */
	off = th->th_off << 2;
	if (off < sizeof (struct tcphdr) || off > tlen) {
		tcpstat.tcps_rcvbadoff++;
		goto drop;
	}
	tlen -= off;	/* tlen is used instead of ti->ti_len */
	if (off > sizeof (struct tcphdr)) {
		if (isipv6) {
#ifdef INET6
			IP6_EXTHDR_CHECK(m, off0, off, );
			ip6 = mtod(m, struct ip6_hdr *);
			th = (struct tcphdr *)((caddr_t)ip6 + off0);
#endif
		} else {
			if (m->m_len < sizeof(struct ip) + off) {
				if ((m = m_pullup(m, sizeof (struct ip) + off))
				    == NULL) {
					tcpstat.tcps_rcvshort++;
					return;
				}
				ip = mtod(m, struct ip *);
				ipov = (struct ipovly *)ip;
				th = (struct tcphdr *)((caddr_t)ip + off0);
			}
		}
		optlen = off - sizeof (struct tcphdr);
		optp = (u_char *)(th + 1);
	}
	thflags = th->th_flags;

	/*
	 * Convert TCP protocol specific fields to host format.
	 */
	th->th_seq = ntohl(th->th_seq);
	th->th_ack = ntohl(th->th_ack);
	th->th_win = ntohs(th->th_win);
	th->th_urp = ntohs(th->th_urp);

	/*
	 * Delay dropping TCP, IP headers, IPv6 ext headers, and TCP options.
	 */
	drop_hdrlen = off0 + off;

	/*
	 * Locate pcb for segment.
	 */
	INP_INFO_WLOCK(&tcbinfo);
findpcb:
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
#ifdef IPFIREWALL_FORWARD
	/*
	 * Grab info from PACKET_TAG_IPFORWARD tag prepended to the chain.
	 */
	fwd_tag = m_tag_find(m, PACKET_TAG_IPFORWARD, NULL);

	if (fwd_tag != NULL && isipv6 == 0) {	/* IPv6 support is not yet */
		struct sockaddr_in *next_hop;

		next_hop = (struct sockaddr_in *)(fwd_tag+1);
		/*
		 * Transparently forwarded. Pretend to be the destination.
		 * already got one like this?
		 */
		inp = in_pcblookup_hash(&tcbinfo,
		    ip->ip_src, th->th_sport,
		    ip->ip_dst, th->th_dport,
		    0, m->m_pkthdr.rcvif);
		if (!inp) {
			/* It's new.  Try to find the ambushing socket. */
			inp = in_pcblookup_hash(&tcbinfo,
			    ip->ip_src, th->th_sport,
			    next_hop->sin_addr,
			    next_hop->sin_port ?
				ntohs(next_hop->sin_port) :
				th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
		}
		/* Remove the tag from the packet.  We don't need it anymore. */
		m_tag_delete(m, fwd_tag);
	} else
#endif /* IPFIREWALL_FORWARD */
	{
		if (isipv6) {
#ifdef INET6
			inp = in6_pcblookup_hash(&tcbinfo,
			    &ip6->ip6_src, th->th_sport,
			    &ip6->ip6_dst, th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
#endif
		} else
			inp = in_pcblookup_hash(&tcbinfo,
			    ip->ip_src, th->th_sport,
			    ip->ip_dst, th->th_dport,
			    INPLOOKUP_WILDCARD,
			    m->m_pkthdr.rcvif);
	}
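
	/*
	 * Illustrative example of the two-step lookup above for transparent
	 * proxying (hypothetical rule, not part of the original source):
	 * with "ipfw add fwd 127.0.0.1,8080 tcp from any to any 80", a
	 * forwarded SYN first misses the exact-match lookup on its original
	 * destination, then the wildcard lookup on the next-hop address and
	 * port finds the local socket listening on 8080, which "ambushes"
	 * the connection.  Segments of already-established forwarded
	 * sessions hit the first, exact-match lookup directly.
	 */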

	/*
	 * If the INPCB does not exist then all data in the incoming
	 * segment is discarded and an appropriate RST is sent back.
	 * XXX MRT Send RST using which routing table?
	 */
	if (inp == NULL) {
		/*
		 * Log communication attempts to ports that are not
		 * in use.
		 */
		if ((tcp_log_in_vain == 1 && (thflags & TH_SYN)) ||
		    tcp_log_in_vain == 2) {
			if ((s = tcp_log_addrs(NULL, th, (void *)ip, ip6)))
				log(LOG_INFO, "%s; %s: Connection attempt "
				    "to closed port\n", s, __func__);
		}
		/*
		 * When blackholing do not respond with a RST but
		 * completely ignore the segment and drop it.
		 */
		if ((blackhole == 1 && (thflags & TH_SYN)) ||
		    blackhole == 2)
			goto dropunlock;

		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}
	INP_WLOCK(inp);

#ifdef IPSEC
#ifdef INET6
	if (isipv6 && ipsec6_in_reject(m, inp)) {
		ipsec6stat.in_polvio++;
		goto dropunlock;
	} else
#endif /* INET6 */
	if (ipsec4_in_reject(m, inp) != 0) {
		ipsec4stat.in_polvio++;
		goto dropunlock;
	}
#endif /* IPSEC */

	/*
	 * Check the minimum TTL for socket.
	 */
	if (inp->inp_ip_minttl != 0) {
#ifdef INET6
		if (isipv6 && inp->inp_ip_minttl > ip6->ip6_hlim)
			goto dropunlock;
		else
#endif
		if (inp->inp_ip_minttl > ip->ip_ttl)
			goto dropunlock;
	}

	/*
	 * A previous connection in TIMEWAIT state is supposed to catch
	 * stray or duplicate segments arriving late.  If this segment
	 * was a legitimate new connection attempt the old INPCB gets
	 * removed and we can try again to find a listening socket.
	 */
	if (inp->inp_vflag & INP_TIMEWAIT) {
		if (thflags & TH_SYN)
			tcp_dooptions(&to, optp, optlen, TO_SYN);
		/*
		 * NB: tcp_twcheck unlocks the INP and frees the mbuf.
		 */
		if (tcp_twcheck(inp, &to, th, m, tlen))
			goto findpcb;
		INP_INFO_WUNLOCK(&tcbinfo);
		return;
	}
	/*
	 * The TCPCB may no longer exist if the connection is winding
	 * down or it is in the CLOSED state.  Either way we drop the
	 * segment and send an appropriate response.
	 */
	tp = intotcpcb(inp);
	if (tp == NULL || tp->t_state == TCPS_CLOSED) {
		rstreason = BANDLIM_RST_CLOSEDPORT;
		goto dropwithreset;
	}

#ifdef MAC
	INP_WLOCK_ASSERT(inp);
	if (mac_inpcb_check_deliver(inp, m))
		goto dropunlock;
#endif
	so = inp->inp_socket;
	KASSERT(so != NULL, ("%s: so == NULL", __func__));
#ifdef TCPDEBUG
	if (so->so_options & SO_DEBUG) {
		ostate = tp->t_state;
		if (isipv6) {
#ifdef INET6
			bcopy((char *)ip6, (char *)tcp_saveipgen, sizeof(*ip6));
#endif
		} else
			bcopy((char *)ip, (char *)tcp_saveipgen, sizeof(*ip));
		tcp_savetcp = *th;
	}
#endif
	/*
	 * When the socket is accepting connections (the INPCB is in LISTEN
	 * state) we look into the SYN cache if this is a new connection
	 * attempt or the completion of a previous one.
	 */
	if (so->so_options & SO_ACCEPTCONN) {
		struct in_conninfo inc;

		KASSERT(tp->t_state == TCPS_LISTEN, ("%s: so accepting but "
		    "tp not listening", __func__));

		bzero(&inc, sizeof(inc));
		inc.inc_isipv6 = isipv6;
#ifdef INET6
		if (isipv6) {
			inc.inc6_faddr = ip6->ip6_src;
			inc.inc6_laddr = ip6->ip6_dst;
		} else
#endif
		{
			inc.inc_faddr = ip->ip_src;
			inc.inc_laddr = ip->ip_dst;
		}
		inc.inc_fport = th->th_sport;
		inc.inc_lport = th->th_dport;

		/*
		 * Check for an existing connection attempt in syncache if
		 * the flag is only ACK.  A successful lookup creates a new
		 * socket appended to the listen queue in SYN_RECEIVED state.
		 */
		if ((thflags & (TH_RST|TH_ACK|TH_SYN)) == TH_ACK) {
			/*
			 * Parse the TCP options here because
			 * syncookies need access to the reflected
			 * timestamp.
			 */
			tcp_dooptions(&to, optp, optlen, 0);
			/*
			 * NB: syncache_expand() doesn't unlock
			 * inp and tcpinfo locks.
			 */
			if (!syncache_expand(&inc, &to, th, &so, m)) {
				/*
				 * No syncache entry or ACK was not
				 * for our SYN/ACK.  Send a RST.
				 * NB: syncache did its own logging
				 * of the failure cause.
				 */
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
			if (so == NULL) {
				/*
				 * We completed the 3-way handshake
				 * but could not allocate a socket
				 * either due to memory shortage,
				 * listen queue length limits or
				 * global socket limits.  Send RST
				 * or wait and have the remote end
				 * retransmit the ACK for another
				 * try.
				 */
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
					log(LOG_DEBUG, "%s; %s: Listen socket: "
					    "Socket allocation failed due to "
					    "limits or memory shortage, %s\n",
					    s, __func__, (tcp_sc_rst_sock_fail ?
					    "sending RST" : "try again"));
				if (tcp_sc_rst_sock_fail) {
					rstreason = BANDLIM_UNLIMITED;
					goto dropwithreset;
				} else
					goto dropunlock;
			}
			/*
			 * Socket is created in state SYN_RECEIVED.
			 * Unlock the listen socket, lock the newly
			 * created socket and update the tp variable.
			 */
			INP_WUNLOCK(inp);	/* listen socket */
			inp = sotoinpcb(so);
			INP_WLOCK(inp);		/* new connection */
			tp = intotcpcb(inp);
			KASSERT(tp->t_state == TCPS_SYN_RECEIVED,
			    ("%s: ", __func__));
			/*
			 * Process the segment and the data it
			 * contains.  tcp_do_segment() consumes
			 * the mbuf chain and unlocks the inpcb.
			 */
			tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen);
			INP_INFO_UNLOCK_ASSERT(&tcbinfo);
			return;
		}
		/*
		 * Segment flag validation for new connection attempts:
		 *
		 * Our (SYN|ACK) response was rejected.
		 * Check with syncache and remove entry to prevent
		 * retransmits.
		 *
		 * NB: syncache_chkrst does its own logging of failure
		 * causes.
		 */
		if (thflags & TH_RST) {
			syncache_chkrst(&inc, th);
			goto dropunlock;
		}
		/*
		 * We can't do anything without SYN.
		 */
		if ((thflags & TH_SYN) == 0) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN is missing, segment ignored\n",
				    s, __func__);
			tcpstat.tcps_badsyn++;
			goto dropunlock;
		}
		/*
		 * (SYN|ACK) is bogus on a listen socket.
		 */
		if (thflags & TH_ACK) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|ACK invalid, segment rejected\n",
				    s, __func__);
			syncache_badack(&inc);	/* XXX: Not needed! */
			tcpstat.tcps_badsyn++;
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
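		/*
		 * Recap of the listen-socket dispatch above and below
		 * (illustrative summary): a bare ACK is treated as the
		 * third step of a handshake and goes to syncache_expand();
		 * an RST cancels a pending syncache entry via
		 * syncache_chkrst(); a bare SYN, once validated below,
		 * ends up in syncache_add(), which answers with SYN|ACK
		 * and keeps only compressed state until the handshake
		 * completes.
		 */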
		/*
		 * If the drop_synfin option is enabled, drop all
		 * segments with both the SYN and FIN bits set.
		 * This prevents e.g. nmap from identifying the
		 * TCP/IP stack.
		 * XXX: Poor reasoning.  nmap has other methods
		 * and is constantly refining its stack detection
		 * strategies.
		 * XXX: This is a violation of the TCP specification
		 * and was used by RFC1644.
		 */
		if ((thflags & TH_FIN) && drop_synfin) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				log(LOG_DEBUG, "%s; %s: Listen socket: "
				    "SYN|FIN segment ignored (based on "
				    "sysctl setting)\n", s, __func__);
			tcpstat.tcps_badsyn++;
			goto dropunlock;
		}
		/*
		 * Segment's flags are (SYN) or (SYN|FIN).
		 *
		 * TH_PUSH, TH_URG, TH_ECE, TH_CWR are ignored
		 * as they do not affect the state of the TCP FSM.
		 * The data pointed to by TH_URG and th_urp is ignored.
		 */
		KASSERT((thflags & (TH_RST|TH_ACK)) == 0,
		    ("%s: Listen socket: TH_RST or TH_ACK set", __func__));
		KASSERT(thflags & (TH_SYN),
		    ("%s: Listen socket: TH_SYN not set", __func__));
#ifdef INET6
		/*
		 * If deprecated address is forbidden,
		 * we do not accept SYN to deprecated interface
		 * address to prevent any new inbound connection from
		 * getting established.
		 * When we do not accept SYN, we send a TCP RST,
		 * with deprecated source address (instead of dropping
		 * it).  We compromise on this, as it is much better for
		 * the peer to send a RST, and the RST will be the final
		 * packet for the exchange.
		 *
		 * If we do not forbid deprecated addresses, we accept
		 * the SYN packet.  RFC2462 does not suggest dropping
		 * SYN in this case.
		 * If we decipher RFC2462 5.5.4, it says like this:
		 * 1. use of deprecated addr with existing
		 *    communication is okay - "SHOULD continue to be
		 *    used"
		 * 2. use of it with new communication:
		 *   (2a) "SHOULD NOT be used if alternate address
		 *        with sufficient scope is available"
		 *   (2b) nothing mentioned otherwise.
		 * Here we fall into (2b) case as we have no choice in
		 * our source address selection - we must obey the peer.
		 *
		 * The wording in RFC2462 is confusing, and there are
		 * multiple descriptions of deprecated address
		 * handling - worse, they are not exactly the same.
		 * I believe 5.5.4 is the best one, so we follow 5.5.4.
		 */
		if (isipv6 && !ip6_use_deprecated) {
			struct in6_ifaddr *ia6;

			if ((ia6 = ip6_getdstifaddr(m)) &&
			    (ia6->ia6_flags & IN6_IFF_DEPRECATED)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to deprecated "
					"IPv6 address rejected\n",
					s, __func__);
				rstreason = BANDLIM_RST_OPENPORT;
				goto dropwithreset;
			}
		}
#endif
		/*
		 * Basic sanity checks on incoming SYN requests:
		 *   Don't respond if the destination is a link layer
		 *	broadcast according to RFC1122 4.2.3.10, p. 104.
		 *   If it is from this socket it must be forged.
		 *   Don't respond if the source or destination is a
		 *	global or subnet broad- or multicast address.
		 *   Note that it is quite possible to receive unicast
		 *	link-layer packets with a broadcast IP address.  Use
		 *	in_broadcast() to find them.
		 */
		if (m->m_flags & (M_BCAST|M_MCAST)) {
			if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
			    log(LOG_DEBUG, "%s; %s: Listen socket: "
				"Connection attempt from broad- or multicast "
				"link layer address ignored\n", s, __func__);
			goto dropunlock;
		}
		if (isipv6) {
#ifdef INET6
			if (th->th_dport == th->th_sport &&
			    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt to/from self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
			    IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to multicast "
					"address ignored\n", s, __func__);
				goto dropunlock;
			}
#endif
		} else {
			if (th->th_dport == th->th_sport &&
			    ip->ip_dst.s_addr == ip->ip_src.s_addr) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to self "
					"ignored\n", s, __func__);
				goto dropunlock;
			}
			if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) ||
			    IN_MULTICAST(ntohl(ip->ip_src.s_addr)) ||
			    ip->ip_src.s_addr == htonl(INADDR_BROADCAST) ||
			    in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) {
				if ((s = tcp_log_addrs(&inc, th, NULL, NULL)))
				    log(LOG_DEBUG, "%s; %s: Listen socket: "
					"Connection attempt from/to broad- "
					"or multicast address ignored\n",
					s, __func__);
				goto dropunlock;
			}
		}
		/*
		 * SYN appears to be valid.  Create compressed TCP state
		 * for syncache.
		 */
#ifdef TCPDEBUG
		if (so->so_options & SO_DEBUG)
			tcp_trace(TA_INPUT, ostate, tp,
			    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
		tcp_dooptions(&to, optp, optlen, TO_SYN);
		syncache_add(&inc, &to, th, inp, &so, m);
		/*
		 * Entry added to syncache and mbuf consumed.
		 * Everything already unlocked by syncache_add().
		 */
		INP_INFO_UNLOCK_ASSERT(&tcbinfo);
		return;
	}

	/*
	 * Segment belongs to a connection in SYN_SENT, ESTABLISHED or later
	 * state.  tcp_do_segment() always consumes the mbuf chain, unlocks
	 * the inpcb, and unlocks pcbinfo.
	 */
	tcp_do_segment(m, th, so, tp, drop_hdrlen, tlen);
	INP_INFO_UNLOCK_ASSERT(&tcbinfo);
	return;

dropwithreset:
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	tcp_dropwithreset(m, th, tp, tlen, rstreason);
	m = NULL;	/* mbuf chain got consumed. */
dropunlock:
	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	if (inp != NULL)
		INP_WUNLOCK(inp);
	INP_INFO_WUNLOCK(&tcbinfo);
drop:
	INP_INFO_UNLOCK_ASSERT(&tcbinfo);
	if (s != NULL)
		free(s, M_TCPLOG);
	if (m != NULL)
		m_freem(m);
	return;
}
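
/*
 * Locking recap for the input path above (summary derived from the
 * asserts, not part of the original source): tcp_input() runs with the
 * global tcbinfo write lock held and acquires the per-connection inpcb
 * lock once a pcb is found.  Whichever path consumes the segment -
 * tcp_do_segment(), the syncache, or one of the drop labels - is
 * responsible for releasing both locks, which is why the normal returns
 * assert INP_INFO_UNLOCK_ASSERT().
 */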

static void
tcp_do_segment(struct mbuf *m, struct tcphdr *th, struct socket *so,
    struct tcpcb *tp, int drop_hdrlen, int tlen)
{
	int thflags, acked, ourfinisacked, needoutput = 0;
	int headlocked = 1;
	int rstreason, todrop, win;
	u_long tiwin;
	struct tcpopt to;

#ifdef TCPDEBUG
	/*
	 * The size of tcp_saveipgen must be the size of the max ip header,
	 * now IPv6.
	 */
	u_char tcp_saveipgen[IP6_HDR_LEN];
	struct tcphdr tcp_savetcp;
	short ostate = 0;
#endif
	thflags = th->th_flags;

	INP_INFO_WLOCK_ASSERT(&tcbinfo);
	INP_WLOCK_ASSERT(tp->t_inpcb);
	KASSERT(tp->t_state > TCPS_LISTEN, ("%s: TCPS_LISTEN",
	    __func__));
	KASSERT(tp->t_state != TCPS_TIME_WAIT, ("%s: TCPS_TIME_WAIT",
	    __func__));

	/*
	 * Segment received on connection.
	 * Reset idle time and keep-alive timer.
	 * XXX: This should be done after segment
	 * validation to ignore broken/spoofed segs.
	 */
	tp->t_rcvtime = ticks;
	if (TCPS_HAVEESTABLISHED(tp->t_state))
		tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);

	/*
	 * Unscale the window into a 32-bit value.
	 * For the SYN_SENT state the scale is zero.
	 */
	tiwin = th->th_win << tp->snd_scale;

	/*
	 * Parse options on any incoming segment.
	 */
	tcp_dooptions(&to, (u_char *)(th + 1),
	    (th->th_off << 2) - sizeof(struct tcphdr),
	    (thflags & TH_SYN) ? TO_SYN : 0);

	/*
	 * If echoed timestamp is later than the current time,
	 * fall back to non RFC1323 RTT calculation.  Normalize
	 * timestamp if syncookies were used when this connection
	 * was established.
	 */
	if ((to.to_flags & TOF_TS) && (to.to_tsecr != 0)) {
		to.to_tsecr -= tp->ts_offset;
		if (TSTMP_GT(to.to_tsecr, ticks))
			to.to_tsecr = 0;
	}
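
	/*
	 * Worked example of the unscaling above (illustrative numbers):
	 * with snd_scale = 6 negotiated at handshake time, an on-the-wire
	 * window of th_win = 65535 yields tiwin = 65535 << 6 = 4194240
	 * bytes.  In SYN_SENT snd_scale is still zero, matching RFC 1323's
	 * rule that the window in a <SYN> or <SYN,ACK> segment itself is
	 * never scaled.
	 */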

	/*
	 * Process options only when we get SYN/ACK back. The SYN case
	 * for incoming connections is handled in tcp_syncache.
	 * According to RFC1323 the window field in a SYN (i.e., a <SYN>
	 * or <SYN,ACK>) segment itself is never scaled.
	 * XXX this is traditional behavior, may need to be cleaned up.
	 */
	if (tp->t_state == TCPS_SYN_SENT && (thflags & TH_SYN)) {
		if ((to.to_flags & TOF_SCALE) &&
		    (tp->t_flags & TF_REQ_SCALE)) {
			tp->t_flags |= TF_RCVD_SCALE;
			tp->snd_scale = to.to_wscale;
		}
		/*
		 * Initial send window.  It will be updated with
		 * the next incoming segment to the scaled value.
		 */
		tp->snd_wnd = th->th_win;
		if (to.to_flags & TOF_TS) {
			tp->t_flags |= TF_RCVD_TSTMP;
			tp->ts_recent = to.to_tsval;
			tp->ts_recent_age = ticks;
		}
		if (to.to_flags & TOF_MSS)
			tcp_mss(tp, to.to_mss);
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    (to.to_flags & TOF_SACKPERM) == 0)
			tp->t_flags &= ~TF_SACK_PERMIT;
	}

	/*
	 * Header prediction: check for the two common cases
	 * of a uni-directional data xfer.  If the packet has
	 * no control flags, is in-sequence, the window didn't
	 * change and we're not retransmitting, it's a
	 * candidate.  If the length is zero and the ack moved
	 * forward, we're the sender side of the xfer.  Just
	 * free the data acked & wake any higher level process
	 * that was blocked waiting for space.  If the length
	 * is non-zero and the ack didn't move, we're the
	 * receiver side.  If we're getting packets in-order
	 * (the reassembly queue is empty), add the data to
	 * the socket buffer and note that we need a delayed ack.
	 * Make sure that the hidden state-flags are also off.
	 * Since we check for TCPS_ESTABLISHED first, it can only
	 * be TH_NEEDSYN.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    th->th_seq == tp->rcv_nxt &&
	    (thflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
	    tp->snd_nxt == tp->snd_max &&
	    tiwin && tiwin == tp->snd_wnd &&
	    ((tp->t_flags & (TF_NEEDSYN|TF_NEEDFIN)) == 0) &&
	    LIST_EMPTY(&tp->t_segq) &&
	    ((to.to_flags & TOF_TS) == 0 ||
	     TSTMP_GEQ(to.to_tsval, tp->ts_recent)) ) {

		/*
		 * If last ACK falls within this segment's sequence numbers,
		 * record the timestamp.
		 * NOTE that the test is modified according to the latest
		 * proposal of the tcplw@cray.com list (Braden 1993/04/26).
		 */
		if ((to.to_flags & TOF_TS) != 0 &&
		    SEQ_LEQ(th->th_seq, tp->last_ack_sent)) {
			tp->ts_recent_age = ticks;
			tp->ts_recent = to.to_tsval;
		}
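		/*
		 * Two fast paths follow (illustrative note): tlen == 0 with
		 * a forward-moving ACK is the sender side of a
		 * unidirectional transfer; tlen != 0 with an unmoved ACK is
		 * the receiver side.  Example of the RTT arithmetic in the
		 * sender path, assuming hz = 1000: if the echoed timestamp
		 * to.to_tsecr is 120 ticks in the past, tcp_xmit_timer() is
		 * fed 121 ticks (~121 ms); the +1 keeps a same-tick echo
		 * from producing a zero sample.
		 */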
		if (tlen == 0) {
			if (SEQ_GT(th->th_ack, tp->snd_una) &&
			    SEQ_LEQ(th->th_ack, tp->snd_max) &&
			    tp->snd_cwnd >= tp->snd_wnd &&
			    ((!tcp_do_newreno &&
			      !(tp->t_flags & TF_SACK_PERMIT) &&
			      tp->t_dupacks < tcprexmtthresh) ||
			     ((tcp_do_newreno ||
			       (tp->t_flags & TF_SACK_PERMIT)) &&
			      !IN_FASTRECOVERY(tp) &&
			      (to.to_flags & TOF_SACK) == 0 &&
			      TAILQ_EMPTY(&tp->snd_holes)))) {
				KASSERT(headlocked,
				    ("%s: headlocked", __func__));
				INP_INFO_WUNLOCK(&tcbinfo);
				headlocked = 0;
				/*
				 * This is a pure ack for outstanding data.
				 */
				++tcpstat.tcps_predack;
				/*
				 * "bad retransmit" recovery.
				 */
				if (tp->t_rxtshift == 1 &&
				    ticks < tp->t_badrxtwin) {
					++tcpstat.tcps_sndrexmitbad;
					tp->snd_cwnd = tp->snd_cwnd_prev;
					tp->snd_ssthresh =
					    tp->snd_ssthresh_prev;
					tp->snd_recover = tp->snd_recover_prev;
					if (tp->t_flags & TF_WASFRECOVERY)
						ENTER_FASTRECOVERY(tp);
					tp->snd_nxt = tp->snd_max;
					tp->t_badrxtwin = 0;
				}

				/*
				 * Recalculate the transmit timer / rtt.
				 *
				 * Some boxes send broken timestamp replies
				 * during the SYN+ACK phase, ignore
				 * timestamps of 0 or we could calculate a
				 * huge RTT and blow up the retransmit timer.
				 */
				if ((to.to_flags & TOF_TS) != 0 &&
				    to.to_tsecr) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - to.to_tsecr)
						tp->t_rttlow = ticks - to.to_tsecr;
					tcp_xmit_timer(tp,
					    ticks - to.to_tsecr + 1);
				} else if (tp->t_rtttime &&
				    SEQ_GT(th->th_ack, tp->t_rtseq)) {
					if (!tp->t_rttlow ||
					    tp->t_rttlow > ticks - tp->t_rtttime)
						tp->t_rttlow = ticks - tp->t_rtttime;
					tcp_xmit_timer(tp,
					    ticks - tp->t_rtttime);
				}
				tcp_xmit_bandwidth_limit(tp, th->th_ack);
				acked = th->th_ack - tp->snd_una;
				tcpstat.tcps_rcvackpack++;
				tcpstat.tcps_rcvackbyte += acked;
				sbdrop(&so->so_snd, acked);
				if (SEQ_GT(tp->snd_una, tp->snd_recover) &&
				    SEQ_LEQ(th->th_ack, tp->snd_recover))
					tp->snd_recover = th->th_ack - 1;
				tp->snd_una = th->th_ack;
				/*
				 * Pull snd_wl2 up to prevent seq wrap relative
				 * to th_ack.
				 */
				tp->snd_wl2 = th->th_ack;
				tp->t_dupacks = 0;
				m_freem(m);
				ND6_HINT(tp); /* Some progress has been made. */

				/*
				 * If all outstanding data are acked, stop
				 * retransmit timer, otherwise restart timer
				 * using current (possibly backed-off) value.
				 * If process is waiting for space,
				 * wakeup/selwakeup/signal.  If data
				 * are ready to send, let tcp_output
				 * decide between more output or persist.
				 */
#ifdef TCPDEBUG
				if (so->so_options & SO_DEBUG)
					tcp_trace(TA_INPUT, ostate, tp,
					    (void *)tcp_saveipgen,
					    &tcp_savetcp, 0);
#endif
				if (tp->snd_una == tp->snd_max)
					tcp_timer_activate(tp, TT_REXMT, 0);
				else if (!tcp_timer_active(tp, TT_PERSIST))
					tcp_timer_activate(tp, TT_REXMT,
					    tp->t_rxtcur);
				sowwakeup(so);
				if (so->so_snd.sb_cc)
					(void) tcp_output(tp);
				goto check_delack;
			}
		} else if (th->th_ack == tp->snd_una &&
		    tlen <= sbspace(&so->so_rcv)) {
			int newsize = 0;	/* automatic sockbuf scaling */

			KASSERT(headlocked, ("%s: headlocked", __func__));
			INP_INFO_WUNLOCK(&tcbinfo);
			headlocked = 0;
			/*
			 * This is a pure, in-sequence data packet
			 * with nothing on the reassembly queue and
			 * we have enough buffer space to take it.
			 */
			/* Clean receiver SACK report if present */
			if ((tp->t_flags & TF_SACK_PERMIT) && tp->rcv_numsacks)
				tcp_clean_sackreport(tp);
			++tcpstat.tcps_preddat;
			tp->rcv_nxt += tlen;
			/*
			 * Pull snd_wl1 up to prevent seq wrap relative to
			 * th_seq.
			 */
			tp->snd_wl1 = th->th_seq;
			/*
			 * Pull rcv_up up to prevent seq wrap relative to
			 * rcv_nxt.
			 */
			tp->rcv_up = tp->rcv_nxt;
			tcpstat.tcps_rcvpack++;
			tcpstat.tcps_rcvbyte += tlen;
			ND6_HINT(tp);	/* Some progress has been made */
#ifdef TCPDEBUG
			if (so->so_options & SO_DEBUG)
				tcp_trace(TA_INPUT, ostate, tp,
				    (void *)tcp_saveipgen, &tcp_savetcp, 0);
#endif
			/*
			 * Automatic sizing of receive socket buffer.  Often the send
			 * buffer size is not optimally adjusted to the actual network
			 * conditions at hand (delay bandwidth product).  Setting the
			 * buffer size too small limits throughput on links with high
			 * bandwidth and high delay (e.g. trans-continental/oceanic links).
			 *
			 * On the receive side the socket buffer memory is only rarely
			 * used to any significant extent.  This allows us to be much
			 * more aggressive in scaling the receive socket buffer.  For
			 * the case that the buffer space is actually used to a large
			 * extent and we run out of kernel memory we can simply drop
			 * the new segments; TCP on the sender will just retransmit them
			 * later.  Setting the buffer size too big may only consume too
			 * much kernel memory if the application doesn't read() from
			 * the socket, or if packet loss or reordering makes use of the
			 * reassembly queue.
			 *
			 * The criteria to step up the receive buffer one notch are:
			 *  1. the number of bytes received during the time it takes
			 *     one timestamp to be reflected back to us (the RTT);
			 *  2. received bytes per RTT is within seven eighth of the
			 *     current socket buffer size;
			 *  3. receive buffer size has not hit maximal automatic size;
			 *
			 * This algorithm does one step per RTT at most and only if
			 * we receive a bulk stream w/o packet losses or reorderings.
			 * Shrinking the buffer during idle times is not necessary as
			 * it doesn't consume any memory when idle.
			 *
			 * TODO: Only step up if the application is actually serving
			 * the buffer to better manage the socket buffer resources.
			 */
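			/*
			 * Worked example with the defaults above
			 * (recvbuf_inc = 16K, recvbuf_max = 256K;
			 * illustrative): with sb_hiwat at 64KB, once more
			 * than 7/8 * 64KB = 56KB arrive within a single
			 * timestamp round (bounded by hz ticks), the code
			 * below grows the buffer to min(64K + 16K, 256K) =
			 * 80KB and restarts the count for the next RTT.
			 */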
			if (tcp_do_autorcvbuf &&
			    to.to_tsecr &&
			    (so->so_rcv.sb_flags & SB_AUTOSIZE)) {
				if (to.to_tsecr > tp->rfbuf_ts &&
				    to.to_tsecr - tp->rfbuf_ts < hz) {
					if (tp->rfbuf_cnt >
					    (so->so_rcv.sb_hiwat / 8 * 7) &&
					    so->so_rcv.sb_hiwat <
					    tcp_autorcvbuf_max) {
						newsize =
						    min(so->so_rcv.sb_hiwat +
						    tcp_autorcvbuf_inc,
						    tcp_autorcvbuf_max);
					}
					/* Start over with next RTT. */
					tp->rfbuf_ts = 0;
					tp->rfbuf_cnt = 0;
				} else
					tp->rfbuf_cnt += tlen;	/* add up */
			}

			/* Add data to socket buffer. */
			SOCKBUF_LOCK(&so->so_rcv);
			if (so->so_rcv.sb_state & SBS_CANTRCVMORE) {
				m_freem(m);
			} else {
				/*
				 * Set new socket buffer size.
				 * Give up when limit is reached.
				 */
				if (newsize)
					if (!sbreserve_locked(&so->so_rcv,
					    newsize, so, curthread))
						so->so_rcv.sb_flags &= ~SB_AUTOSIZE;
				m_adj(m, drop_hdrlen);	/* delayed header drop */
				sbappendstream_locked(&so->so_rcv, m);
			}
			/* NB: sorwakeup_locked() does an implicit unlock. */
			sorwakeup_locked(so);
			if (DELAY_ACK(tp)) {
				tp->t_flags |= TF_DELACK;
			} else {
				tp->t_flags |= TF_ACKNOW;
				tcp_output(tp);
			}
			goto check_delack;
		}
	}

	/*
	 * Calculate amount of space in receive window,
	 * and then do TCP input processing.
	 * Receive window is amount of space in rcv queue,
	 * but not less than advertised window.
	 */
	win = sbspace(&so->so_rcv);
	if (win < 0)
		win = 0;
	tp->rcv_wnd = imax(win, (int)(tp->rcv_adv - tp->rcv_nxt));

	/* Reset receive buffer auto scaling when not in bulk receive mode. */
	tp->rfbuf_ts = 0;
	tp->rfbuf_cnt = 0;
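
	/*
	 * Example of the clamp above (illustrative): if the application
	 * has not drained the socket buffer and sbspace() reports 8KB,
	 * but we have already advertised an edge 16KB past rcv_nxt, the
	 * window is held at rcv_adv - rcv_nxt = 16KB rather than being
	 * pulled back, since TCP should never shrink a window it has
	 * already advertised.
	 */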

	switch (tp->t_state) {

	/*
	 * If the state is SYN_RECEIVED:
	 *	if seg contains an ACK, but not for our SYN/ACK, send a RST.
	 */
	case TCPS_SYN_RECEIVED:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->snd_una) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_RST_OPENPORT;
			goto dropwithreset;
		}
		break;

	/*
	 * If the state is SYN_SENT:
	 *	if seg contains an ACK, but not for our SYN, drop the input.
	 *	if seg contains a RST, then drop the connection.
	 *	if seg does not contain SYN, then drop it.
	 * Otherwise this is an acceptable SYN segment
	 *	initialize tp->rcv_nxt and tp->irs
	 *	if seg contains ack then advance tp->snd_una
	 *	if SYN has been acked change to ESTABLISHED else SYN_RCVD state
	 *	arrange for segment to be acked (eventually)
	 *	continue processing rest of data/controls, beginning with URG
	 */
	case TCPS_SYN_SENT:
		if ((thflags & TH_ACK) &&
		    (SEQ_LEQ(th->th_ack, tp->iss) ||
		     SEQ_GT(th->th_ack, tp->snd_max))) {
			rstreason = BANDLIM_UNLIMITED;
			goto dropwithreset;
		}
		if ((thflags & (TH_ACK|TH_RST)) == (TH_ACK|TH_RST))
			tp = tcp_drop(tp, ECONNREFUSED);
		if (thflags & TH_RST)
			goto drop;
		if (!(thflags & TH_SYN))
			goto drop;

		tp->irs = th->th_seq;
		tcp_rcvseqinit(tp);
		if (thflags & TH_ACK) {
			tcpstat.tcps_connects++;
			soisconnected(so);
#ifdef MAC
			SOCK_LOCK(so);
			mac_socketpeer_set_from_mbuf(m, so);
			SOCK_UNLOCK(so);
#endif
			/* Do window scaling on this connection? */
			if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
			    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
				tp->rcv_scale = tp->request_r_scale;
			}
			tp->rcv_adv += tp->rcv_wnd;
			tp->snd_una++;		/* SYN is acked */
			/*
			 * If there's data, delay ACK; if there's also a FIN
			 * ACKNOW will be turned on later.
			 */
			if (DELAY_ACK(tp) && tlen != 0)
				tcp_timer_activate(tp, TT_DELACK,
				    tcp_delacktime);
			else
				tp->t_flags |= TF_ACKNOW;
			/*
			 * Received <SYN,ACK> in SYN_SENT[*] state.
			 * Transitions:
			 *	SYN_SENT  --> ESTABLISHED
			 *	SYN_SENT* --> FIN_WAIT_1
			 */
			tp->t_starttime = ticks;
			if (tp->t_flags & TF_NEEDFIN) {
				tp->t_state = TCPS_FIN_WAIT_1;
				tp->t_flags &= ~TF_NEEDFIN;
				thflags &= ~TH_SYN;
			} else {
				tp->t_state = TCPS_ESTABLISHED;
				tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
			}
		} else {
			/*
			 * Received initial SYN in SYN-SENT[*] state =>
			 * simultaneous open.  If segment contains CC option
			 * and there is a cached CC, apply TAO test.
			 * If it succeeds, connection is half-synchronized.
			 * Otherwise, do 3-way handshake:
			 *        SYN-SENT -> SYN-RECEIVED
			 *        SYN-SENT* -> SYN-RECEIVED*
			 * If there was no CC option, clear cached CC value.
			 */
			tp->t_flags |= (TF_ACKNOW | TF_NEEDSYN);
			tcp_timer_activate(tp, TT_REXMT, 0);
			tp->t_state = TCPS_SYN_RECEIVED;
		}

		KASSERT(headlocked, ("%s: trimthenstep6: head not locked",
		    __func__));
		INP_WLOCK_ASSERT(tp->t_inpcb);

		/*
		 * Advance th->th_seq to correspond to first data byte.
		 * If data, trim to stay within window,
		 * dropping FIN if necessary.
		 */
		th->th_seq++;
		if (tlen > tp->rcv_wnd) {
			todrop = tlen - tp->rcv_wnd;
			m_adj(m, -todrop);
			tlen = tp->rcv_wnd;
			thflags &= ~TH_FIN;
			tcpstat.tcps_rcvpackafterwin++;
			tcpstat.tcps_rcvbyteafterwin += todrop;
		}
		tp->snd_wl1 = th->th_seq - 1;
		tp->rcv_up = th->th_seq;
		/*
		 * Client side of transaction: already sent SYN and data.
		 * If the remote host used T/TCP to validate the SYN,
		 * our data will be ACK'd; if so, enter normal data segment
		 * processing in the middle of step 5, ack processing.
		 * Otherwise, goto step 6.
		 */
		if (thflags & TH_ACK)
			goto process_ACK;

		goto step6;

	/*
	 * If the state is LAST_ACK or CLOSING or TIME_WAIT:
	 *	do normal processing.
	 *
	 * NB: Leftover from RFC1644 T/TCP.  Cases to be reused later.
	 */
	case TCPS_LAST_ACK:
	case TCPS_CLOSING:
		break;			/* continue normal processing */
	}
1389 * 1390 * 1391 * If the RST bit is set, check the sequence number to see 1392 * if this is a valid reset segment. 1393 * RFC 793 page 37: 1394 * In all states except SYN-SENT, all reset (RST) segments 1395 * are validated by checking their SEQ-fields. A reset is 1396 * valid if its sequence number is in the window. 1397 * Note: this does not take into account delayed ACKs, so 1398 * we should test against last_ack_sent instead of rcv_nxt. 1399 * The sequence number in the reset segment is normally an 1400 * echo of our outgoing acknowlegement numbers, but some hosts 1401 * send a reset with the sequence number at the rightmost edge 1402 * of our receive window, and we have to handle this case. 1403 * Note 2: Paul Watson's paper "Slipping in the Window" has shown 1404 * that brute force RST attacks are possible. To combat this, 1405 * we use a much stricter check while in the ESTABLISHED state, 1406 * only accepting RSTs where the sequence number is equal to 1407 * last_ack_sent. In all other states (the states in which a 1408 * RST is more likely), the more permissive check is used. 1409 * If we have multiple segments in flight, the intial reset 1410 * segment sequence numbers will be to the left of last_ack_sent, 1411 * but they will eventually catch up. 1412 * In any case, it never made sense to trim reset segments to 1413 * fit the receive window since RFC 1122 says: 1414 * 4.2.2.12 RST Segment: RFC-793 Section 3.4 1415 * 1416 * A TCP SHOULD allow a received RST segment to include data. 1417 * 1418 * DISCUSSION 1419 * It has been suggested that a RST segment could contain 1420 * ASCII text that encoded and explained the cause of the 1421 * RST. No standard has yet been established for such 1422 * data. 1423 * 1424 * If the reset segment passes the sequence number test examine 1425 * the state: 1426 * SYN_RECEIVED STATE: 1427 * If passive open, return to LISTEN state. 1428 * If active open, inform user that connection was refused. 1429 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT_2, CLOSE_WAIT STATES: 1430 * Inform user that connection was reset, and close tcb. 1431 * CLOSING, LAST_ACK STATES: 1432 * Close the tcb. 1433 * TIME_WAIT STATE: 1434 * Drop the segment - see Stevens, vol. 2, p. 964 and 1435 * RFC 1337. 1436 */ 1437 if (thflags & TH_RST) { 1438 if (SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) && 1439 SEQ_LEQ(th->th_seq, tp->last_ack_sent + tp->rcv_wnd)) { 1440 switch (tp->t_state) { 1441 1442 case TCPS_SYN_RECEIVED: 1443 so->so_error = ECONNREFUSED; 1444 goto close; 1445 1446 case TCPS_ESTABLISHED: 1447 if (tcp_insecure_rst == 0 && 1448 !(SEQ_GEQ(th->th_seq, tp->rcv_nxt - 1) && 1449 SEQ_LEQ(th->th_seq, tp->rcv_nxt + 1)) && 1450 !(SEQ_GEQ(th->th_seq, tp->last_ack_sent - 1) && 1451 SEQ_LEQ(th->th_seq, tp->last_ack_sent + 1))) { 1452 tcpstat.tcps_badrst++; 1453 goto drop; 1454 } 1455 /* FALLTHROUGH */ 1456 case TCPS_FIN_WAIT_1: 1457 case TCPS_FIN_WAIT_2: 1458 case TCPS_CLOSE_WAIT: 1459 so->so_error = ECONNRESET; 1460 close: 1461 tp->t_state = TCPS_CLOSED; 1462 tcpstat.tcps_drops++; 1463 KASSERT(headlocked, ("%s: trimthenstep6: " 1464 "tcp_close: head not locked", __func__)); 1465 tp = tcp_close(tp); 1466 break; 1467 1468 case TCPS_CLOSING: 1469 case TCPS_LAST_ACK: 1470 KASSERT(headlocked, ("%s: trimthenstep6: " 1471 "tcp_close.2: head not locked", __func__)); 1472 tp = tcp_close(tp); 1473 break; 1474 } 1475 } 1476 goto drop; 1477 } 1478 1479 /* 1480 * RFC 1323 PAWS: If we have a timestamp reply on this segment 1481 * and it's less than ts_recent, drop it. 
1482 */ 1483 if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent && 1484 TSTMP_LT(to.to_tsval, tp->ts_recent)) { 1485 1486 /* Check to see if ts_recent is over 24 days old. */ 1487 if ((int)(ticks - tp->ts_recent_age) > TCP_PAWS_IDLE) { 1488 /* 1489 * Invalidate ts_recent. If this segment updates 1490 * ts_recent, the age will be reset later and ts_recent 1491 * will get a valid value. If it does not, setting 1492 * ts_recent to zero will at least satisfy the 1493 * requirement that zero be placed in the timestamp 1494 * echo reply when ts_recent isn't valid. The 1495 * age isn't reset until we get a valid ts_recent 1496 * because we don't want out-of-order segments to be 1497 * dropped when ts_recent is old. 1498 */ 1499 tp->ts_recent = 0; 1500 } else { 1501 tcpstat.tcps_rcvduppack++; 1502 tcpstat.tcps_rcvdupbyte += tlen; 1503 tcpstat.tcps_pawsdrop++; 1504 if (tlen) 1505 goto dropafterack; 1506 goto drop; 1507 } 1508 } 1509 1510 /* 1511 * In the SYN-RECEIVED state, validate that the packet belongs to 1512 * this connection before trimming the data to fit the receive 1513 * window. Check the sequence number versus IRS since we know 1514 * the sequence numbers haven't wrapped. This is a partial fix 1515 * for the "LAND" DoS attack. 1516 */ 1517 if (tp->t_state == TCPS_SYN_RECEIVED && SEQ_LT(th->th_seq, tp->irs)) { 1518 rstreason = BANDLIM_RST_OPENPORT; 1519 goto dropwithreset; 1520 } 1521 1522 todrop = tp->rcv_nxt - th->th_seq; 1523 if (todrop > 0) { 1524 if (thflags & TH_SYN) { 1525 thflags &= ~TH_SYN; 1526 th->th_seq++; 1527 if (th->th_urp > 1) 1528 th->th_urp--; 1529 else 1530 thflags &= ~TH_URG; 1531 todrop--; 1532 } 1533 /* 1534 * Following if statement from Stevens, vol. 2, p. 960. 1535 */ 1536 if (todrop > tlen 1537 || (todrop == tlen && (thflags & TH_FIN) == 0)) { 1538 /* 1539 * Any valid FIN must be to the left of the window. 1540 * At this point the FIN must be a duplicate or out 1541 * of sequence; drop it. 1542 */ 1543 thflags &= ~TH_FIN; 1544 1545 /* 1546 * Send an ACK to resynchronize and drop any data. 1547 * But keep on processing for RST or ACK. 1548 */ 1549 tp->t_flags |= TF_ACKNOW; 1550 todrop = tlen; 1551 tcpstat.tcps_rcvduppack++; 1552 tcpstat.tcps_rcvdupbyte += todrop; 1553 } else { 1554 tcpstat.tcps_rcvpartduppack++; 1555 tcpstat.tcps_rcvpartdupbyte += todrop; 1556 } 1557 drop_hdrlen += todrop; /* drop from the top afterwards */ 1558 th->th_seq += todrop; 1559 tlen -= todrop; 1560 if (th->th_urp > todrop) 1561 th->th_urp -= todrop; 1562 else { 1563 thflags &= ~TH_URG; 1564 th->th_urp = 0; 1565 } 1566 } 1567 1568 /* 1569 * If new data are received on a connection after the 1570 * user processes are gone, then RST the other end. 1571 */ 1572 if ((so->so_state & SS_NOFDREF) && 1573 tp->t_state > TCPS_CLOSE_WAIT && tlen) { 1574 char *s; 1575 1576 KASSERT(headlocked, ("%s: trimthenstep6: tcp_close.3: head " 1577 "not locked", __func__)); 1578 if ((s = tcp_log_addrs(&tp->t_inpcb->inp_inc, th, NULL, NULL))) { 1579 log(LOG_DEBUG, "%s; %s: %s: Received %d bytes of data after socket " 1580 "was closed, sending RST and removing tcpcb\n", 1581 s, __func__, tcpstates[tp->t_state], tlen); 1582 free(s, M_TCPLOG); 1583 } 1584 tp = tcp_close(tp); 1585 tcpstat.tcps_rcvafterclose++; 1586 rstreason = BANDLIM_UNLIMITED; 1587 goto dropwithreset; 1588 } 1589 1590 /* 1591 * If segment ends after window, drop trailing data 1592 * (and PUSH and FIN); if nothing left, just ACK. 
1593 */ 1594 todrop = (th->th_seq + tlen) - (tp->rcv_nxt + tp->rcv_wnd); 1595 if (todrop > 0) { 1596 tcpstat.tcps_rcvpackafterwin++; 1597 if (todrop >= tlen) { 1598 tcpstat.tcps_rcvbyteafterwin += tlen; 1599 /* 1600 * If window is closed can only take segments at 1601 * window edge, and have to drop data and PUSH from 1602 * incoming segments. Continue processing, but 1603 * remember to ack. Otherwise, drop segment 1604 * and ack. 1605 */ 1606 if (tp->rcv_wnd == 0 && th->th_seq == tp->rcv_nxt) { 1607 tp->t_flags |= TF_ACKNOW; 1608 tcpstat.tcps_rcvwinprobe++; 1609 } else 1610 goto dropafterack; 1611 } else 1612 tcpstat.tcps_rcvbyteafterwin += todrop; 1613 m_adj(m, -todrop); 1614 tlen -= todrop; 1615 thflags &= ~(TH_PUSH|TH_FIN); 1616 } 1617 1618 /* 1619 * If last ACK falls within this segment's sequence numbers, 1620 * record its timestamp. 1621 * NOTE: 1622 * 1) That the test incorporates suggestions from the latest 1623 * proposal of the tcplw@cray.com list (Braden 1993/04/26). 1624 * 2) That updating only on newer timestamps interferes with 1625 * our earlier PAWS tests, so this check should be solely 1626 * predicated on the sequence space of this segment. 1627 * 3) That we modify the segment boundary check to be 1628 * Last.ACK.Sent <= SEG.SEQ + SEG.Len 1629 * instead of RFC1323's 1630 * Last.ACK.Sent < SEG.SEQ + SEG.Len, 1631 * This modified check allows us to overcome RFC1323's 1632 * limitations as described in Stevens TCP/IP Illustrated 1633 * Vol. 2 p.869. In such cases, we can still calculate the 1634 * RTT correctly when RCV.NXT == Last.ACK.Sent. 1635 */ 1636 if ((to.to_flags & TOF_TS) != 0 && 1637 SEQ_LEQ(th->th_seq, tp->last_ack_sent) && 1638 SEQ_LEQ(tp->last_ack_sent, th->th_seq + tlen + 1639 ((thflags & (TH_SYN|TH_FIN)) != 0))) { 1640 tp->ts_recent_age = ticks; 1641 tp->ts_recent = to.to_tsval; 1642 } 1643 1644 /* 1645 * If a SYN is in the window, then this is an 1646 * error and we send an RST and drop the connection. 1647 */ 1648 if (thflags & TH_SYN) { 1649 KASSERT(headlocked, ("%s: tcp_drop: trimthenstep6: " 1650 "head not locked", __func__)); 1651 tp = tcp_drop(tp, ECONNRESET); 1652 rstreason = BANDLIM_UNLIMITED; 1653 goto drop; 1654 } 1655 1656 /* 1657 * If the ACK bit is off: if in SYN-RECEIVED state or SENDSYN 1658 * flag is on (half-synchronized state), then queue data for 1659 * later processing; else drop segment and return. 1660 */ 1661 if ((thflags & TH_ACK) == 0) { 1662 if (tp->t_state == TCPS_SYN_RECEIVED || 1663 (tp->t_flags & TF_NEEDSYN)) 1664 goto step6; 1665 else if (tp->t_flags & TF_ACKNOW) 1666 goto dropafterack; 1667 else 1668 goto drop; 1669 } 1670 1671 /* 1672 * Ack processing. 1673 */ 1674 switch (tp->t_state) { 1675 1676 /* 1677 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter 1678 * ESTABLISHED state and continue processing. 1679 * The ACK was checked above. 1680 */ 1681 case TCPS_SYN_RECEIVED: 1682 1683 tcpstat.tcps_connects++; 1684 soisconnected(so); 1685 /* Do window scaling? 

	/*
	 * Ack processing.
	 */
	switch (tp->t_state) {

	/*
	 * In SYN_RECEIVED state, the ack ACKs our SYN, so enter
	 * ESTABLISHED state and continue processing.
	 * The ACK was checked above.
	 */
	case TCPS_SYN_RECEIVED:

		tcpstat.tcps_connects++;
		soisconnected(so);
		/* Do window scaling? */
		if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) ==
		    (TF_RCVD_SCALE|TF_REQ_SCALE)) {
			tp->rcv_scale = tp->request_r_scale;
			tp->snd_wnd = tiwin;
		}
		/*
		 * Make transitions:
		 *      SYN-RECEIVED  -> ESTABLISHED
		 *      SYN-RECEIVED* -> FIN-WAIT-1
		 */
		tp->t_starttime = ticks;
		if (tp->t_flags & TF_NEEDFIN) {
			tp->t_state = TCPS_FIN_WAIT_1;
			tp->t_flags &= ~TF_NEEDFIN;
		} else {
			tp->t_state = TCPS_ESTABLISHED;
			tcp_timer_activate(tp, TT_KEEP, tcp_keepidle);
		}
		/*
		 * If segment contains data or ACK, will call tcp_reass()
		 * later; if not, do so now to pass queued data to user.
		 */
		if (tlen == 0 && (thflags & TH_FIN) == 0)
			(void) tcp_reass(tp, (struct tcphdr *)0, 0,
			    (struct mbuf *)0);
		tp->snd_wl1 = th->th_seq - 1;
		/* FALLTHROUGH */

	/*
	 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
	 * ACKs.  If the ack is in the range
	 *	tp->snd_una < th->th_ack <= tp->snd_max
	 * then advance tp->snd_una to th->th_ack and drop
	 * data from the retransmission queue.  If this ACK reflects
	 * more up to date window information we update our window information.
	 */
	case TCPS_ESTABLISHED:
	case TCPS_FIN_WAIT_1:
	case TCPS_FIN_WAIT_2:
	case TCPS_CLOSE_WAIT:
	case TCPS_CLOSING:
	case TCPS_LAST_ACK:
		if (SEQ_GT(th->th_ack, tp->snd_max)) {
			tcpstat.tcps_rcvacktoomuch++;
			goto dropafterack;
		}
		if ((tp->t_flags & TF_SACK_PERMIT) &&
		    ((to.to_flags & TOF_SACK) ||
		     !TAILQ_EMPTY(&tp->snd_holes)))
			tcp_sack_doack(tp, &to, th->th_ack);
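		/*
		 * Illustrative summary of the duplicate-ACK accounting
		 * below: an ACK counts toward t_dupacks only if the
		 * retransmit timer is running, it carries no data
		 * (tlen == 0), the advertised window is unchanged and
		 * th_ack == snd_una.  Anything else resets the counter,
		 * so window updates and data segments never trigger a
		 * spurious fast retransmit.
		 */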
1779 */ 1780 awnd = (tp->snd_nxt - tp->snd_fack) + 1781 tp->sackhint.sack_bytes_rexmit; 1782 if (awnd < tp->snd_ssthresh) { 1783 tp->snd_cwnd += tp->t_maxseg; 1784 if (tp->snd_cwnd > tp->snd_ssthresh) 1785 tp->snd_cwnd = tp->snd_ssthresh; 1786 } 1787 } else 1788 tp->snd_cwnd += tp->t_maxseg; 1789 (void) tcp_output(tp); 1790 goto drop; 1791 } else if (tp->t_dupacks == tcprexmtthresh) { 1792 tcp_seq onxt = tp->snd_nxt; 1793 u_int win; 1794 1795 /* 1796 * If we're doing sack, check to 1797 * see if we're already in sack 1798 * recovery. If we're not doing sack, 1799 * check to see if we're in newreno 1800 * recovery. 1801 */ 1802 if (tp->t_flags & TF_SACK_PERMIT) { 1803 if (IN_FASTRECOVERY(tp)) { 1804 tp->t_dupacks = 0; 1805 break; 1806 } 1807 } else if (tcp_do_newreno) { 1808 if (SEQ_LEQ(th->th_ack, 1809 tp->snd_recover)) { 1810 tp->t_dupacks = 0; 1811 break; 1812 } 1813 } 1814 win = min(tp->snd_wnd, tp->snd_cwnd) / 1815 2 / tp->t_maxseg; 1816 if (win < 2) 1817 win = 2; 1818 tp->snd_ssthresh = win * tp->t_maxseg; 1819 ENTER_FASTRECOVERY(tp); 1820 tp->snd_recover = tp->snd_max; 1821 tcp_timer_activate(tp, TT_REXMT, 0); 1822 tp->t_rtttime = 0; 1823 if (tp->t_flags & TF_SACK_PERMIT) { 1824 tcpstat.tcps_sack_recovery_episode++; 1825 tp->sack_newdata = tp->snd_nxt; 1826 tp->snd_cwnd = tp->t_maxseg; 1827 (void) tcp_output(tp); 1828 goto drop; 1829 } 1830 tp->snd_nxt = th->th_ack; 1831 tp->snd_cwnd = tp->t_maxseg; 1832 (void) tcp_output(tp); 1833 KASSERT(tp->snd_limited <= 2, 1834 ("%s: tp->snd_limited too big", 1835 __func__)); 1836 tp->snd_cwnd = tp->snd_ssthresh + 1837 tp->t_maxseg * 1838 (tp->t_dupacks - tp->snd_limited); 1839 if (SEQ_GT(onxt, tp->snd_nxt)) 1840 tp->snd_nxt = onxt; 1841 goto drop; 1842 } else if (tcp_do_rfc3042) { 1843 u_long oldcwnd = tp->snd_cwnd; 1844 tcp_seq oldsndmax = tp->snd_max; 1845 u_int sent; 1846 1847 KASSERT(tp->t_dupacks == 1 || 1848 tp->t_dupacks == 2, 1849 ("%s: dupacks not 1 or 2", 1850 __func__)); 1851 if (tp->t_dupacks == 1) 1852 tp->snd_limited = 0; 1853 tp->snd_cwnd = 1854 (tp->snd_nxt - tp->snd_una) + 1855 (tp->t_dupacks - tp->snd_limited) * 1856 tp->t_maxseg; 1857 (void) tcp_output(tp); 1858 sent = tp->snd_max - oldsndmax; 1859 if (sent > tp->t_maxseg) { 1860 KASSERT((tp->t_dupacks == 2 && 1861 tp->snd_limited == 0) || 1862 (sent == tp->t_maxseg + 1 && 1863 tp->t_flags & TF_SENTFIN), 1864 ("%s: sent too much", 1865 __func__)); 1866 tp->snd_limited = 2; 1867 } else if (sent > 0) 1868 ++tp->snd_limited; 1869 tp->snd_cwnd = oldcwnd; 1870 goto drop; 1871 } 1872 } else 1873 tp->t_dupacks = 0; 1874 break; 1875 } 1876 1877 KASSERT(SEQ_GT(th->th_ack, tp->snd_una), 1878 ("%s: th_ack <= snd_una", __func__)); 1879 1880 /* 1881 * If the congestion window was inflated to account 1882 * for the other side's cached packets, retract it. 1883 */ 1884 if (tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) { 1885 if (IN_FASTRECOVERY(tp)) { 1886 if (SEQ_LT(th->th_ack, tp->snd_recover)) { 1887 if (tp->t_flags & TF_SACK_PERMIT) 1888 tcp_sack_partialack(tp, th); 1889 else 1890 tcp_newreno_partial_ack(tp, th); 1891 } else { 1892 /* 1893 * Out of fast recovery. 1894 * Window inflation should have left us 1895 * with approximately snd_ssthresh 1896 * outstanding data. 1897 * But in case we would be inclined to 1898 * send a burst, better to do it via 1899 * the slow start mechanism. 
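					 * E.g. with ssthresh = 10 * maxseg and only
					 * 2 * maxseg still outstanding beyond th_ack,
					 * th_ack + ssthresh > snd_max, so cwnd is cut
					 * to snd_max - th_ack + maxseg = 3 * maxseg
					 * and the rest of the window is rebuilt by
					 * slow start instead of going out as one burst.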
1900 */ 1901 if (SEQ_GT(th->th_ack + 1902 tp->snd_ssthresh, 1903 tp->snd_max)) 1904 tp->snd_cwnd = tp->snd_max - 1905 th->th_ack + 1906 tp->t_maxseg; 1907 else 1908 tp->snd_cwnd = tp->snd_ssthresh; 1909 } 1910 } 1911 } else { 1912 if (tp->t_dupacks >= tcprexmtthresh && 1913 tp->snd_cwnd > tp->snd_ssthresh) 1914 tp->snd_cwnd = tp->snd_ssthresh; 1915 } 1916 tp->t_dupacks = 0; 1917 /* 1918 * If we reach this point, ACK is not a duplicate, 1919 * i.e., it ACKs something we sent. 1920 */ 1921 if (tp->t_flags & TF_NEEDSYN) { 1922 /* 1923 * T/TCP: Connection was half-synchronized, and our 1924 * SYN has been ACK'd (so connection is now fully 1925 * synchronized). Go to non-starred state, 1926 * increment snd_una for ACK of SYN, and check if 1927 * we can do window scaling. 1928 */ 1929 tp->t_flags &= ~TF_NEEDSYN; 1930 tp->snd_una++; 1931 /* Do window scaling? */ 1932 if ((tp->t_flags & (TF_RCVD_SCALE|TF_REQ_SCALE)) == 1933 (TF_RCVD_SCALE|TF_REQ_SCALE)) { 1934 tp->rcv_scale = tp->request_r_scale; 1935 /* Send window already scaled. */ 1936 } 1937 } 1938 1939process_ACK: 1940 KASSERT(headlocked, ("%s: process_ACK: head not locked", 1941 __func__)); 1942 INP_WLOCK_ASSERT(tp->t_inpcb); 1943 1944 acked = th->th_ack - tp->snd_una; 1945 tcpstat.tcps_rcvackpack++; 1946 tcpstat.tcps_rcvackbyte += acked; 1947 1948 /* 1949 * If we just performed our first retransmit, and the ACK 1950 * arrives within our recovery window, then it was a mistake 1951 * to do the retransmit in the first place. Recover our 1952 * original cwnd and ssthresh, and proceed to transmit where 1953 * we left off. 1954 */ 1955 if (tp->t_rxtshift == 1 && ticks < tp->t_badrxtwin) { 1956 ++tcpstat.tcps_sndrexmitbad; 1957 tp->snd_cwnd = tp->snd_cwnd_prev; 1958 tp->snd_ssthresh = tp->snd_ssthresh_prev; 1959 tp->snd_recover = tp->snd_recover_prev; 1960 if (tp->t_flags & TF_WASFRECOVERY) 1961 ENTER_FASTRECOVERY(tp); 1962 tp->snd_nxt = tp->snd_max; 1963 tp->t_badrxtwin = 0; /* XXX probably not required */ 1964 } 1965 1966 /* 1967 * If we have a timestamp reply, update smoothed 1968 * round trip time. If no timestamp is present but 1969 * transmit timer is running and timed sequence 1970 * number was acked, update smoothed round trip time. 1971 * Since we now have an rtt measurement, cancel the 1972 * timer backoff (cf., Phil Karn's retransmit alg.). 1973 * Recompute the initial retransmit timer. 1974 * 1975 * Some boxes send broken timestamp replies 1976 * during the SYN+ACK phase, ignore 1977 * timestamps of 0 or we could calculate a 1978 * huge RTT and blow up the retransmit timer. 1979 */ 1980 if ((to.to_flags & TOF_TS) != 0 && 1981 to.to_tsecr) { 1982 if (!tp->t_rttlow || tp->t_rttlow > ticks - to.to_tsecr) 1983 tp->t_rttlow = ticks - to.to_tsecr; 1984 tcp_xmit_timer(tp, ticks - to.to_tsecr + 1); 1985 } else if (tp->t_rtttime && SEQ_GT(th->th_ack, tp->t_rtseq)) { 1986 if (!tp->t_rttlow || tp->t_rttlow > ticks - tp->t_rtttime) 1987 tp->t_rttlow = ticks - tp->t_rtttime; 1988 tcp_xmit_timer(tp, ticks - tp->t_rtttime); 1989 } 1990 tcp_xmit_bandwidth_limit(tp, th->th_ack); 1991 1992 /* 1993 * If all outstanding data is acked, stop retransmit 1994 * timer and remember to restart (more output or persist). 1995 * If there is more data to be acked, restart retransmit 1996 * timer, using current (possibly backed-off) value. 
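	 * E.g. after two Karn backoffs t_rxtcur may be four times the
	 * smoothed RTO; a partial ack rearms TT_REXMT with that backed-off
	 * value, and only a fresh rtt sample (tcp_xmit_timer() above)
	 * resets t_rxtshift and brings t_rxtcur back down.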
1997 */ 1998 if (th->th_ack == tp->snd_max) { 1999 tcp_timer_activate(tp, TT_REXMT, 0); 2000 needoutput = 1; 2001 } else if (!tcp_timer_active(tp, TT_PERSIST)) 2002 tcp_timer_activate(tp, TT_REXMT, tp->t_rxtcur); 2003 2004 /* 2005 * If no data (only SYN) was ACK'd, 2006 * skip rest of ACK processing. 2007 */ 2008 if (acked == 0) 2009 goto step6; 2010 2011 /* 2012 * When new data is acked, open the congestion window. 2013 * If the window gives us less than ssthresh packets 2014 * in flight, open exponentially (maxseg per packet). 2015 * Otherwise open linearly: maxseg per window 2016 * (maxseg^2 / cwnd per packet). 2017 */ 2018 if ((!tcp_do_newreno && !(tp->t_flags & TF_SACK_PERMIT)) || 2019 !IN_FASTRECOVERY(tp)) { 2020 u_int cw = tp->snd_cwnd; 2021 u_int incr = tp->t_maxseg; 2022 if (cw > tp->snd_ssthresh) 2023 incr = incr * incr / cw; 2024 tp->snd_cwnd = min(cw+incr, TCP_MAXWIN<<tp->snd_scale); 2025 } 2026 SOCKBUF_LOCK(&so->so_snd); 2027 if (acked > so->so_snd.sb_cc) { 2028 tp->snd_wnd -= so->so_snd.sb_cc; 2029 sbdrop_locked(&so->so_snd, (int)so->so_snd.sb_cc); 2030 ourfinisacked = 1; 2031 } else { 2032 sbdrop_locked(&so->so_snd, acked); 2033 tp->snd_wnd -= acked; 2034 ourfinisacked = 0; 2035 } 2036 /* NB: sowwakeup_locked() does an implicit unlock. */ 2037 sowwakeup_locked(so); 2038 /* Detect una wraparound. */ 2039 if ((tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) && 2040 !IN_FASTRECOVERY(tp) && 2041 SEQ_GT(tp->snd_una, tp->snd_recover) && 2042 SEQ_LEQ(th->th_ack, tp->snd_recover)) 2043 tp->snd_recover = th->th_ack - 1; 2044 if ((tcp_do_newreno || (tp->t_flags & TF_SACK_PERMIT)) && 2045 IN_FASTRECOVERY(tp) && 2046 SEQ_GEQ(th->th_ack, tp->snd_recover)) 2047 EXIT_FASTRECOVERY(tp); 2048 tp->snd_una = th->th_ack; 2049 if (tp->t_flags & TF_SACK_PERMIT) { 2050 if (SEQ_GT(tp->snd_una, tp->snd_recover)) 2051 tp->snd_recover = tp->snd_una; 2052 } 2053 if (SEQ_LT(tp->snd_nxt, tp->snd_una)) 2054 tp->snd_nxt = tp->snd_una; 2055 2056 switch (tp->t_state) { 2057 2058 /* 2059 * In FIN_WAIT_1 STATE in addition to the processing 2060 * for the ESTABLISHED state if our FIN is now acknowledged 2061 * then enter FIN_WAIT_2. 2062 */ 2063 case TCPS_FIN_WAIT_1: 2064 if (ourfinisacked) { 2065 /* 2066 * If we can't receive any more 2067 * data, then closing user can proceed. 2068 * Starting the timer is contrary to the 2069 * specification, but if we don't get a FIN 2070 * we'll hang forever. 2071 * 2072 * XXXjl: 2073 * we should release the tp also, and use a 2074 * compressed state. 2075 */ 2076 if (so->so_rcv.sb_state & SBS_CANTRCVMORE) { 2077 int timeout; 2078 2079 soisdisconnected(so); 2080 timeout = (tcp_fast_finwait2_recycle) ? 2081 tcp_finwait2_timeout : tcp_maxidle; 2082 tcp_timer_activate(tp, TT_2MSL, timeout); 2083 } 2084 tp->t_state = TCPS_FIN_WAIT_2; 2085 } 2086 break; 2087 2088 /* 2089 * In CLOSING STATE in addition to the processing for 2090 * the ESTABLISHED state if the ACK acknowledges our FIN 2091 * then enter the TIME-WAIT state, otherwise ignore 2092 * the segment. 2093 */ 2094 case TCPS_CLOSING: 2095 if (ourfinisacked) { 2096 KASSERT(headlocked, ("%s: process_ACK: " 2097 "head not locked", __func__)); 2098 tcp_twstart(tp); 2099 INP_INFO_WUNLOCK(&tcbinfo); 2100 headlocked = 0; 2101 m_freem(m); 2102 return; 2103 } 2104 break; 2105 2106 /* 2107 * In LAST_ACK, we may still be waiting for data to drain 2108 * and/or to be acked, as well as for the ack of our FIN. 2109 * If our FIN is now acknowledged, delete the TCB, 2110 * enter the closed state and return. 
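	 * E.g. the application may have close()d while data was still
	 * queued in CLOSE_WAIT: an ack that covers the data but not our
	 * FIN leaves ourfinisacked clear and the TCB alive; only an ack
	 * of the FIN itself lets tcp_close() run.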
2111	 */
2112	case TCPS_LAST_ACK:
2113		if (ourfinisacked) {
2114			KASSERT(headlocked, ("%s: process_ACK: "
2115			    "tcp_close: head not locked", __func__));
2116			tp = tcp_close(tp);
2117			goto drop;
2118		}
2119		break;
2120	}
2121	}
2122
2123 step6:
2124	KASSERT(headlocked, ("%s: step6: head not locked", __func__));
2125	INP_WLOCK_ASSERT(tp->t_inpcb);
2126
2127	/*
2128	 * Update window information.
2129	 * Don't look at window if no ACK: TACs send garbage on first SYN.
2130	 */
2131	if ((thflags & TH_ACK) &&
2132	    (SEQ_LT(tp->snd_wl1, th->th_seq) ||
2133	    (tp->snd_wl1 == th->th_seq && (SEQ_LT(tp->snd_wl2, th->th_ack) ||
2134	    (tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd))))) {
2135		/* keep track of pure window updates */
2136		if (tlen == 0 &&
2137		    tp->snd_wl2 == th->th_ack && tiwin > tp->snd_wnd)
2138			tcpstat.tcps_rcvwinupd++;
2139		tp->snd_wnd = tiwin;
2140		tp->snd_wl1 = th->th_seq;
2141		tp->snd_wl2 = th->th_ack;
2142		if (tp->snd_wnd > tp->max_sndwnd)
2143			tp->max_sndwnd = tp->snd_wnd;
2144		needoutput = 1;
2145	}
2146
2147	/*
2148	 * Process segments with URG.
2149	 */
2150	if ((thflags & TH_URG) && th->th_urp &&
2151	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2152		/*
2153		 * This is a kludge, but if we receive and accept
2154		 * random urgent pointers, we'll crash in
2155		 * soreceive.  It's hard to imagine someone
2156		 * actually wanting to send this much urgent data.
2157		 */
2158		SOCKBUF_LOCK(&so->so_rcv);
2159		if (th->th_urp + so->so_rcv.sb_cc > sb_max) {
2160			th->th_urp = 0;			/* XXX */
2161			thflags &= ~TH_URG;		/* XXX */
2162			SOCKBUF_UNLOCK(&so->so_rcv);	/* XXX */
2163			goto dodata;			/* XXX */
2164		}
2165		/*
2166		 * If this segment advances the known urgent pointer,
2167		 * then mark the data stream.  This should not happen
2168		 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
2169		 * a FIN has been received from the remote side.
2170		 * In these states we ignore the URG.
2171		 *
2172		 * According to RFC961 (Assigned Protocols),
2173		 * the urgent pointer points to the last octet
2174		 * of urgent data.  We continue, however,
2175		 * to consider it to indicate the first octet
2176		 * of data past the urgent section as the original
2177		 * spec states (in one of two places).
2178		 */
2179		if (SEQ_GT(th->th_seq+th->th_urp, tp->rcv_up)) {
2180			tp->rcv_up = th->th_seq + th->th_urp;
2181			so->so_oobmark = so->so_rcv.sb_cc +
2182			    (tp->rcv_up - tp->rcv_nxt) - 1;
2183			if (so->so_oobmark == 0)
2184				so->so_rcv.sb_state |= SBS_RCVATMARK;
2185			sohasoutofband(so);
2186			tp->t_oobflags &= ~(TCPOOB_HAVEDATA | TCPOOB_HADDATA);
2187		}
2188		SOCKBUF_UNLOCK(&so->so_rcv);
2189		/*
2190		 * Remove out of band data so it doesn't get presented to the
2191		 * user.  This can happen independent of advancing the URG
2192		 * pointer, but if two URGs are pending at once, some
2193		 * out-of-band data may creep in... ick.
2194		 */
2195		if (th->th_urp <= (u_long)tlen &&
2196		    !(so->so_options & SO_OOBINLINE)) {
2197			/* hdr drop is delayed */
2198			tcp_pulloutofband(so, th, m, drop_hdrlen);
2199		}
2200	} else {
2201		/*
2202		 * If no out of band data is expected,
2203		 * pull receive urgent pointer along
2204		 * with the receive window.
2205		 */
2206		if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
2207			tp->rcv_up = tp->rcv_nxt;
2208	}
2209 dodata:						/* XXX */
2210	KASSERT(headlocked, ("%s: dodata: head not locked", __func__));
2211	INP_WLOCK_ASSERT(tp->t_inpcb);
2212
2213	/*
2214	 * Process the segment text, merging it into the TCP sequencing queue,
2215	 * and arranging for acknowledgment of receipt if necessary.
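	 * (In the common case - an in-order segment on an established
	 * connection with an empty reassembly queue - the data is appended
	 * straight to the socket buffer below; everything else goes through
	 * tcp_reass() and forces an immediate ACK.)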
2216	 * This process logically involves adjusting tp->rcv_wnd as data
2217	 * is presented to the user (this happens in tcp_usrreq.c,
2218	 * case PRU_RCVD).  If a FIN has already been received on this
2219	 * connection then we just ignore the text.
2220	 */
2221	if ((tlen || (thflags & TH_FIN)) &&
2222	    TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2223		tcp_seq save_start = th->th_seq;
2224		m_adj(m, drop_hdrlen);	/* delayed header drop */
2225		/*
2226		 * Insert segment which includes th into TCP reassembly queue
2227		 * with control block tp.  Set thflags to whether reassembly now
2228		 * includes a segment with FIN.  This handles the common case
2229		 * inline (segment is the next to be received on an established
2230		 * connection, and the queue is empty), avoiding linkage into
2231		 * and removal from the queue and repetition of various
2232		 * conversions.
2233		 * Set DELACK for segments received in order, but ack
2234		 * immediately when segments are out of order (so
2235		 * fast retransmit can work).
2236		 */
2237		if (th->th_seq == tp->rcv_nxt &&
2238		    LIST_EMPTY(&tp->t_segq) &&
2239		    TCPS_HAVEESTABLISHED(tp->t_state)) {
2240			if (DELAY_ACK(tp))
2241				tp->t_flags |= TF_DELACK;
2242			else
2243				tp->t_flags |= TF_ACKNOW;
2244			tp->rcv_nxt += tlen;
2245			thflags = th->th_flags & TH_FIN;
2246			tcpstat.tcps_rcvpack++;
2247			tcpstat.tcps_rcvbyte += tlen;
2248			ND6_HINT(tp);
2249			SOCKBUF_LOCK(&so->so_rcv);
2250			if (so->so_rcv.sb_state & SBS_CANTRCVMORE)
2251				m_freem(m);
2252			else
2253				sbappendstream_locked(&so->so_rcv, m);
2254			/* NB: sorwakeup_locked() does an implicit unlock. */
2255			sorwakeup_locked(so);
2256		} else {
2257			/*
2258			 * XXX: Due to the header drop above "th" is
2259			 * theoretically invalid by now.  Fortunately
2260			 * m_adj() doesn't actually free any mbufs
2261			 * when trimming from the head.
2262			 */
2263			thflags = tcp_reass(tp, th, &tlen, m);
2264			tp->t_flags |= TF_ACKNOW;
2265		}
2266		if (tlen > 0 && (tp->t_flags & TF_SACK_PERMIT))
2267			tcp_update_sack_list(tp, save_start, save_start + tlen);
2268 #if 0
2269		/*
2270		 * Note the amount of data that peer has sent into
2271		 * our window, in order to estimate the sender's
2272		 * buffer size.
2273		 * XXX: Unused.
2274		 */
2275		len = so->so_rcv.sb_hiwat - (tp->rcv_adv - tp->rcv_nxt);
2276 #endif
2277	} else {
2278		m_freem(m);
2279		thflags &= ~TH_FIN;
2280	}
2281
2282	/*
2283	 * If FIN is received ACK the FIN and let the user know
2284	 * that the connection is closing.
2285	 */
2286	if (thflags & TH_FIN) {
2287		if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
2288			socantrcvmore(so);
2289			/*
2290			 * If connection is half-synchronized
2291			 * (ie NEEDSYN flag on) then delay ACK,
2292			 * so it may be piggybacked when SYN is sent.
2293			 * Otherwise, since we received a FIN then no
2294			 * more input can be expected, send ACK now.
2295			 */
2296			if (tp->t_flags & TF_NEEDSYN)
2297				tp->t_flags |= TF_DELACK;
2298			else
2299				tp->t_flags |= TF_ACKNOW;
2300			tp->rcv_nxt++;
2301		}
2302		switch (tp->t_state) {
2303
2304		/*
2305		 * In SYN_RECEIVED and ESTABLISHED STATES
2306		 * enter the CLOSE_WAIT state.
2307		 */
2308		case TCPS_SYN_RECEIVED:
2309			tp->t_starttime = ticks;
2310			/* FALLTHROUGH */
2311		case TCPS_ESTABLISHED:
2312			tp->t_state = TCPS_CLOSE_WAIT;
2313			break;
2314
2315		/*
2316		 * If still in FIN_WAIT_1 STATE, our FIN has not been acked,
2317		 * so enter the CLOSING state.
2318		 */
2319		case TCPS_FIN_WAIT_1:
2320			tp->t_state = TCPS_CLOSING;
2321			break;
2322
2323		/*
2324		 * In FIN_WAIT_2 state enter the TIME_WAIT state,
2325		 * starting the time-wait timer, turning off the other
2326		 * standard timers.
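		 * (tcp_twstart() below replaces the TCB with the compressed
		 * time-wait state, which expires after 2*MSL - long enough
		 * to re-ack a retransmitted FIN from the peer.)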
2327 */ 2328 case TCPS_FIN_WAIT_2: 2329 KASSERT(headlocked == 1, ("%s: dodata: " 2330 "TCP_FIN_WAIT_2: head not locked", __func__)); 2331 tcp_twstart(tp); 2332 INP_INFO_WUNLOCK(&tcbinfo); 2333 return; 2334 } 2335 } 2336 INP_INFO_WUNLOCK(&tcbinfo); 2337 headlocked = 0; 2338#ifdef TCPDEBUG 2339 if (so->so_options & SO_DEBUG) 2340 tcp_trace(TA_INPUT, ostate, tp, (void *)tcp_saveipgen, 2341 &tcp_savetcp, 0); 2342#endif 2343 2344 /* 2345 * Return any desired output. 2346 */ 2347 if (needoutput || (tp->t_flags & TF_ACKNOW)) 2348 (void) tcp_output(tp); 2349 2350check_delack: 2351 KASSERT(headlocked == 0, ("%s: check_delack: head locked", 2352 __func__)); 2353 INP_INFO_UNLOCK_ASSERT(&tcbinfo); 2354 INP_WLOCK_ASSERT(tp->t_inpcb); 2355 if (tp->t_flags & TF_DELACK) { 2356 tp->t_flags &= ~TF_DELACK; 2357 tcp_timer_activate(tp, TT_DELACK, tcp_delacktime); 2358 } 2359 INP_WUNLOCK(tp->t_inpcb); 2360 return; 2361 2362dropafterack: 2363 KASSERT(headlocked, ("%s: dropafterack: head not locked", __func__)); 2364 /* 2365 * Generate an ACK dropping incoming segment if it occupies 2366 * sequence space, where the ACK reflects our state. 2367 * 2368 * We can now skip the test for the RST flag since all 2369 * paths to this code happen after packets containing 2370 * RST have been dropped. 2371 * 2372 * In the SYN-RECEIVED state, don't send an ACK unless the 2373 * segment we received passes the SYN-RECEIVED ACK test. 2374 * If it fails send a RST. This breaks the loop in the 2375 * "LAND" DoS attack, and also prevents an ACK storm 2376 * between two listening ports that have been sent forged 2377 * SYN segments, each with the source address of the other. 2378 */ 2379 if (tp->t_state == TCPS_SYN_RECEIVED && (thflags & TH_ACK) && 2380 (SEQ_GT(tp->snd_una, th->th_ack) || 2381 SEQ_GT(th->th_ack, tp->snd_max)) ) { 2382 rstreason = BANDLIM_RST_OPENPORT; 2383 goto dropwithreset; 2384 } 2385#ifdef TCPDEBUG 2386 if (so->so_options & SO_DEBUG) 2387 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2388 &tcp_savetcp, 0); 2389#endif 2390 KASSERT(headlocked, ("%s: headlocked should be 1", __func__)); 2391 INP_INFO_WUNLOCK(&tcbinfo); 2392 tp->t_flags |= TF_ACKNOW; 2393 (void) tcp_output(tp); 2394 INP_WUNLOCK(tp->t_inpcb); 2395 m_freem(m); 2396 return; 2397 2398dropwithreset: 2399 KASSERT(headlocked, ("%s: dropwithreset: head not locked", __func__)); 2400 2401 tcp_dropwithreset(m, th, tp, tlen, rstreason); 2402 2403 if (tp != NULL) 2404 INP_WUNLOCK(tp->t_inpcb); 2405 if (headlocked) 2406 INP_INFO_WUNLOCK(&tcbinfo); 2407 return; 2408 2409drop: 2410 /* 2411 * Drop space held by incoming segment and return. 2412 */ 2413#ifdef TCPDEBUG 2414 if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG)) 2415 tcp_trace(TA_DROP, ostate, tp, (void *)tcp_saveipgen, 2416 &tcp_savetcp, 0); 2417#endif 2418 if (tp != NULL) 2419 INP_WUNLOCK(tp->t_inpcb); 2420 if (headlocked) 2421 INP_INFO_WUNLOCK(&tcbinfo); 2422 m_freem(m); 2423 return; 2424} 2425 2426/* 2427 * Issue RST and make ACK acceptable to originator of segment. 2428 * The mbuf must still include the original packet header. 2429 * tp may be NULL. 2430 */ 2431static void 2432tcp_dropwithreset(struct mbuf *m, struct tcphdr *th, struct tcpcb *tp, 2433 int tlen, int rstreason) 2434{ 2435 struct ip *ip; 2436#ifdef INET6 2437 struct ip6_hdr *ip6; 2438#endif 2439 2440 if (tp != NULL) { 2441 INP_WLOCK_ASSERT(tp->t_inpcb); 2442 } 2443 2444 /* Don't bother if destination was broadcast/multicast. 
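	 * Replying with a RST would let forged segments use us as a
	 * reflector, and a RST must never be answered with a RST - which
	 * is why the TH_RST check below comes first.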
*/ 2445 if ((th->th_flags & TH_RST) || m->m_flags & (M_BCAST|M_MCAST)) 2446 goto drop; 2447#ifdef INET6 2448 if (mtod(m, struct ip *)->ip_v == 6) { 2449 ip6 = mtod(m, struct ip6_hdr *); 2450 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) || 2451 IN6_IS_ADDR_MULTICAST(&ip6->ip6_src)) 2452 goto drop; 2453 /* IPv6 anycast check is done at tcp6_input() */ 2454 } else 2455#endif 2456 { 2457 ip = mtod(m, struct ip *); 2458 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)) || 2459 IN_MULTICAST(ntohl(ip->ip_src.s_addr)) || 2460 ip->ip_src.s_addr == htonl(INADDR_BROADCAST) || 2461 in_broadcast(ip->ip_dst, m->m_pkthdr.rcvif)) 2462 goto drop; 2463 } 2464 2465 /* Perform bandwidth limiting. */ 2466 if (badport_bandlim(rstreason) < 0) 2467 goto drop; 2468 2469 /* tcp_respond consumes the mbuf chain. */ 2470 if (th->th_flags & TH_ACK) { 2471 tcp_respond(tp, mtod(m, void *), th, m, (tcp_seq)0, 2472 th->th_ack, TH_RST); 2473 } else { 2474 if (th->th_flags & TH_SYN) 2475 tlen++; 2476 tcp_respond(tp, mtod(m, void *), th, m, th->th_seq+tlen, 2477 (tcp_seq)0, TH_RST|TH_ACK); 2478 } 2479 return; 2480drop: 2481 m_freem(m); 2482 return; 2483} 2484 2485/* 2486 * Parse TCP options and place in tcpopt. 2487 */ 2488static void 2489tcp_dooptions(struct tcpopt *to, u_char *cp, int cnt, int flags) 2490{ 2491 int opt, optlen; 2492 2493 to->to_flags = 0; 2494 for (; cnt > 0; cnt -= optlen, cp += optlen) { 2495 opt = cp[0]; 2496 if (opt == TCPOPT_EOL) 2497 break; 2498 if (opt == TCPOPT_NOP) 2499 optlen = 1; 2500 else { 2501 if (cnt < 2) 2502 break; 2503 optlen = cp[1]; 2504 if (optlen < 2 || optlen > cnt) 2505 break; 2506 } 2507 switch (opt) { 2508 case TCPOPT_MAXSEG: 2509 if (optlen != TCPOLEN_MAXSEG) 2510 continue; 2511 if (!(flags & TO_SYN)) 2512 continue; 2513 to->to_flags |= TOF_MSS; 2514 bcopy((char *)cp + 2, 2515 (char *)&to->to_mss, sizeof(to->to_mss)); 2516 to->to_mss = ntohs(to->to_mss); 2517 break; 2518 case TCPOPT_WINDOW: 2519 if (optlen != TCPOLEN_WINDOW) 2520 continue; 2521 if (!(flags & TO_SYN)) 2522 continue; 2523 to->to_flags |= TOF_SCALE; 2524 to->to_wscale = min(cp[2], TCP_MAX_WINSHIFT); 2525 break; 2526 case TCPOPT_TIMESTAMP: 2527 if (optlen != TCPOLEN_TIMESTAMP) 2528 continue; 2529 to->to_flags |= TOF_TS; 2530 bcopy((char *)cp + 2, 2531 (char *)&to->to_tsval, sizeof(to->to_tsval)); 2532 to->to_tsval = ntohl(to->to_tsval); 2533 bcopy((char *)cp + 6, 2534 (char *)&to->to_tsecr, sizeof(to->to_tsecr)); 2535 to->to_tsecr = ntohl(to->to_tsecr); 2536 break; 2537#ifdef TCP_SIGNATURE 2538 /* 2539 * XXX In order to reply to a host which has set the 2540 * TCP_SIGNATURE option in its initial SYN, we have to 2541 * record the fact that the option was observed here 2542 * for the syncache code to perform the correct response. 
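		 * (The MD5 digest itself is not checked at this point; we
		 * only set TOF_SIGNATURE and remember where the digest
		 * lives in the option block.)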
2543		 */
2544		case TCPOPT_SIGNATURE:
2545			if (optlen != TCPOLEN_SIGNATURE)
2546				continue;
2547			to->to_flags |= TOF_SIGNATURE;
2548			to->to_signature = cp + 2;
2549			break;
2550 #endif
2551		case TCPOPT_SACK_PERMITTED:
2552			if (optlen != TCPOLEN_SACK_PERMITTED)
2553				continue;
2554			if (!(flags & TO_SYN))
2555				continue;
2556			if (!tcp_do_sack)
2557				continue;
2558			to->to_flags |= TOF_SACKPERM;
2559			break;
2560		case TCPOPT_SACK:
2561			if (optlen <= 2 || (optlen - 2) % TCPOLEN_SACK != 0)
2562				continue;
2563			if (flags & TO_SYN)
2564				continue;
2565			to->to_flags |= TOF_SACK;
2566			to->to_nsacks = (optlen - 2) / TCPOLEN_SACK;
2567			to->to_sacks = cp + 2;
2568			tcpstat.tcps_sack_rcv_blocks++;
2569			break;
2570		default:
2571			continue;
2572		}
2573	}
2574 }
2575
2576 /*
2577  * Pull out of band byte out of a segment so
2578  * it doesn't appear in the user's data queue.
2579  * It is still reflected in the segment length for
2580  * sequencing purposes.
2581  */
2582 static void
2583 tcp_pulloutofband(struct socket *so, struct tcphdr *th, struct mbuf *m,
2584     int off)
2585 {
2586	int cnt = off + th->th_urp - 1;
2587
2588	while (cnt >= 0) {
2589		if (m->m_len > cnt) {
2590			char *cp = mtod(m, caddr_t) + cnt;
2591			struct tcpcb *tp = sototcpcb(so);
2592
2593			INP_WLOCK_ASSERT(tp->t_inpcb);
2594
2595			tp->t_iobc = *cp;
2596			tp->t_oobflags |= TCPOOB_HAVEDATA;
2597			bcopy(cp+1, cp, (unsigned)(m->m_len - cnt - 1));
2598			m->m_len--;
2599			if (m->m_flags & M_PKTHDR)
2600				m->m_pkthdr.len--;
2601			return;
2602		}
2603		cnt -= m->m_len;
2604		m = m->m_next;
2605		if (m == NULL)
2606			break;
2607	}
2608	panic("tcp_pulloutofband");
2609 }
2610
2611 /*
2612  * Collect new round-trip time estimate
2613  * and update averages and current timeout.
2614  */
2615 static void
2616 tcp_xmit_timer(struct tcpcb *tp, int rtt)
2617 {
2618	int delta;
2619
2620	INP_WLOCK_ASSERT(tp->t_inpcb);
2621
2622	tcpstat.tcps_rttupdated++;
2623	tp->t_rttupdated++;
2624	if (tp->t_srtt != 0) {
2625		/*
2626		 * srtt is stored as fixed point with 5 bits after the
2627		 * binary point (i.e., scaled by 32).  The following magic
2628		 * is equivalent to the smoothing algorithm in rfc793 with
2629		 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
2630		 * point).  Adjust rtt to origin 0.
2631		 */
2632		delta = ((rtt - 1) << TCP_DELTA_SHIFT)
2633			- (tp->t_srtt >> (TCP_RTT_SHIFT - TCP_DELTA_SHIFT));
2634
2635		if ((tp->t_srtt += delta) <= 0)
2636			tp->t_srtt = 1;
2637
2638		/*
2639		 * We accumulate a smoothed rtt variance (actually, a
2640		 * smoothed mean difference), then set the retransmit
2641		 * timer to smoothed rtt + 4 times the smoothed variance.
2642		 * rttvar is stored as fixed point with 4 bits after the
2643		 * binary point (scaled by 16).  The following is
2644		 * equivalent to rfc793 smoothing with an alpha of .75
2645		 * (rttvar = rttvar*3/4 + |delta| / 4).  This replaces
2646		 * rfc793's wired-in beta.
2647		 */
2648		if (delta < 0)
2649			delta = -delta;
2650		delta -= tp->t_rttvar >> (TCP_RTTVAR_SHIFT - TCP_DELTA_SHIFT);
2651		if ((tp->t_rttvar += delta) <= 0)
2652			tp->t_rttvar = 1;
2653		if (tp->t_rttbest > tp->t_srtt + tp->t_rttvar)
2654		    tp->t_rttbest = tp->t_srtt + tp->t_rttvar;
2655	} else {
2656		/*
2657		 * No rtt measurement yet - use the unsmoothed rtt.
2658		 * Set the variance to half the rtt (so our first
2659		 * retransmit happens at 3*rtt).
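		 * E.g. rtt = 8 ticks with the tcp_timer.h scaling used here
		 * (TCP_RTT_SHIFT 5, TCP_RTTVAR_SHIFT 4, TCP_DELTA_SHIFT 2):
		 * t_srtt = 8 << 5 = 256, t_rttvar = 8 << 3 = 64, and
		 * TCP_REXMTVAL() then works out to ((256 >> 3) + 64) >> 2 =
		 * 24 ticks, i.e. 3 * rtt.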
2660 */ 2661 tp->t_srtt = rtt << TCP_RTT_SHIFT; 2662 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1); 2663 tp->t_rttbest = tp->t_srtt + tp->t_rttvar; 2664 } 2665 tp->t_rtttime = 0; 2666 tp->t_rxtshift = 0; 2667 2668 /* 2669 * the retransmit should happen at rtt + 4 * rttvar. 2670 * Because of the way we do the smoothing, srtt and rttvar 2671 * will each average +1/2 tick of bias. When we compute 2672 * the retransmit timer, we want 1/2 tick of rounding and 2673 * 1 extra tick because of +-1/2 tick uncertainty in the 2674 * firing of the timer. The bias will give us exactly the 2675 * 1.5 tick we need. But, because the bias is 2676 * statistical, we have to test that we don't drop below 2677 * the minimum feasible timer (which is 2 ticks). 2678 */ 2679 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp), 2680 max(tp->t_rttmin, rtt + 2), TCPTV_REXMTMAX); 2681 2682 /* 2683 * We received an ack for a packet that wasn't retransmitted; 2684 * it is probably safe to discard any error indications we've 2685 * received recently. This isn't quite right, but close enough 2686 * for now (a route might have failed after we sent a segment, 2687 * and the return path might not be symmetrical). 2688 */ 2689 tp->t_softerror = 0; 2690} 2691 2692/* 2693 * Determine a reasonable value for maxseg size. 2694 * If the route is known, check route for mtu. 2695 * If none, use an mss that can be handled on the outgoing 2696 * interface without forcing IP to fragment; if bigger than 2697 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES 2698 * to utilize large mbufs. If no route is found, route has no mtu, 2699 * or the destination isn't local, use a default, hopefully conservative 2700 * size (usually 512 or the default IP max size, but no more than the mtu 2701 * of the interface), as we can't discover anything about intervening 2702 * gateways or networks. We also initialize the congestion/slow start 2703 * window to be a single segment if the destination isn't local. 2704 * While looking at the routing entry, we also initialize other path-dependent 2705 * parameters from pre-set or cached values in the routing entry. 2706 * 2707 * Also take into account the space needed for options that we 2708 * send regularly. Make maxseg shorter by that amount to assure 2709 * that we can send maxseg amount of data even when the options 2710 * are present. Store the upper limit of the length of options plus 2711 * data in maxopd. 2712 * 2713 * In case of T/TCP, we call this routine during implicit connection 2714 * setup as well (offer = -1), to initialize maxseg from the cached 2715 * MSS of our peer. 2716 * 2717 * NOTE that this routine is only called when we process an incoming 2718 * segment. Outgoing SYN/ACK MSS settings are handled in tcp_mssopt(). 2719 */ 2720void 2721tcp_mss(struct tcpcb *tp, int offer) 2722{ 2723 int rtt, mss; 2724 u_long bufsize; 2725 u_long maxmtu; 2726 struct inpcb *inp = tp->t_inpcb; 2727 struct socket *so; 2728 struct hc_metrics_lite metrics; 2729 int origoffer = offer; 2730 int mtuflags = 0; 2731#ifdef INET6 2732 int isipv6 = ((inp->inp_vflag & INP_IPV6) != 0) ? 1 : 0; 2733 size_t min_protoh = isipv6 ? 2734 sizeof (struct ip6_hdr) + sizeof (struct tcphdr) : 2735 sizeof (struct tcpiphdr); 2736#else 2737 const size_t min_protoh = sizeof(struct tcpiphdr); 2738#endif 2739 2740 INP_WLOCK_ASSERT(tp->t_inpcb); 2741 2742 /* Initialize. 
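	 * Start from the per-address-family default (tcp_v6mssdflt or
	 * tcp_mssdflt) and the interface-limited path MTU; both are
	 * refined from hostcache metrics below.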
	 */
2743 #ifdef INET6
2744	if (isipv6) {
2745		maxmtu = tcp_maxmtu6(&inp->inp_inc, &mtuflags);
2746		tp->t_maxopd = tp->t_maxseg = tcp_v6mssdflt;
2747	} else
2748 #endif
2749	{
2750		maxmtu = tcp_maxmtu(&inp->inp_inc, &mtuflags);
2751		tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
2752	}
2753
2754	/*
2755	 * No route to sender, stay with default mss and return.
2756	 */
2757	if (maxmtu == 0)
2758		return;
2759
2760	/* What have we got? */
2761	switch (offer) {
2762	case 0:
2763		/*
2764		 * Offer == 0 means that there was no MSS on the SYN
2765		 * segment, in this case we use tcp_mssdflt as
2766		 * already assigned to t_maxopd above.
2767		 */
2768		offer = tp->t_maxopd;
2769		break;
2770
2771	case -1:
2772		/*
2773		 * Offer == -1 means that we didn't receive SYN yet.
2774		 */
2775		/* FALLTHROUGH */
2776
2777	default:
2778		/*
2779		 * Prevent DoS attack with too small MSS.  Round up
2780		 * to at least minmss.
2781		 */
2782		offer = max(offer, tcp_minmss);
2783		/*
2784		 * Sanity check: make sure that maxopd will be large
2785		 * enough to allow some data on segments even if all
2786		 * the option space is used (40 bytes).  Otherwise
2787		 * funny things may happen in tcp_output.
2788		 */
2789		offer = max(offer, 64);
2790	}
2791
2792	/*
2793	 * rmx information is now retrieved from tcp_hostcache.
2794	 */
2795	tcp_hc_get(&inp->inp_inc, &metrics);
2796
2797	/*
2798	 * If there's a discovered mtu in the tcp hostcache, use it;
2799	 * else, use the link mtu.
2800	 */
2801	if (metrics.rmx_mtu)
2802		mss = min(metrics.rmx_mtu, maxmtu) - min_protoh;
2803	else {
2804 #ifdef INET6
2805		if (isipv6) {
2806			mss = maxmtu - min_protoh;
2807			if (!path_mtu_discovery &&
2808			    !in6_localaddr(&inp->in6p_faddr))
2809				mss = min(mss, tcp_v6mssdflt);
2810		} else
2811 #endif
2812		{
2813			mss = maxmtu - min_protoh;
2814			if (!path_mtu_discovery &&
2815			    !in_localaddr(inp->inp_faddr))
2816				mss = min(mss, tcp_mssdflt);
2817		}
2818	}
2819	mss = min(mss, offer);
2820
2821	/*
2822	 * maxopd stores the maximum length of data AND options
2823	 * in a segment; maxseg is the amount of data in a normal
2824	 * segment.  We need to store this value (maxopd) apart
2825	 * from maxseg, because now every segment carries options
2826	 * and thus we normally have somewhat less data in segments.
2827	 */
2828	tp->t_maxopd = mss;
2829
2830	/*
2831	 * origoffer==-1 indicates that no segments were received yet.
2832	 * In this case we just guess.
2833	 */
2834	if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
2835	    (origoffer == -1 ||
2836	     (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP))
2837		mss -= TCPOLEN_TSTAMP_APPA;
2838
2839 #if	(MCLBYTES & (MCLBYTES - 1)) == 0
2840	if (mss > MCLBYTES)
2841		mss &= ~(MCLBYTES-1);
2842 #else
2843	if (mss > MCLBYTES)
2844		mss = mss / MCLBYTES * MCLBYTES;
2845 #endif
2846	tp->t_maxseg = mss;
2847
2848	/*
2849	 * If there's a pipesize, change the socket buffer to that size;
2850	 * don't change it if sb_hiwat is different from the default (then
2851	 * it has been changed on purpose with setsockopt).
2852	 * Make the socket buffers an integral number of mss units;
2853	 * if the mss is larger than the socket buffer, decrease the mss.
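	 * E.g. bufsize = 32768 with mss = 1460 rounds up to 23 * 1460 =
	 * 33580 bytes, a whole number of segments; a 1024-byte send
	 * buffer, on the other hand, pulls mss down to 1024.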
	 */
2854	so = inp->inp_socket;
2855	SOCKBUF_LOCK(&so->so_snd);
2856	if ((so->so_snd.sb_hiwat == tcp_sendspace) && metrics.rmx_sendpipe)
2857		bufsize = metrics.rmx_sendpipe;
2858	else
2859		bufsize = so->so_snd.sb_hiwat;
2860	if (bufsize < mss)
2861		mss = bufsize;
2862	else {
2863		bufsize = roundup(bufsize, mss);
2864		if (bufsize > sb_max)
2865			bufsize = sb_max;
2866		if (bufsize > so->so_snd.sb_hiwat)
2867			(void)sbreserve_locked(&so->so_snd, bufsize, so, NULL);
2868	}
2869	SOCKBUF_UNLOCK(&so->so_snd);
2870	tp->t_maxseg = mss;
2871
2872	SOCKBUF_LOCK(&so->so_rcv);
2873	if ((so->so_rcv.sb_hiwat == tcp_recvspace) && metrics.rmx_recvpipe)
2874		bufsize = metrics.rmx_recvpipe;
2875	else
2876		bufsize = so->so_rcv.sb_hiwat;
2877	if (bufsize > mss) {
2878		bufsize = roundup(bufsize, mss);
2879		if (bufsize > sb_max)
2880			bufsize = sb_max;
2881		if (bufsize > so->so_rcv.sb_hiwat)
2882			(void)sbreserve_locked(&so->so_rcv, bufsize, so, NULL);
2883	}
2884	SOCKBUF_UNLOCK(&so->so_rcv);
2885	/*
2886	 * While we're here, check the others too.
2887	 */
2888	if (tp->t_srtt == 0 && (rtt = metrics.rmx_rtt)) {
2889		tp->t_srtt = rtt;
2890		tp->t_rttbest = tp->t_srtt + TCP_RTT_SCALE;
2891		tcpstat.tcps_usedrtt++;
2892		if (metrics.rmx_rttvar) {
2893			tp->t_rttvar = metrics.rmx_rttvar;
2894			tcpstat.tcps_usedrttvar++;
2895		} else {
2896			/* default variation is +- 1 rtt */
2897			tp->t_rttvar =
2898			    tp->t_srtt * TCP_RTTVAR_SCALE / TCP_RTT_SCALE;
2899		}
2900		TCPT_RANGESET(tp->t_rxtcur,
2901		    ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1,
2902		    tp->t_rttmin, TCPTV_REXMTMAX);
2903	}
2904	if (metrics.rmx_ssthresh) {
2905		/*
2906		 * There's some sort of gateway or interface
2907		 * buffer limit on the path.  Use this to set
2908		 * the slow start threshold, but set the
2909		 * threshold to no less than 2*mss.
2910		 */
2911		tp->snd_ssthresh = max(2 * mss, metrics.rmx_ssthresh);
2912		tcpstat.tcps_usedssthresh++;
2913	}
2914	if (metrics.rmx_bandwidth)
2915		tp->snd_bandwidth = metrics.rmx_bandwidth;
2916
2917	/*
2918	 * Set the slow-start flight size depending on whether this
2919	 * is a local network or not.
2920	 *
2921	 * Extend this so we cache the cwnd too and retrieve it here.
2922	 * Make cwnd even bigger than RFC3390 suggests but only if we
2923	 * have previous experience with the remote host.  Be careful
2924	 * not to make cwnd bigger than remote receive window or our own
2925	 * send socket buffer.  Maybe put some additional upper bound
2926	 * on the retrieved cwnd.  Should do incremental updates to
2927	 * hostcache when cwnd collapses so next connection doesn't
2928	 * overload the path again.
2929	 *
2930	 * RFC3390 says only do this if SYN or SYN/ACK didn't get lost.
2931	 * We currently check only in syncache_socket for that.
2932	 */
2933 #define TCP_METRICS_CWND
2934 #ifdef TCP_METRICS_CWND
2935	if (metrics.rmx_cwnd)
2936		tp->snd_cwnd = max(mss,
2937		    min(metrics.rmx_cwnd / 2,
2938		    min(tp->snd_wnd, so->so_snd.sb_hiwat)));
2939	else
2940 #endif
2941	if (tcp_do_rfc3390)
2942		tp->snd_cwnd = min(4 * mss, max(2 * mss, 4380));
2943 #ifdef INET6
2944	else if ((isipv6 && in6_localaddr(&inp->in6p_faddr)) ||
2945	    (!isipv6 && in_localaddr(inp->inp_faddr)))
2946 #else
2947	else if (in_localaddr(inp->inp_faddr))
2948 #endif
2949		tp->snd_cwnd = mss * ss_fltsz_local;
2950	else
2951		tp->snd_cwnd = mss * ss_fltsz;
2952
2953	/* Check the interface for TSO capabilities. */
2954	if (mtuflags & CSUM_TSO)
2955		tp->t_flags |= TF_TSO;
2956 }
2957
2958 /*
2959  * Determine the MSS option to send on an outgoing SYN.
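 * E.g. over 1500-byte Ethernet with no hostcache hit this yields
 * 1500 - 40 = 1460 for IPv4 and 1500 - 60 = 1440 for IPv6; when both
 * a link MTU and a cached path MTU are known, the smaller one wins.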
2961 */ 2962int 2963tcp_mssopt(struct in_conninfo *inc) 2964{ 2965 int mss = 0; 2966 u_long maxmtu = 0; 2967 u_long thcmtu = 0; 2968 size_t min_protoh; 2969#ifdef INET6 2970 int isipv6 = inc->inc_isipv6 ? 1 : 0; 2971#endif 2972 2973 KASSERT(inc != NULL, ("tcp_mssopt with NULL in_conninfo pointer")); 2974 2975#ifdef INET6 2976 if (isipv6) { 2977 mss = tcp_v6mssdflt; 2978 maxmtu = tcp_maxmtu6(inc, NULL); 2979 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 2980 min_protoh = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 2981 } else 2982#endif 2983 { 2984 mss = tcp_mssdflt; 2985 maxmtu = tcp_maxmtu(inc, NULL); 2986 thcmtu = tcp_hc_getmtu(inc); /* IPv4 and IPv6 */ 2987 min_protoh = sizeof(struct tcpiphdr); 2988 } 2989 if (maxmtu && thcmtu) 2990 mss = min(maxmtu, thcmtu) - min_protoh; 2991 else if (maxmtu || thcmtu) 2992 mss = max(maxmtu, thcmtu) - min_protoh; 2993 2994 return (mss); 2995} 2996 2997 2998/* 2999 * On a partial ack arrives, force the retransmission of the 3000 * next unacknowledged segment. Do not clear tp->t_dupacks. 3001 * By setting snd_nxt to ti_ack, this forces retransmission timer to 3002 * be started again. 3003 */ 3004static void 3005tcp_newreno_partial_ack(struct tcpcb *tp, struct tcphdr *th) 3006{ 3007 tcp_seq onxt = tp->snd_nxt; 3008 u_long ocwnd = tp->snd_cwnd; 3009 3010 INP_WLOCK_ASSERT(tp->t_inpcb); 3011 3012 tcp_timer_activate(tp, TT_REXMT, 0); 3013 tp->t_rtttime = 0; 3014 tp->snd_nxt = th->th_ack; 3015 /* 3016 * Set snd_cwnd to one segment beyond acknowledged offset. 3017 * (tp->snd_una has not yet been updated when this function is called.) 3018 */ 3019 tp->snd_cwnd = tp->t_maxseg + (th->th_ack - tp->snd_una); 3020 tp->t_flags |= TF_ACKNOW; 3021 (void) tcp_output(tp); 3022 tp->snd_cwnd = ocwnd; 3023 if (SEQ_GT(onxt, tp->snd_nxt)) 3024 tp->snd_nxt = onxt; 3025 /* 3026 * Partial window deflation. Relies on fact that tp->snd_una 3027 * not updated yet. 3028 */ 3029 if (tp->snd_cwnd > th->th_ack - tp->snd_una) 3030 tp->snd_cwnd -= th->th_ack - tp->snd_una; 3031 else 3032 tp->snd_cwnd = 0; 3033 tp->snd_cwnd += tp->t_maxseg; 3034} 3035