tcp_timewait.c revision 128452
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: head/sys/netinet/tcp_timewait.c 128452 2004-04-20 06:33:39Z silby $
 */

#include "opt_compat.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipsec.h"
#include "opt_mac.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#ifdef INET6
#include <sys/domain.h>
#endif
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <sys/random.h>

#include <vm/uma.h>

#include <net/route.h>
#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/in_pcb.h>
#ifdef INET6
#include <netinet6/in6_pcb.h>
#endif
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#ifdef INET6
#include <netinet6/ip6_var.h>
#include <netinet6/nd6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#ifdef INET6
#include <netinet6/tcp6_var.h>
#endif
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif
#include <netinet6/ip6protosw.h>

#ifdef IPSEC
#include <netinet6/ipsec.h>
#ifdef INET6
#include <netinet6/ipsec6.h>
#endif
#endif /*IPSEC*/

#ifdef FAST_IPSEC
#include <netipsec/ipsec.h>
#include <netipsec/xform.h>
#ifdef INET6
#include <netipsec/ipsec6.h>
#endif
#include <netipsec/key.h>
#define	IPSEC
#endif /*FAST_IPSEC*/

#include <machine/in_cksum.h>
#include <sys/md5.h>

int	tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int	tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
    CTLFLAG_RW, &tcp_v6mssdflt, 0,
    "Default TCP Maximum Segment Size for IPv6");
#endif

/*
 * Minimum MSS we accept and use. This prevents DoS attacks where
 * we are forced to a ridiculously low MSS like 20 and send hundreds
 * of packets instead of one. The effect scales with the available
 * bandwidth and quickly saturates the CPU and network interface
 * with packet generation and sending. Set to zero to disable MINMSS
 * checking. This setting prevents us from sending too small packets.
 */
int	tcp_minmss = TCP_MINMSS;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmss, CTLFLAG_RW,
    &tcp_minmss, 0, "Minimum TCP Maximum Segment Size");
/*
 * Number of TCP segments per second we accept from remote host
 * before we start to calculate average segment size. If average
 * segment size drops below the minimum TCP MSS we assume a DoS
 * attack and reset+drop the connection. Care has to be taken not to
 * set this value too low, so as not to kill interactive type connections
 * (telnet, SSH) which send many small packets.
 */
int	tcp_minmssoverload = TCP_MINMSSOVERLOAD;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, minmssoverload, CTLFLAG_RW,
    &tcp_minmssoverload, 0, "Number of TCP Segments per Second allowed to "
    "be under the MINMSS Size");
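/*
 * Editor's illustration (worked arithmetic, not part of the original
 * code): the scale of the attack the MINMSS check defends against.
 * With a typical Ethernet MSS of 1460 bytes, a 32 kB socket buffer
 * drains in 32768 / 1460 = ~23 segments; forced down to an MSS of 20,
 * the same buffer takes 32768 / 20 = ~1639 segments, roughly 70 times
 * the packet generation and interrupt load for the same payload.
 */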
#if 0
static int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
#endif

int	tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

int	tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
    &tcp_do_rfc1644, 0, "Enable rfc1644 (TTCP) extensions");

static int	tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RDTUN,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int	do_tcpdrain = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable tcp_drain routine for extra help when low on mbufs");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo.ipi_count, 0, "Number of active PCBs");

static int	icmp_may_rst = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, icmp_may_rst, CTLFLAG_RW, &icmp_may_rst, 0,
    "Certain ICMP unreachable messages may abort connections in SYN_SENT");

static int	tcp_isn_reseed_interval = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, isn_reseed_interval, CTLFLAG_RW,
    &tcp_isn_reseed_interval, 0, "Seconds between reseeding of ISN secret");

/*
 * TCP bandwidth limiting sysctls. Note that the default lower bound of
 * 1024 exists only for debugging. A good production default would be
 * something like 6100.
 */
static int	tcp_inflight_enable = 1;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_enable, CTLFLAG_RW,
    &tcp_inflight_enable, 0, "Enable automatic TCP inflight data limiting");

static int	tcp_inflight_debug = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_debug, CTLFLAG_RW,
    &tcp_inflight_debug, 0, "Debug TCP inflight calculations");

static int	tcp_inflight_min = 6144;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_min, CTLFLAG_RW,
    &tcp_inflight_min, 0, "Lower-bound for TCP inflight window");

static int	tcp_inflight_max = TCP_MAXWIN << TCP_MAX_WINSHIFT;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_max, CTLFLAG_RW,
    &tcp_inflight_max, 0, "Upper-bound for TCP inflight window");

static int	tcp_inflight_stab = 20;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, inflight_stab, CTLFLAG_RW,
    &tcp_inflight_stab, 0, "Inflight Algorithm Stabilization 20 = 2 packets");

static struct inpcb *tcp_notify(struct inpcb *, int);
static void	tcp_discardcb(struct tcpcb *);
static void	tcp_isn_tick(void *);

/*
 * Target size of TCP PCB hash tables. Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	512
#endif

/*
 * XXX
 * Callouts should be moved into struct tcp directly. They are currently
 * separate because the tcpcb structure is exported to userland for sysctl
 * parsing purposes, which do not know about callouts.
 */
struct	tcpcb_mem {
	struct	tcpcb tcb;
	struct	callout tcpcb_mem_rexmt, tcpcb_mem_persist, tcpcb_mem_keep;
	struct	callout tcpcb_mem_2msl, tcpcb_mem_delack;
};

static uma_zone_t tcpcb_zone;
static uma_zone_t tcptw_zone;
struct callout isn_callout;
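/*
 * Editor's note (illustrative, not in the original): since the hash size
 * is a boot-time tunable rather than a run-time sysctl (CTLFLAG_RDTUN
 * above), it is set from loader.conf, e.g.:
 *
 *	net.inet.tcp.tcbhashsize="2048"
 *
 * tcp_init() below rejects values that are not a power of two and falls
 * back to the default of 512.
 */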
/*
 * Tcp initialization
 */
void
tcp_init()
{
	int hashsize = TCBHASHSIZE;

	tcp_ccgen = 1;

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;
	tcp_rexmit_min = TCPTV_MIN;
	tcp_rexmit_slop = TCPTV_CPU_VAR;

	INP_INFO_LOCK_INIT(&tcbinfo, "tcp");
	LIST_INIT(&tcb);
	tcbinfo.listhead = &tcb;
	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", &hashsize);
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
	    &tcbinfo.porthashmask);
	tcbinfo.ipi_zone = uma_zcreate("inpcb", sizeof(struct inpcb),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcbinfo.ipi_zone, maxsockets);
#ifdef INET6
#define TCP_MINPROTOHDR (sizeof(struct ip6_hdr) + sizeof(struct tcphdr))
#else /* INET6 */
#define TCP_MINPROTOHDR (sizeof(struct tcpiphdr))
#endif /* INET6 */
	if (max_protohdr < TCP_MINPROTOHDR)
		max_protohdr = TCP_MINPROTOHDR;
	if (max_linkhdr + TCP_MINPROTOHDR > MHLEN)
		panic("tcp_init");
#undef TCP_MINPROTOHDR
	/*
	 * These have to be type stable for the benefit of the timers.
	 */
	tcpcb_zone = uma_zcreate("tcpcb", sizeof(struct tcpcb_mem),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcpcb_zone, maxsockets);
	tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	uma_zone_set_max(tcptw_zone, maxsockets / 5);
	tcp_timer_init();
	syncache_init();
	tcp_hc_init();
	tcp_reass_init();
	callout_init(&isn_callout, CALLOUT_MPSAFE);
	tcp_isn_tick(NULL);
	EVENTHANDLER_REGISTER(shutdown_pre_sync, tcp_fini, NULL,
	    SHUTDOWN_PRI_DEFAULT);
}

void
tcp_fini(xtp)
	void *xtp;
{
	callout_stop(&isn_callout);
}

/*
 * Fill in the IP and TCP headers for an outgoing packet, given the tcpcb.
 * tcp_template used to store this data in mbufs, but we now recopy it out
 * of the tcpcb each time to conserve mbufs.
 */
void
tcpip_fillheaders(inp, ip_ptr, tcp_ptr)
	struct inpcb *inp;
	void *ip_ptr;
	void *tcp_ptr;
{
	struct tcphdr *th = (struct tcphdr *)tcp_ptr;

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		struct ip6_hdr *ip6;

		ip6 = (struct ip6_hdr *)ip_ptr;
		ip6->ip6_flow = (ip6->ip6_flow & ~IPV6_FLOWINFO_MASK) |
		    (inp->in6p_flowinfo & IPV6_FLOWINFO_MASK);
		ip6->ip6_vfc = (ip6->ip6_vfc & ~IPV6_VERSION_MASK) |
		    (IPV6_VERSION & IPV6_VERSION_MASK);
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = sizeof(struct tcphdr);
		ip6->ip6_src = inp->in6p_laddr;
		ip6->ip6_dst = inp->in6p_faddr;
	} else
#endif
	{
		struct ip *ip;

		ip = (struct ip *)ip_ptr;
		ip->ip_v = IPVERSION;
		ip->ip_hl = 5;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = 0;
		ip->ip_id = 0;
		ip->ip_off = 0;
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_sum = 0;
		ip->ip_p = IPPROTO_TCP;
		ip->ip_src = inp->inp_laddr;
		ip->ip_dst = inp->inp_faddr;
	}
	th->th_sport = inp->inp_lport;
	th->th_dport = inp->inp_fport;
	th->th_seq = 0;
	th->th_ack = 0;
	th->th_x2 = 0;
	th->th_off = 5;
	th->th_flags = 0;
	th->th_win = 0;
	th->th_urp = 0;
	th->th_sum = 0;		/* in_pseudo() is called later for ipv4 */
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Allocates an mbuf and fills in a skeletal tcp/ip header. The only
 * use for this function is in keepalives, which use tcp_respond.
 */
struct tcptemp *
tcpip_maketemplate(inp)
	struct inpcb *inp;
{
	struct mbuf *m;
	struct tcptemp *n;

	m = m_get(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (0);
	m->m_len = sizeof(struct tcptemp);
	n = mtod(m, struct tcptemp *);

	tcpip_fillheaders(inp, (void *)&n->tt_ipgen, (void *)&n->tt_t);
	return (n);
}
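/*
 * Editor's sketch (assumed usage, not in this file): the keepalive timer
 * is the sole consumer of the template above. It probes the peer with an
 * otherwise-empty segment carrying an already-acknowledged sequence
 * number, roughly:
 *
 *	struct tcptemp *t = tcpip_maketemplate(inp);
 *	if (t != NULL) {
 *		tcp_respond(tp, t->tt_ipgen, &t->tt_t, NULL,
 *		    tp->rcv_nxt, tp->snd_una - 1, 0);
 *		(void) m_free(dtom(t));
 *	}
 *
 * The stale sequence number (snd_una - 1) forces the peer to reply with
 * an ACK, which is all the keepalive needs.
 */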
/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header. If m == NULL, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection. If flags are given then we send
 * a message back to the TCP which originated the segment ti,
 * and discard the mbuf containing it and any other attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(tp, ipgen, th, m, ack, seq, flags)
	struct tcpcb *tp;
	void *ipgen;
	register struct tcphdr *th;
	register struct mbuf *m;
	tcp_seq ack, seq;
	int flags;
{
	register int tlen;
	int win = 0;
	struct ip *ip;
	struct tcphdr *nth;
#ifdef INET6
	struct ip6_hdr *ip6;
	int isipv6;
#endif /* INET6 */
	int ipflags = 0;
	struct inpcb *inp = NULL;

	KASSERT(tp != NULL || m != NULL, ("tcp_respond: tp and m both NULL"));

#ifdef INET6
	isipv6 = ((struct ip *)ipgen)->ip_v == 6;
	ip6 = ipgen;
#endif /* INET6 */
	ip = ipgen;

	if (tp != NULL) {
		inp = tp->t_inpcb;
		KASSERT(inp != NULL, ("tcp control block w/o inpcb"));
		INP_INFO_WLOCK_ASSERT(&tcbinfo);
		INP_LOCK_ASSERT(inp);
		if (!(flags & TH_RST)) {
			win = sbspace(&inp->inp_socket->so_rcv);
			if (win > (long)TCP_MAXWIN << tp->rcv_scale)
				win = (long)TCP_MAXWIN << tp->rcv_scale;
		}
	}
	if (m == NULL) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
		tlen = 0;
		m->m_data += max_linkhdr;
#ifdef INET6
		if (isipv6) {
			bcopy((caddr_t)ip6, mtod(m, caddr_t),
			    sizeof(struct ip6_hdr));
			ip6 = mtod(m, struct ip6_hdr *);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			bcopy((caddr_t)ip, mtod(m, caddr_t), sizeof(struct ip));
			ip = mtod(m, struct ip *);
			nth = (struct tcphdr *)(ip + 1);
		}
		bcopy((caddr_t)th, (caddr_t)nth, sizeof(struct tcphdr));
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = NULL;
		m->m_data = (caddr_t)ipgen;
		/* m_len is set later */
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
#ifdef INET6
		if (isipv6) {
			xchg(ip6->ip6_dst, ip6->ip6_src, struct in6_addr);
			nth = (struct tcphdr *)(ip6 + 1);
		} else
#endif /* INET6 */
		{
			xchg(ip->ip_dst.s_addr, ip->ip_src.s_addr, n_long);
			nth = (struct tcphdr *)(ip + 1);
		}
		if (th != nth) {
			/*
			 * this is usually a case when an extension header
			 * exists between the IPv6 header and the
			 * TCP header.
			 */
			nth->th_sport = th->th_sport;
			nth->th_dport = th->th_dport;
		}
		xchg(nth->th_dport, nth->th_sport, n_short);
#undef xchg
	}
#ifdef INET6
	if (isipv6) {
		ip6->ip6_flow = 0;
		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_nxt = IPPROTO_TCP;
		ip6->ip6_plen = htons((u_short)(sizeof (struct tcphdr) +
		    tlen));
		tlen += sizeof (struct ip6_hdr) + sizeof (struct tcphdr);
	} else
#endif
	{
		tlen += sizeof (struct tcpiphdr);
		ip->ip_len = tlen;
		ip->ip_ttl = ip_defttl;
		if (path_mtu_discovery)
			ip->ip_off |= IP_DF;
	}
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = NULL;
#ifdef MAC
	if (inp != NULL) {
		/*
		 * Packet is associated with a socket, so allow the
		 * label of the response to reflect the socket label.
		 */
		mac_create_mbuf_from_socket(inp->inp_socket, m);
	} else {
		/*
		 * Packet is not associated with a socket, so possibly
		 * update the label in place.
		 */
		mac_reflect_mbuf_tcp(m);
	}
#endif
	nth->th_seq = htonl(seq);
	nth->th_ack = htonl(ack);
	nth->th_x2 = 0;
	nth->th_off = sizeof (struct tcphdr) >> 2;
	nth->th_flags = flags;
	if (tp != NULL)
		nth->th_win = htons((u_short) (win >> tp->rcv_scale));
	else
		nth->th_win = htons((u_short)win);
	nth->th_urp = 0;
#ifdef INET6
	if (isipv6) {
		nth->th_sum = 0;
		nth->th_sum = in6_cksum(m, IPPROTO_TCP,
		    sizeof(struct ip6_hdr),
		    tlen - sizeof(struct ip6_hdr));
		ip6->ip6_hlim = in6_selecthlim(tp != NULL ? tp->t_inpcb :
		    NULL, NULL);
	} else
#endif /* INET6 */
	{
		nth->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons((u_short)(tlen - sizeof(struct ip) + ip->ip_p)));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
	}
#ifdef TCPDEBUG
	if (tp == NULL || (inp->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, mtod(m, void *), th, 0);
#endif
#ifdef INET6
	if (isipv6)
		(void) ip6_output(m, NULL, NULL, ipflags, NULL, NULL, inp);
	else
#endif /* INET6 */
		(void) ip_output(m, NULL, NULL, ipflags, NULL, inp);
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block. The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	struct tcpcb_mem *tm;
	struct tcpcb *tp;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	tm = uma_zalloc(tcpcb_zone, M_NOWAIT | M_ZERO);
	if (tm == NULL)
		return (NULL);
	tp = &tm->tcb;
	/* LIST_INIT(&tp->t_segq); */ /* XXX covered by M_ZERO */
	tp->t_maxseg = tp->t_maxopd =
#ifdef INET6
		isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
		tcp_mssdflt;

	/* Set up our timeouts. */
	callout_init(tp->tt_rexmt = &tm->tcpcb_mem_rexmt, 0);
	callout_init(tp->tt_persist = &tm->tcpcb_mem_persist, 0);
	callout_init(tp->tt_keep = &tm->tcpcb_mem_keep, 0);
	callout_init(tp->tt_2msl = &tm->tcpcb_mem_2msl, 0);
	callout_init(tp->tt_delack = &tm->tcpcb_mem_delack, 0);

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	if (tcp_do_rfc1644)
		tp->t_flags |= TF_REQ_CC;
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate. Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = tcp_rexmit_min;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	tp->t_bw_rtttime = ticks;
	/*
	 * IPv4 TTL initialization is necessary for an IPv6 socket as well,
	 * because the socket may be bound to an IPv6 wildcard address,
	 * which may match an IPv4-mapped IPv6 address.
	 */
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}
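/*
 * Editor's note (worked example, assuming the stock FreeBSD timer
 * constants where TCPTV_SRTTBASE is 0 and TCPTV_RTOBASE is 3*hz): with
 * t_srtt = 0, the rttvar seed above makes 4 * rttvar equal TCPTV_RTOBASE,
 * so "srtt + 4 * rttvar" yields an initial retransmit timeout of three
 * seconds until real RTT samples arrive.
 */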
/*
 * Drop a TCP connection, reporting
 * the specified error. If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
	register struct tcpcb *tp;
	int errno;
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

static void
tcp_discardcb(tp)
	struct tcpcb *tp;
{
	struct tseg_qent *q;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
#ifdef INET6
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.
	 */
	callout_stop(tp->tt_rexmt);
	callout_stop(tp->tt_persist);
	callout_stop(tp->tt_keep);
	callout_stop(tp->tt_2msl);
	callout_stop(tp->tt_delack);

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 4 rtt samples.
	 * 4 samples is enough for the srtt filter to converge
	 * to within enough % of the correct value; fewer samples
	 * and we could save a bogus rtt. The danger is not high
	 * as tcp quickly recovers from everything.
	 * XXX: Works very well but needs some more statistics!
	 */
	if (tp->t_rttupdated >= 4) {
		struct hc_metrics_lite metrics;
		u_long ssthresh;

		bzero(&metrics, sizeof(metrics));
		/*
		 * Always update the ssthresh when the conditions below
		 * are satisfied. This gives us a better new start value
		 * for the congestion avoidance of new connections.
		 * ssthresh is only set if packet loss occurred on a session.
		 */
		ssthresh = tp->snd_ssthresh;
		if (ssthresh != 0 && ssthresh < so->so_snd.sb_hiwat / 2) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			ssthresh = (ssthresh + tp->t_maxseg / 2) / tp->t_maxseg;
			if (ssthresh < 2)
				ssthresh = 2;
			ssthresh *= (u_long)(tp->t_maxseg +
#ifdef INET6
			    (isipv6 ? sizeof (struct ip6_hdr) +
				sizeof (struct tcphdr) :
#endif
				sizeof (struct tcpiphdr)
#ifdef INET6
			    )
#endif
			    );
		} else
			ssthresh = 0;
		metrics.rmx_ssthresh = ssthresh;

		metrics.rmx_rtt = tp->t_srtt;
		metrics.rmx_rttvar = tp->t_rttvar;
		/* XXX: This wraps if the pipe is more than 4 Gbit per second */
		metrics.rmx_bandwidth = tp->snd_bandwidth;
		metrics.rmx_cwnd = tp->snd_cwnd;
		metrics.rmx_sendpipe = 0;
		metrics.rmx_recvpipe = 0;

		tcp_hc_update(&inp->inp_inc, &metrics);
	}

	/* free the reassembly queue, if any */
	while ((q = LIST_FIRST(&tp->t_segq)) != NULL) {
		LIST_REMOVE(q, tqe_q);
		m_freem(q->tqe_m);
		uma_zfree(tcp_reass_zone, q);
		tp->t_segqlen--;
		tcp_reass_qsize--;
	}
	inp->inp_ppcb = NULL;
	tp->t_inpcb = NULL;
	uma_zfree(tcpcb_zone, tp);
	soisdisconnected(so);
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp = tp->t_inpcb;
#ifdef INET6
	struct socket *so = inp->inp_socket;
#endif

	tcp_discardcb(tp);
#ifdef INET6
	if (INP_CHECK_SOCKAF(so, AF_INET6))
		in6_pcbdetach(inp);
	else
#endif
		in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return (NULL);
}

void
tcp_drain()
{
	if (do_tcpdrain) {
		struct inpcb *inpb;
		struct tcpcb *tcpb;
		struct tseg_qent *te;

		/*
		 * Walk the tcpbs, if existing, and flush the reassembly queue,
		 * if there is one...
		 * XXX: The "Net/3" implementation doesn't imply that the TCP
		 *      reassembly queue should be flushed, but in a situation
		 *      where we're really low on mbufs, this is potentially
		 *      useful.
		 */
		INP_INFO_RLOCK(&tcbinfo);
		LIST_FOREACH(inpb, tcbinfo.listhead, inp_list) {
			if (inpb->inp_vflag & INP_TIMEWAIT)
				continue;
			INP_LOCK(inpb);
			if ((tcpb = intotcpcb(inpb)) != NULL) {
				while ((te = LIST_FIRST(&tcpb->t_segq))
				    != NULL) {
					LIST_REMOVE(te, tqe_q);
					m_freem(te->tqe_m);
					uma_zfree(tcp_reass_zone, te);
					tcpb->t_segqlen--;
					tcp_reass_qsize--;
				}
			}
			INP_UNLOCK(inpb);
		}
		INP_INFO_RUNLOCK(&tcbinfo);
	}
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 *
 * Do not wake up user since there currently is no mechanism for
 * reporting soft errors (yet - a kqueue filter may be added).
 */
static struct inpcb *
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now. This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return inp;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror) {
		tcp_drop(tp, error);
		return (struct inpcb *)0;
	} else {
		tp->t_softerror = error;
		return inp;
	}
#if 0
	wakeup(&so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
#endif
}

static int
tcp_pcblist(SYSCTL_HANDLER_ARGS)
{
	int error, i, n, s;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == NULL) {
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
		    + (n + n/8) * sizeof(struct xtcpcb);
		return 0;
	}

	if (req->newptr != NULL)
		return EPERM;

	/*
	 * OK, now we're committed to doing something.
	 */
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);

	error = sysctl_wire_old_buffer(req, 2 * (sizeof xig)
	    + n * sizeof(struct xtcpcb));
	if (error != 0)
		return (error);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return error;

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == NULL)
		return ENOMEM;

	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	for (inp = LIST_FIRST(tcbinfo.listhead), i = 0; inp != NULL && i < n;
	     inp = LIST_NEXT(inp, inp_list)) {
		INP_LOCK(inp);
		if (inp->inp_gencnt <= gencnt) {
			/*
			 * XXX: This use of cr_cansee(), introduced with
			 * TCP state changes, is not quite right, but for
			 * now, better than nothing.
			 */
			if (inp->inp_vflag & INP_TIMEWAIT)
				error = cr_cansee(req->td->td_ucred,
				    intotw(inp)->tw_cred);
			else
				error = cr_canseesocket(req->td->td_ucred,
				    inp->inp_socket);
			if (error == 0)
				inp_list[i++] = inp;
		}
		INP_UNLOCK(inp);
	}
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;
			caddr_t inp_ppcb;
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb == NULL)
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
			else if (inp->inp_vflag & INP_TIMEWAIT) {
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
				xt.xt_tp.t_state = TCPS_TIME_WAIT;
			} else
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket != NULL)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			else {
				bzero(&xt.xt_socket, sizeof xt.xt_socket);
				xt.xt_socket.xso_protocol = IPPROTO_TCP;
			}
			xt.xt_inp.inp_gencnt = inp->inp_gencnt;
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		s = splnet();
		INP_INFO_RLOCK(&tcbinfo);
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		INP_INFO_RUNLOCK(&tcbinfo);
		splx(s);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return error;
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");
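/*
 * Editor's sketch (userland, assumed usage; this is how tools such as
 * netstat(1) consume the handler above). The buffer comes back as an
 * xinpgen header, an array of xtcpcb records, and a trailing xinpgen
 * whose generation count reveals whether the list changed mid-copy.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdlib.h>

static void *
fetch_tcp_pcblist(size_t *lenp)
{
	void *buf;

	/* Probe for the required size, then fetch the list itself. */
	if (sysctlbyname("net.inet.tcp.pcblist", NULL, lenp, NULL, 0) < 0)
		return (NULL);
	if ((buf = malloc(*lenp)) == NULL)
		return (NULL);
	if (sysctlbyname("net.inet.tcp.pcblist", buf, lenp, NULL, 0) < 0) {
		free(buf);
		return (NULL);
	}
	return (buf);
}
#endif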
static int
tcp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int error, s;

	error = suser_cred(req->td->td_ucred, PRISON_ROOT);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL) {
		error = ENOENT;
		goto outunlocked;
	}
	INP_LOCK(inp);
	if (inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
	if (error)
		goto out;
	cru2x(inp->inp_socket->so_cred, &xuc);
out:
	INP_UNLOCK(inp);
outunlocked:
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp_getcred, "S,xucred", "Get the xucred of a TCP connection");

#ifdef INET6
static int
tcp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct xucred xuc;
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error, s, mapped = 0;

	error = suser_cred(req->td->td_ucred, PRISON_ROOT);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	if (IN6_IS_ADDR_V4MAPPED(&addrs[0].sin6_addr)) {
		if (IN6_IS_ADDR_V4MAPPED(&addrs[1].sin6_addr))
			mapped = 1;
		else
			return (EINVAL);
	}
	s = splnet();
	INP_INFO_RLOCK(&tcbinfo);
	if (mapped == 1)
		inp = in_pcblookup_hash(&tcbinfo,
		    *(struct in_addr *)&addrs[1].sin6_addr.s6_addr[12],
		    addrs[1].sin6_port,
		    *(struct in_addr *)&addrs[0].sin6_addr.s6_addr[12],
		    addrs[0].sin6_port,
		    0, NULL);
	else
		inp = in6_pcblookup_hash(&tcbinfo, &addrs[1].sin6_addr,
		    addrs[1].sin6_port,
		    &addrs[0].sin6_addr, addrs[0].sin6_port,
		    0, NULL);
	if (inp == NULL) {
		error = ENOENT;
		goto outunlocked;
	}
	INP_LOCK(inp);
	if (inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = cr_canseesocket(req->td->td_ucred, inp->inp_socket);
	if (error)
		goto out;
	cru2x(inp->inp_socket->so_cred, &xuc);
out:
	INP_UNLOCK(inp);
outunlocked:
	INP_INFO_RUNLOCK(&tcbinfo);
	splx(s);
	if (error == 0)
		error = SYSCTL_OUT(req, &xuc, sizeof(struct xucred));
	return (error);
}

SYSCTL_PROC(_net_inet6_tcp6, OID_AUTO, getcred,
    CTLTYPE_OPAQUE|CTLFLAG_RW|CTLFLAG_PRISON, 0, 0,
    tcp6_getcred, "S,xucred", "Get the xucred of a TCP6 connection");
#endif
void
tcp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	struct ip *ip = vip;
	struct tcphdr *th;
	struct in_addr faddr;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	tcp_seq icmp_seq;
	int s;

	faddr = ((struct sockaddr_in *)sa)->sin_addr;
	if (sa->sa_family != AF_INET || faddr.s_addr == INADDR_ANY)
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (icmp_may_rst && (cmd == PRC_UNREACH_ADMIN_PROHIB ||
	    cmd == PRC_UNREACH_PORT || cmd == PRC_TIMXCEED_INTRANS) && ip)
		notify = tcp_drop_syn_sent;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	/*
	 * Redirects don't need to be handled up here.
	 */
	else if (PRC_IS_REDIRECT(cmd))
		return;
	/*
	 * Hostdead is ugly because it goes linearly through all PCBs.
	 * XXX: We never get this from ICMP, otherwise it makes an
	 * excellent DoS attack on machines with many connections.
	 */
	else if (cmd == PRC_HOSTDEAD)
		ip = NULL;
	else if ((unsigned)cmd >= PRC_NCMDS || inetctlerrmap[cmd] == 0)
		return;
	if (ip != NULL) {
		s = splnet();
		th = (struct tcphdr *)((caddr_t)ip
		    + (ip->ip_hl << 2));
		INP_INFO_WLOCK(&tcbinfo);
		inp = in_pcblookup_hash(&tcbinfo, faddr, th->th_dport,
		    ip->ip_src, th->th_sport, 0, NULL);
		if (inp != NULL) {
			INP_LOCK(inp);
			if (inp->inp_socket != NULL) {
				icmp_seq = htonl(th->th_seq);
				tp = intotcpcb(inp);
				if (SEQ_GEQ(icmp_seq, tp->snd_una) &&
				    SEQ_LT(icmp_seq, tp->snd_max))
					inp = (*notify)(inp, inetctlerrmap[cmd]);
			}
			if (inp != NULL)
				INP_UNLOCK(inp);
		} else {
			struct in_conninfo inc;

			inc.inc_fport = th->th_dport;
			inc.inc_lport = th->th_sport;
			inc.inc_faddr = faddr;
			inc.inc_laddr = ip->ip_src;
#ifdef INET6
			inc.inc_isipv6 = 0;
#endif
			syncache_unreach(&inc, th);
		}
		INP_INFO_WUNLOCK(&tcbinfo);
		splx(s);
	} else
		in_pcbnotifyall(&tcbinfo, faddr, inetctlerrmap[cmd], notify);
}

#ifdef INET6
void
tcp6_ctlinput(cmd, sa, d)
	int cmd;
	struct sockaddr *sa;
	void *d;
{
	struct tcphdr th;
	struct inpcb *(*notify)(struct inpcb *, int) = tcp_notify;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	struct ip6ctlparam *ip6cp = NULL;
	const struct sockaddr_in6 *sa6_src = NULL;
	int off;
	struct tcp_portonly {
		u_int16_t th_sport;
		u_int16_t th_dport;
	} *thp;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6))
		return;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
	    ((unsigned)cmd >= PRC_NCMDS || inet6ctlerrmap[cmd] == 0))
		return;

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
		sa6_src = ip6cp->ip6c_src;
	} else {
		m = NULL;
		ip6 = NULL;
		off = 0;	/* fool gcc */
		sa6_src = &sa6_any;
	}

	if (ip6 != NULL) {
		struct in_conninfo inc;
		/*
		 * XXX: We assume that when IPV6 is non NULL,
		 * M and OFF are valid.
		 */
		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(*thp))
			return;

		bzero(&th, sizeof(th));
		m_copydata(m, off, sizeof(*thp), (caddr_t)&th);

		in6_pcbnotify(&tcb, sa, th.th_dport,
		    (struct sockaddr *)ip6cp->ip6c_src,
		    th.th_sport, cmd, NULL, notify);

		inc.inc_fport = th.th_dport;
		inc.inc_lport = th.th_sport;
		inc.inc6_faddr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		inc.inc6_laddr = ip6cp->ip6c_src->sin6_addr;
		inc.inc_isipv6 = 1;
		syncache_unreach(&inc, &th);
	} else
		in6_pcbnotify(&tcb, sa, 0, (const struct sockaddr *)sa6_src,
		    0, cmd, NULL, notify);
}
#endif /* INET6 */

/*
 * Following is where TCP initial sequence number generation occurs.
 *
 * There are two places where we must use initial sequence numbers:
 * 1. In SYN-ACK packets.
 * 2. In SYN packets.
 *
 * All ISNs for SYN-ACK packets are generated by the syncache. See
 * tcp_syncache.c for details.
 *
 * The ISNs in SYN packets must be monotonic; TIME_WAIT recycling
 * depends on this property. In addition, these ISNs should be
 * unguessable so as to prevent connection hijacking. To satisfy
 * the requirements of this situation, the algorithm outlined in
 * RFC 1948 is used, with only small modifications.
 *
 * Implementation details:
 *
 * Time is based off the system timer, and is corrected so that it
 * increases by one megabyte per second. This allows for proper
 * recycling on high speed LANs while still leaving over an hour
 * before rollover.
 *
 * As reading the *exact* system time is too expensive to be done
 * whenever setting up a TCP connection, we increment the time
 * offset in two ways. First, a small random positive increment
 * is added to isn_offset for each connection that is set up.
 * Second, the function tcp_isn_tick fires once per clock tick
 * and increments isn_offset as necessary so that sequence numbers
 * are incremented at approximately ISN_BYTES_PER_SECOND. The
 * random positive increments serve only to ensure that the same
 * exact sequence number is never sent out twice (as could otherwise
 * happen when a port is recycled in less than the system tick
 * interval.)
 *
 * net.inet.tcp.isn_reseed_interval controls the number of seconds
 * between seeding of isn_secret. This is normally set to zero,
 * as reseeding should not be necessary.
 */

#define ISN_BYTES_PER_SECOND 1048576
#define ISN_STATIC_INCREMENT 4096
#define ISN_RANDOM_INCREMENT (4096 - 1)

u_char isn_secret[32];
int isn_last_reseed;
u_int32_t isn_offset, isn_offset_old;
MD5_CTX isn_ctx;
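/*
 * Editor's note (worked arithmetic, not in the original): at
 * ISN_BYTES_PER_SECOND = 1048576 (2^20) the 32-bit sequence space wraps
 * after 2^32 / 2^20 = 4096 seconds, i.e. roughly 68 minutes, which is
 * the "over an hour before rollover" cited above. Each connection setup
 * then adds 4096 plus a random value in [0, 4095] on top of this
 * clock-driven offset.
 */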
tcp_seq
tcp_new_isn(tp)
	struct tcpcb *tp;
{
	u_int32_t md5_buffer[4];
	tcp_seq new_isn;

	/* Seed if this is the first use, reseed if requested. */
	if ((isn_last_reseed == 0) || ((tcp_isn_reseed_interval > 0) &&
	    (((u_int)isn_last_reseed + (u_int)tcp_isn_reseed_interval*hz)
		< (u_int)ticks))) {
		read_random(&isn_secret, sizeof(isn_secret));
		isn_last_reseed = ticks;
	}

	/* Compute the md5 hash and return the ISN. */
	MD5Init(&isn_ctx);
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_fport, sizeof(u_short));
	MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_lport, sizeof(u_short));
#ifdef INET6
	if ((tp->t_inpcb->inp_vflag & INP_IPV6) != 0) {
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_faddr,
		    sizeof(struct in6_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->in6p_laddr,
		    sizeof(struct in6_addr));
	} else
#endif
	{
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_faddr,
		    sizeof(struct in_addr));
		MD5Update(&isn_ctx, (u_char *) &tp->t_inpcb->inp_laddr,
		    sizeof(struct in_addr));
	}
	MD5Update(&isn_ctx, (u_char *) &isn_secret, sizeof(isn_secret));
	MD5Final((u_char *) &md5_buffer, &isn_ctx);
	new_isn = (tcp_seq) md5_buffer[0];
	isn_offset += ISN_STATIC_INCREMENT +
	    (arc4random() & ISN_RANDOM_INCREMENT);
	new_isn += isn_offset;
	return new_isn;
}

/*
 * Increment the offset to the next ISN_BYTES_PER_SECOND / hz boundary
 * to keep time flowing at a relatively constant rate. If the random
 * increments have already pushed us past the projected offset, do nothing.
 */
static void
tcp_isn_tick(xtp)
	void *xtp;
{
	u_int32_t projected_offset;

	projected_offset = isn_offset_old + ISN_BYTES_PER_SECOND / hz;

	if (projected_offset > isn_offset)
		isn_offset = projected_offset;

	isn_offset_old = isn_offset;
	callout_reset(&isn_callout, 1, tcp_isn_tick, NULL);
}

/*
 * When a source quench is received, close congestion window
 * to one segment. We will gradually open it again as we proceed.
 */
struct inpcb *
tcp_quench(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp != NULL)
		tp->snd_cwnd = tp->t_maxseg;
	return (inp);
}

/*
 * When a specific ICMP unreachable message is received and the
 * connection state is SYN-SENT, drop the connection. This behavior
 * is controlled by the icmp_may_rst sysctl.
 */
struct inpcb *
tcp_drop_syn_sent(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp != NULL && tp->t_state == TCPS_SYN_SENT) {
		tcp_drop(tp, errno);
		return (struct inpcb *)0;
	}
	return inp;
}
/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route. Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
struct inpcb *
tcp_mtudisc(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rmxp_tao tao;
	struct socket *so = inp->inp_socket;
	u_int maxmtu;
	u_int romtu;
	int mss;
#ifdef INET6
	/* Use inp directly so a NULL tp cannot be dereferenced here. */
	int isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
#endif /* INET6 */

	bzero(&tao, sizeof(tao));

	if (tp != NULL) {
		maxmtu = tcp_hc_getmtu(&inp->inp_inc); /* IPv4 and IPv6 */
		romtu =
#ifdef INET6
		    isipv6 ? tcp_maxmtu6(&inp->inp_inc) :
#endif /* INET6 */
		    tcp_maxmtu(&inp->inp_inc);
		if (!maxmtu)
			maxmtu = romtu;
		else
			maxmtu = min(maxmtu, romtu);
		if (!maxmtu) {
			tp->t_maxopd = tp->t_maxseg =
#ifdef INET6
				isipv6 ? tcp_v6mssdflt :
#endif /* INET6 */
				tcp_mssdflt;
			return inp;
		}
		mss = maxmtu -
#ifdef INET6
		    (isipv6 ?
		     sizeof(struct ip6_hdr) + sizeof(struct tcphdr) :
#endif /* INET6 */
		     sizeof(struct tcpiphdr)
#ifdef INET6
		    )
#endif /* INET6 */
		    ;

		if (tcp_do_rfc1644) {
			tcp_hc_gettao(&inp->inp_inc, &tao);
			if (tao.tao_mssopt)
				mss = min(mss, tao.tao_mssopt);
		}
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec. The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default. But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today. For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before. Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed. For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			return inp;
		tp->t_maxopd = mss;

		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;
		if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
		    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
			mss -= TCPOLEN_CC_APPA;
#if	(MCLBYTES & (MCLBYTES - 1)) == 0
		if (mss > MCLBYTES)
			mss &= ~(MCLBYTES-1);
#else
		if (mss > MCLBYTES)
			mss = mss / MCLBYTES * MCLBYTES;
#endif
		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		tp->t_rtttime = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
	return inp;
}
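/*
 * Editor's note (worked example, not in the original): for a path MTU of
 * 1500 the computation above gives mss = 1500 - 40 = 1460 for IPv4
 * (sizeof(struct tcpiphdr)) or 1500 - 60 = 1440 for IPv6; if both sides
 * negotiated RFC 1323 timestamps, TCPOLEN_TSTAMP_APPA (12 bytes) is
 * subtracted as well, leaving 1448 bytes of payload per IPv4 segment.
 */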
/*
 * Look-up the routing entry to the peer of this inpcb. If no route
 * is found and it cannot be allocated, then return NULL. This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
u_long
tcp_maxmtu(inc)
	struct in_conninfo *inc;
{
	struct route sro;
	struct sockaddr_in *dst;
	struct ifnet *ifp;
	u_long maxmtu = 0;

	KASSERT(inc != NULL, ("tcp_maxmtu with NULL in_conninfo pointer"));

	bzero(&sro, sizeof(sro));
	if (inc->inc_faddr.s_addr != INADDR_ANY) {
		dst = (struct sockaddr_in *)&sro.ro_dst;
		dst->sin_family = AF_INET;
		dst->sin_len = sizeof(*dst);
		dst->sin_addr = inc->inc_faddr;
		rtalloc_ign(&sro, RTF_CLONING);
	}
	if (sro.ro_rt != NULL) {
		ifp = sro.ro_rt->rt_ifp;
		if (sro.ro_rt->rt_rmx.rmx_mtu == 0)
			maxmtu = ifp->if_mtu;
		else
			maxmtu = min(sro.ro_rt->rt_rmx.rmx_mtu, ifp->if_mtu);
		RTFREE(sro.ro_rt);
	}
	return (maxmtu);
}

#ifdef INET6
u_long
tcp_maxmtu6(inc)
	struct in_conninfo *inc;
{
	struct route_in6 sro6;
	struct ifnet *ifp;
	u_long maxmtu = 0;

	KASSERT(inc != NULL, ("tcp_maxmtu6 with NULL in_conninfo pointer"));

	bzero(&sro6, sizeof(sro6));
	if (!IN6_IS_ADDR_UNSPECIFIED(&inc->inc6_faddr)) {
		sro6.ro_dst.sin6_family = AF_INET6;
		sro6.ro_dst.sin6_len = sizeof(struct sockaddr_in6);
		sro6.ro_dst.sin6_addr = inc->inc6_faddr;
		rtalloc_ign((struct route *)&sro6, RTF_CLONING);
	}
	if (sro6.ro_rt != NULL) {
		ifp = sro6.ro_rt->rt_ifp;
		if (sro6.ro_rt->rt_rmx.rmx_mtu == 0)
			maxmtu = IN6_LINKMTU(sro6.ro_rt->rt_ifp);
		else
			maxmtu = min(sro6.ro_rt->rt_rmx.rmx_mtu,
			    IN6_LINKMTU(sro6.ro_rt->rt_ifp));
		RTFREE(sro6.ro_rt);
	}

	return (maxmtu);
}
#endif /* INET6 */

#ifdef IPSEC
/* compute ESP/AH header size for TCP, including outer IP header. */
size_t
ipsec_hdrsiz_tcp(tp)
	struct tcpcb *tp;
{
	struct inpcb *inp;
	struct mbuf *m;
	size_t hdrsiz;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif
	struct tcphdr *th;

	if ((tp == NULL) || ((inp = tp->t_inpcb) == NULL))
		return 0;
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (!m)
		return 0;

#ifdef INET6
	if ((inp->inp_vflag & INP_IPV6) != 0) {
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		m->m_pkthdr.len = m->m_len =
		    sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		tcpip_fillheaders(inp, ip6, th);
		hdrsiz = ipsec6_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	} else
#endif /* INET6 */
	{
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		m->m_pkthdr.len = m->m_len = sizeof(struct tcpiphdr);
		tcpip_fillheaders(inp, ip, th);
		hdrsiz = ipsec4_hdrsiz(m, IPSEC_DIR_OUTBOUND, inp);
	}

	m_free(m);
	return hdrsiz;
}
#endif /*IPSEC*/

/*
 * Move a TCP connection into TIME_WAIT state.
 * tcbinfo is unlocked.
 * inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(tp)
	struct tcpcb *tp;
{
	struct tcptw *tw;
	struct inpcb *inp;
	int tw_time, acknow;
	struct socket *so;

	tw = uma_zalloc(tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		tw = tcp_timer_2msl_tw(1);
		if (tw == NULL) {
			tcp_close(tp);
			return;
		}
	}
	inp = tp->t_inpcb;
	tw->tw_inpcb = inp;

	/*
	 * Recover last window size sent.
	 */
	tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;
	/*
	 * Set t_recent if timestamps are used on the connection.
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP))
		tw->t_recent = tp->ts_recent;
	else
		tw->t_recent = 0;

	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss = tp->iss;
	tw->irs = tp->irs;
	tw->cc_recv = tp->cc_recv;
	tw->cc_send = tp->cc_send;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

/* XXX
 * If this code will
 * be used for fin-wait-2 state also, then we may need
 * a ts_recent from the last segment.
 */
	/* Shorten TIME_WAIT [RFC-1644, p.28] */
	if (tp->cc_recv != 0 && (ticks - tp->t_starttime) < tcp_msl) {
		tw_time = tp->t_rxtcur * TCPTV_TWTRUNC;
		/* For T/TCP client, force ACK now. */
		acknow = 1;
	} else {
		tw_time = 2 * tcp_msl;
		acknow = tp->t_flags & TF_ACKNOW;
	}
	tcp_discardcb(tp);
	so = inp->inp_socket;
	so->so_pcb = NULL;
	tw->tw_cred = crhold(so->so_cred);
	tw->tw_so_options = so->so_options;
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	sotryfree(so);
	inp->inp_socket = NULL;
	inp->inp_ppcb = (caddr_t)tw;
	inp->inp_vflag |= INP_TIMEWAIT;
	tcp_timer_2msl_reset(tw, tw_time);
	INP_UNLOCK(inp);
}

/*
 * The approximate rate of ISN increase of Microsoft TCP stacks;
 * the actual rate is slightly higher due to the addition of
 * random positive increments.
 *
 * Most other new OSes use semi-randomized ISN values, so we
 * do not need to worry about them.
 */
#define MS_ISN_BYTES_PER_SECOND 250000

/*
 * Determine if the ISN we will generate has advanced beyond the last
 * sequence number used by the previous connection. If so, indicate
 * that it is safe to recycle this tw socket by returning 1.
 */
int
tcp_twrecycleable(struct tcptw *tw)
{
	tcp_seq new_iss = tw->iss;
	tcp_seq new_irs = tw->irs;

	new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
	new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);

	if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
		return 1;
	else
		return 0;
}
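/*
 * Editor's note (worked example, not in the original): with hz = 100, a
 * socket that has sat in TIME_WAIT for 2 seconds (200 ticks) projects
 * our own ISN 200 * (1048576 / 100) ~= 2 MB past tw->iss, and the peer's
 * (assumed Microsoft-style) ISN 200 * (250000 / 100) = 500 kB past
 * tw->irs; the port may be recycled early only if both projections have
 * already overtaken the final sequence numbers of the old connection.
 */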
struct tcptw *
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct inpcb *inp;

	inp = tw->tw_inpcb;
	tw->tw_inpcb = NULL;
	tcp_timer_2msl_stop(tw);
	inp->inp_ppcb = NULL;
#ifdef INET6
	if (inp->inp_vflag & INP_IPV6PROTO)
		in6_pcbdetach(inp);
	else
#endif
		in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	crfree(tw->tw_cred);
	tw->tw_cred = NULL;
	if (reuse)
		return (tw);
	uma_zfree(tcptw_zone, tw);
	return (NULL);
}

int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
	struct tcphdr *th;
	struct mbuf *m;
	struct ip *ip = NULL;
	u_int8_t *optp;
	u_int hdrlen, optlen;
	int error;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_isipv6;
#endif

	m = m_gethdr(M_DONTWAIT, MT_HEADER);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_create_mbuf_from_inpcb(inp, m);
#endif

#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	} else
#endif
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
	optp = (u_int8_t *)(th + 1);

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYN's and this is not a RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		u_int32_t *lp = (u_int32_t *)optp;

		/* Form timestamp option as shown in appendix A of RFC 1323. */
		*lp++ = htonl(TCPOPT_TSTAMP_HDR);
		*lp++ = htonl(ticks);
		*lp   = htonl(tw->t_recent);
		optp += TCPOLEN_TSTAMP_APPA;
	}
	/*
	 * Send `CC-family' options if needed, and it's not a RST.
	 */
	if (tw->cc_recv != 0 && flags == TH_ACK) {
		u_int32_t *lp = (u_int32_t *)optp;

		*lp++ = htonl(TCPOPT_CC_HDR(TCPOPT_CC));
		*lp   = htonl(tw->cc_send);
		optp += TCPOLEN_CC_APPA;
	}
	optlen = optp - (u_int8_t *)(th + 1);

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

#ifdef INET6
	if (isipv6) {
		th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
		    sizeof(struct tcphdr) + optlen);
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		error = ip6_output(m, inp->in6p_outputopts, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	} else
#endif
	{
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		m->m_pkthdr.csum_flags = CSUM_TCP;
		m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
		ip->ip_len = m->m_pkthdr.len;
		if (path_mtu_discovery)
			ip->ip_off |= IP_DF;
		error = ip_output(m, inp->inp_options, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, inp);
	}
	if (flags & TH_ACK)
		tcpstat.tcps_sndacks++;
	else
		tcpstat.tcps_sndctrl++;
	tcpstat.tcps_sndtotal++;
	return (error);
}

/*
 * TCP BANDWIDTH DELAY PRODUCT WINDOW LIMITING
 *
 * This code attempts to calculate the bandwidth-delay product as a
 * means of determining the optimal window size to maximize bandwidth,
 * minimize RTT, and avoid the over-allocation of buffers on interfaces and
 * routers. This code also does a fairly good job keeping RTTs in check
 * across slow links like modems. We implement an algorithm which is very
 * similar to (but not meant to be identical to) TCP/Vegas. The code
 * operates on the transmitter side of a TCP connection and so only affects
 * the transmit side of the connection.
 *
 * BACKGROUND: TCP makes no provision for the management of buffer space
 * at the end points or at the intermediate routers and switches. A TCP
 * stream, whether using NewReno or not, will eventually buffer as
 * many packets as it is able and the only reason this typically works is
 * due to the fairly small default buffers made available for a connection
 * (typically 16K or 32K). As machines use larger windows and/or window
 * scaling it is now fairly easy for even a single TCP connection to blow-out
 * all available buffer space not only on the local interface, but on
 * intermediate routers and switches as well. NewReno makes a misguided
 * attempt to 'solve' this problem by waiting for an actual failure to occur,
 * then backing off, then steadily increasing the window again until another
 * failure occurs, ad-infinitum. This results in terrible oscillation that
 * is only made worse as network loads increase and the idea of intentionally
 * blowing out network buffers is, frankly, a terrible way to manage network
 * resources.
 *
 * It is far better to limit the transmit window prior to the failure
 * condition being achieved. There are two general ways to do this: First
 * you can 'scan' through different transmit window sizes and locate the
 * point where the RTT stops increasing, indicating that you have filled the
 * pipe, then scan backwards until you note that RTT stops decreasing, then
 * repeat ad-infinitum. This method works in principle but has severe
 * implementation issues due to RTT variances, timer granularity, and
 * instability in the algorithm which can lead to many false positives and
 * create oscillations as well as interact badly with other TCP streams
 * implementing the same algorithm.
 *
 * The second method is to limit the window to the bandwidth delay product
 * of the link. This is the method we implement. RTT variances and our
 * own manipulation of the congestion window, bwnd, can potentially
 * destabilize the algorithm. For this reason we have to stabilize the
 * elements used to calculate the window. We do this by using the minimum
 * observed RTT, the long term average of the observed bandwidth, and
 * by adding two segments worth of slop. It isn't perfect but it is able
 * to react to changing conditions and gives us a very stable basis on
 * which to extend the algorithm.
 */
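/*
 * Editor's note (worked example under the default settings, not in the
 * original): the window computed below is bw * rtt plus
 * tcp_inflight_stab * t_maxseg / 10 of slop. For a 10 Mbit/s path
 * (bw ~ 1,250,000 bytes/sec) with a 50 ms smoothed RTT and a 1460-byte
 * MSS, that is 1250000 * 0.050 = 62,500 bytes plus 20 * 1460 / 10 =
 * 2,920 bytes of slop, so snd_bwnd settles near 65 kB, about 45 segments.
 */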
void
tcp_xmit_bandwidth_limit(struct tcpcb *tp, tcp_seq ack_seq)
{
	u_long bw;
	u_long bwnd;
	int save_ticks;

	/*
	 * If inflight_enable is disabled in the middle of a tcp connection,
	 * make sure snd_bwnd is effectively disabled.
	 */
	if (tcp_inflight_enable == 0) {
		tp->snd_bwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
		tp->snd_bandwidth = 0;
		return;
	}

	/*
	 * Figure out the bandwidth. Due to the tick granularity this
	 * is a very rough number and it MUST be averaged over a fairly
	 * long period of time. XXX we need to take into account a link
	 * that is not using all available bandwidth, but for now our
	 * slop will ramp us up if this case occurs and the bandwidth later
	 * increases.
	 *
	 * Note: if ticks rollover 'bw' may wind up negative. We must
	 * effectively reset t_bw_rtttime for this case.
	 */
	save_ticks = ticks;
	if ((u_int)(save_ticks - tp->t_bw_rtttime) < 1)
		return;

	bw = (int64_t)(ack_seq - tp->t_bw_rtseq) * hz /
	    (save_ticks - tp->t_bw_rtttime);
	tp->t_bw_rtttime = save_ticks;
	tp->t_bw_rtseq = ack_seq;
	if (tp->t_bw_rtttime == 0 || (int)bw < 0)
		return;
	bw = ((int64_t)tp->snd_bandwidth * 15 + bw) >> 4;

	tp->snd_bandwidth = bw;

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments. The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case and stabilization.
	 * Without the slop we could be locking ourselves into a lower
	 * bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *	    a little work).

	/*
	 * Calculate the semi-static bandwidth delay product, plus two maximal
	 * segments.  The additional slop puts us squarely in the sweet
	 * spot and also handles the bandwidth run-up case and stabilization.
	 * Without the slop we could be locking ourselves into a lower
	 * bandwidth.
	 *
	 * Situations Handled:
	 *	(1) Prevents over-queueing of packets on LANs, especially on
	 *	    high speed LANs, allowing larger TCP buffers to be
	 *	    specified, and also does a good job preventing
	 *	    over-queueing of packets over choke points like modems
	 *	    (at least for the transmit side).
	 *
	 *	(2) Is able to handle changing network loads (bandwidth
	 *	    drops so bwnd drops, bandwidth increases so bwnd
	 *	    increases).
	 *
	 *	(3) Theoretically should stabilize in the face of multiple
	 *	    connections implementing the same algorithm (this may need
	 *	    a little work).
	 *
	 *	(4) Stability value (defaults to 20 = 2 maximal packets) can
	 *	    be adjusted with a sysctl, but typically only needs to be
	 *	    changed on very slow connections.  A value no smaller
	 *	    than 5 should be used, but only reduce this default if
	 *	    you have no other choice.
	 */
#define	USERTT	((tp->t_srtt + tp->t_rttbest) / 2)
	bwnd = (int64_t)bw * USERTT / (hz << TCP_RTT_SHIFT) +
	    tcp_inflight_stab * tp->t_maxseg / 10;
#undef USERTT

	if (tcp_inflight_debug > 0) {
		static int ltime;
		if ((u_int)(ticks - ltime) >= hz / tcp_inflight_debug) {
			ltime = ticks;
			printf("%p bw %ld rttbest %d srtt %d bwnd %ld\n",
			    tp, bw, tp->t_rttbest, tp->t_srtt, bwnd);
		}
	}
	if ((long)bwnd < tcp_inflight_min)
		bwnd = tcp_inflight_min;
	if (bwnd > tcp_inflight_max)
		bwnd = tcp_inflight_max;
	if ((long)bwnd < tp->t_maxseg * 2)
		bwnd = tp->t_maxseg * 2;
	tp->snd_bwnd = bwnd;
}
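
/*
 * Worked example (annotation, not in the original source): t_srtt and
 * t_rttbest are kept in units of ticks << TCP_RTT_SHIFT, so dividing
 * USERTT by (hz << TCP_RTT_SHIFT) yields the RTT in seconds.  Continuing
 * the example above, bw = 102875 bytes/sec and a smoothed RTT of 50 ms
 * give a raw product of about 102875 * 0.050 = 5143 bytes; with
 * tcp_inflight_stab = 20 and t_maxseg = 1460 the slop term adds
 * 20 * 1460 / 10 = 2920 bytes, for a bwnd of roughly 8063 bytes before
 * the min/max clamps are applied.
 */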

#ifdef TCP_SIGNATURE
/*
 * Callback function invoked by m_apply() to digest TCP segment data
 * contained within an mbuf chain.
 */
static int
tcp_signature_apply(void *fstate, void *data, u_int len)
{

	MD5Update(fstate, (u_char *)data, len);
	return (0);
}

/*
 * Compute the TCP-MD5 hash of a TCPv4 segment (RFC 2385).
 *
 * Parameters:
 * m		pointer to the head of the mbuf chain
 * off0		offset of the TCP header within the mbuf chain
 * len		length of the TCP segment data, excluding options
 * optlen	length of the TCP segment options
 * buf		pointer to storage for the computed MD5 digest
 * direction	direction of flow (IPSEC_DIR_INBOUND or IPSEC_DIR_OUTBOUND)
 *
 * We digest the IP pseudo-header, the TCP header, the segment data, and
 * the key from the SADB, in that order.  When called from tcp_input(),
 * we can be sure that th_sum has been zeroed out and verified already.
 *
 * This function is for IPv4 use only.  Calling this function with an
 * IPv6 packet in the mbuf chain will yield undefined results.
 *
 * Return 0 if successful, otherwise an error (EINVAL if the SADB lookup
 * fails).
 *
 * XXX The key is retrieved from the system's PF_KEY SADB, by keying a
 * search with the destination IP address, and a 'magic SPI' to be
 * determined by the application.  This is hardcoded elsewhere to 1179
 * right now.  Another branch of this code exists which uses the SPD to
 * specify per-application flows, but it is unstable.
 */
int
tcp_signature_compute(struct mbuf *m, int off0, int len, int optlen,
    u_char *buf, u_int direction)
{
	union sockaddr_union dst;
	struct ippseudo ippseudo;
	MD5_CTX ctx;
	int doff;
	struct ip *ip;
	struct ipovly *ipovly;
	struct secasvar *sav;
	struct tcphdr *th;
	u_short savecsum;

	KASSERT(m != NULL, ("NULL mbuf chain"));
	KASSERT(buf != NULL, ("NULL signature pointer"));

	/* Extract the destination from the IP header in the mbuf. */
	ip = mtod(m, struct ip *);
	bzero(&dst, sizeof(union sockaddr_union));
	dst.sa.sa_len = sizeof(struct sockaddr_in);
	dst.sa.sa_family = AF_INET;
	dst.sin.sin_addr = (direction == IPSEC_DIR_INBOUND) ?
	    ip->ip_src : ip->ip_dst;

	/* Look up an SADB entry which matches the address of the peer. */
	sav = KEY_ALLOCSA(&dst, IPPROTO_TCP, htonl(TCP_SIG_SPI));
	if (sav == NULL) {
		printf("%s: SADB lookup failed for %s\n", __func__,
		    inet_ntoa(dst.sin.sin_addr));
		return (EINVAL);
	}

	MD5Init(&ctx);
	ipovly = (struct ipovly *)ip;
	th = (struct tcphdr *)((u_char *)ip + off0);
	doff = off0 + sizeof(struct tcphdr) + optlen;

	/*
	 * Step 1: Update MD5 hash with IP pseudo-header.
	 *
	 * XXX The ippseudo header MUST be digested in network byte order,
	 * or else we'll fail the regression test.  Assume all fields we've
	 * been doing arithmetic on have been in host byte order.
	 * XXX One cannot depend on ipovly->ih_len here.  When called from
	 * tcp_output(), the underlying ip_len member has not yet been set.
	 */
	ippseudo.ippseudo_src = ipovly->ih_src;
	ippseudo.ippseudo_dst = ipovly->ih_dst;
	ippseudo.ippseudo_pad = 0;
	ippseudo.ippseudo_p = IPPROTO_TCP;
	ippseudo.ippseudo_len = htons(len + sizeof(struct tcphdr) + optlen);
	MD5Update(&ctx, (char *)&ippseudo, sizeof(struct ippseudo));

	/*
	 * Step 2: Update MD5 hash with TCP header, excluding options.
	 * The TCP checksum must be set to zero.
	 */
	savecsum = th->th_sum;
	th->th_sum = 0;
	MD5Update(&ctx, (char *)th, sizeof(struct tcphdr));
	th->th_sum = savecsum;

	/*
	 * Step 3: Update MD5 hash with TCP segment data.
	 * Use m_apply() to avoid an early m_pullup().
	 */
	if (len > 0)
		m_apply(m, doff, len, tcp_signature_apply, &ctx);

	/*
	 * Step 4: Update MD5 hash with shared secret.
	 */
	MD5Update(&ctx, _KEYBUF(sav->key_auth), _KEYLEN(sav->key_auth));
	MD5Final(buf, &ctx);

	key_sa_recordxfer(sav, m);
	KEY_FREESAV(&sav);
	return (0);
}
#endif /* TCP_SIGNATURE */
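
/*
 * Illustrative usage sketch (not part of the original source): a caller
 * verifying an inbound RFC 2385 option would compute the digest over the
 * received segment and compare it with the 16 option bytes sent by the
 * peer.  The helper name is hypothetical, and 'sigp' stands in for a
 * pointer to the signature bytes parsed out of the TCP options.
 */
#if 0
static int
tcp_signature_verify_sketch(struct mbuf *m, int off0, int len, int optlen,
    u_char *sigp)
{
	u_char digest[TCP_SIGLEN];	/* TCP_SIGLEN is 16 (MD5 digest) */

	if (tcp_signature_compute(m, off0, len, optlen, digest,
	    IPSEC_DIR_INBOUND) != 0)
		return (EINVAL);	/* no SA or digest failure */
	/* Per RFC 2385, a mismatch means the segment must be dropped. */
	return (bcmp(sigp, digest, TCP_SIGLEN) == 0 ? 0 : EINVAL);
}
#endif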