1/*- 2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
28 * 29 * @(#)tcp_subr.c 8.2 (Berkeley) 5/24/95 30 */ 31 32#include <sys/cdefs.h> 33__FBSDID("$FreeBSD: stable/10/sys/netinet/tcp_timewait.c 309108 2016-11-24 14:48:46Z jch $"); 34 35#include "opt_inet.h" 36#include "opt_inet6.h" 37#include "opt_tcpdebug.h" 38 39#include <sys/param.h> 40#include <sys/systm.h> 41#include <sys/callout.h> 42#include <sys/kernel.h> 43#include <sys/sysctl.h> 44#include <sys/malloc.h> 45#include <sys/mbuf.h> 46#include <sys/priv.h> 47#include <sys/proc.h> 48#include <sys/socket.h> 49#include <sys/socketvar.h> 50#include <sys/protosw.h> 51#include <sys/random.h> 52 53#include <vm/uma.h> 54 55#include <net/route.h> 56#include <net/if.h> 57#include <net/vnet.h> 58 59#include <netinet/in.h> 60#include <netinet/in_pcb.h> 61#include <netinet/in_systm.h> 62#include <netinet/in_var.h> 63#include <netinet/ip.h> 64#include <netinet/ip_icmp.h> 65#include <netinet/ip_var.h> 66#ifdef INET6 67#include <netinet/ip6.h> 68#include <netinet6/in6_pcb.h> 69#include <netinet6/ip6_var.h> 70#include <netinet6/scope6_var.h> 71#include <netinet6/nd6.h> 72#endif 73#include <netinet/tcp.h> 74#include <netinet/tcp_fsm.h> 75#include <netinet/tcp_seq.h> 76#include <netinet/tcp_timer.h> 77#include <netinet/tcp_var.h> 78#ifdef INET6 79#include <netinet6/tcp6_var.h> 80#endif 81#include <netinet/tcpip.h> 82#ifdef TCPDEBUG 83#include <netinet/tcp_debug.h> 84#endif 85#ifdef INET6 86#include <netinet6/ip6protosw.h> 87#endif 88 89#include <machine/in_cksum.h> 90 91#include <security/mac/mac_framework.h> 92 93static VNET_DEFINE(uma_zone_t, tcptw_zone); 94#define V_tcptw_zone VNET(tcptw_zone) 95static int maxtcptw; 96 97/* 98 * The timed wait queue contains references to each of the TCP sessions 99 * currently in the TIME_WAIT state. The queue pointers, including the 100 * queue pointers in each tcptw structure, are protected using the global 101 * timewait lock, which must be held over queue iteration and modification. 
 *
 * Rules on tcptw usage:
 *  - a inpcb is always freed _after_ its tcptw,
 *  - a tcptw relies on its inpcb reference counting for memory stability,
 *  - a tcptw is dereferenceable only while its inpcb is locked.
 */
static VNET_DEFINE(TAILQ_HEAD(, tcptw), twq_2msl);
#define	V_twq_2msl		VNET(twq_2msl)

/* Global timewait lock */
static VNET_DEFINE(struct rwlock, tw_lock);
#define	V_tw_lock		VNET(tw_lock)

#define	TW_LOCK_INIT(tw, d)	rw_init_flags(&(tw), (d), 0)
#define	TW_LOCK_DESTROY(tw)	rw_destroy(&(tw))
#define	TW_RLOCK(tw)		rw_rlock(&(tw))
#define	TW_WLOCK(tw)		rw_wlock(&(tw))
#define	TW_RUNLOCK(tw)		rw_runlock(&(tw))
#define	TW_WUNLOCK(tw)		rw_wunlock(&(tw))
#define	TW_LOCK_ASSERT(tw)	rw_assert(&(tw), RA_LOCKED)
#define	TW_RLOCK_ASSERT(tw)	rw_assert(&(tw), RA_RLOCKED)
#define	TW_WLOCK_ASSERT(tw)	rw_assert(&(tw), RA_WLOCKED)
#define	TW_UNLOCK_ASSERT(tw)	rw_assert(&(tw), RA_UNLOCKED)

static void	tcp_tw_2msl_reset(struct tcptw *, int);
static void	tcp_tw_2msl_stop(struct tcptw *, int);
static int	tcp_twrespond(struct tcptw *, int);

/*
 * Compute a default upper bound for the number of compressed TIME_WAIT
 * entries, used when the net.inet.tcp.maxtcptw tunable/sysctl is 0.
 */
static int
tcptw_auto_size(void)
{
	int halfrange;

	/*
	 * Max out at half the ephemeral port range so that TIME_WAIT
	 * sockets don't tie up too many ephemeral ports.
	 */
	if (V_ipport_lastauto > V_ipport_firstauto)
		halfrange = (V_ipport_lastauto - V_ipport_firstauto) / 2;
	else
		halfrange = (V_ipport_firstauto - V_ipport_lastauto) / 2;
	/* Protect against goofy port ranges smaller than 32. */
	return (imin(imax(halfrange, 32), maxsockets / 5));
}

/*
 * Sysctl handler for net.inet.tcp.maxtcptw.  Reports the effective limit
 * (auto-sized when maxtcptw == 0) and applies a new one to the UMA zone.
 * NOTE(review): a new value below 32 is silently ignored rather than
 * rejected with EINVAL — the caller gets no error indication.
 */
static int
sysctl_maxtcptw(SYSCTL_HANDLER_ARGS)
{
	int error, new;

	if (maxtcptw == 0)
		new = tcptw_auto_size();
	else
		new = maxtcptw;
	error = sysctl_handle_int(oidp, &new, 0, req);
	if (error == 0 && req->newptr)
		if (new >= 32) {
			maxtcptw = new;
			uma_zone_set_max(V_tcptw_zone, maxtcptw);
		}
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, maxtcptw, CTLTYPE_INT|CTLFLAG_RW,
    &maxtcptw, 0, sysctl_maxtcptw, "IU",
    "Maximum number of compressed TCP TIME_WAIT entries");

VNET_DEFINE(int, nolocaltimewait) = 0;
#define	V_nolocaltimewait	VNET(nolocaltimewait)
SYSCTL_VNET_INT(_net_inet_tcp, OID_AUTO, nolocaltimewait, CTLFLAG_RW,
    &VNET_NAME(nolocaltimewait), 0,
    "Do not create compressed TCP TIME_WAIT entries for local connections");

/*
 * Re-derive the auto-sized zone limit when it may have changed.
 * NOTE(review): presumably invoked when the maxsockets limit changes
 * (tcptw_auto_size() reads maxsockets) — confirm against the caller.
 */
void
tcp_tw_zone_change(void)
{

	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
}

/*
 * Initialize per-vnet timewait state: the tcptw UMA zone, its limit
 * (tunable net.inet.tcp.maxtcptw, or auto-sized when 0), the 2MSL queue
 * and the global timewait lock.
 */
void
tcp_tw_init(void)
{

	V_tcptw_zone = uma_zcreate("tcptw", sizeof(struct tcptw),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
	TUNABLE_INT_FETCH("net.inet.tcp.maxtcptw", &maxtcptw);
	if (maxtcptw == 0)
		uma_zone_set_max(V_tcptw_zone, tcptw_auto_size());
	else
		uma_zone_set_max(V_tcptw_zone, maxtcptw);
	TAILQ_INIT(&V_twq_2msl);
	TW_LOCK_INIT(V_tw_lock, "tcptw");
}

#ifdef VIMAGE
/*
 * Tear down per-vnet timewait state: close every remaining TIME_WAIT
 * session, then destroy the lock and the zone.
 */
void
tcp_tw_destroy(void)
{
	struct tcptw *tw;

	INP_INFO_RLOCK(&V_tcbinfo);
	while ((tw = TAILQ_FIRST(&V_twq_2msl)) != NULL)
		tcp_twclose(tw, 0);
	INP_INFO_RUNLOCK(&V_tcbinfo);

	TW_LOCK_DESTROY(V_tw_lock);
	uma_zdestroy(V_tcptw_zone);
}
#endif

/*
 * Move a TCP connection into TIME_WAIT state, replacing the full tcpcb
 * with a compressed tcptw.
 * tcbinfo is locked.
 * inp is locked, and is unlocked before returning.
 */
void
tcp_twstart(struct tcpcb *tp)
{
	struct tcptw *tw;
	struct inpcb *inp = tp->t_inpcb;
	int acknow;
	struct socket *so;
#ifdef INET6
	int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif

	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	/* A dropped inp should never transition to TIME_WAIT state. */
	KASSERT((inp->inp_flags & INP_DROPPED) == 0, ("tcp_twstart: "
	    "(inp->inp_flags & INP_DROPPED) != 0"));

	/*
	 * Optionally skip the compressed state entirely for connections
	 * to local peers: close immediately instead of holding TIME_WAIT.
	 */
	if (V_nolocaltimewait) {
		int error = 0;
#ifdef INET6
		if (isipv6)
			error = in6_localaddr(&inp->in6p_faddr);
#endif
#if defined(INET6) && defined(INET)
		else
#endif
#ifdef INET
			error = in_localip(inp->inp_faddr);
#endif
		if (error) {
			/* tcp_close() returns NULL if it freed the inp. */
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_WUNLOCK(inp);
			return;
		}
	}


	/*
	 * For use only by DTrace.  We do not reference the state
	 * after this point so modifying it in place is not a problem.
	 */
	tcp_state_change(tp, TCPS_TIME_WAIT);

	tw = uma_zalloc(V_tcptw_zone, M_NOWAIT);
	if (tw == NULL) {
		/*
		 * Reached limit on total number of TIMEWAIT connections
		 * allowed. Remove a connection from TIMEWAIT queue in LRU
		 * fashion to make room for this connection.
		 *
		 * XXX:  Check if it possible to always have enough room
		 * in advance based on guarantees provided by uma_zalloc().
		 */
		tw = tcp_tw_2msl_scan(1);
		if (tw == NULL) {
			tp = tcp_close(tp);
			if (tp != NULL)
				INP_WUNLOCK(inp);
			return;
		}
	}
	/*
	 * The tcptw will hold a reference on its inpcb until tcp_twclose
	 * is called
	 */
	tw->tw_inpcb = inp;
	in_pcbref(inp);	/* Reference from tw */

	/*
	 * Recover last window size sent.
	 */
	if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
		tw->last_win = (tp->rcv_adv - tp->rcv_nxt) >> tp->rcv_scale;
	else
		tw->last_win = 0;

	/*
	 * Set t_recent if timestamps are used on the connection
	 * (both TF_REQ_TSTMP and TF_RCVD_TSTMP set, TF_NOOPT clear).
	 */
	if ((tp->t_flags & (TF_REQ_TSTMP|TF_RCVD_TSTMP|TF_NOOPT)) ==
	    (TF_REQ_TSTMP|TF_RCVD_TSTMP)) {
		tw->t_recent = tp->ts_recent;
		tw->ts_offset = tp->ts_offset;
	} else {
		tw->t_recent = 0;
		tw->ts_offset = 0;
	}

	/* Snapshot the sequence state needed to answer future segments. */
	tw->snd_nxt = tp->snd_nxt;
	tw->rcv_nxt = tp->rcv_nxt;
	tw->iss = tp->iss;
	tw->irs = tp->irs;
	tw->t_starttime = tp->t_starttime;
	tw->tw_time = 0;

/* XXX
 * If this code will
 * be used for fin-wait-2 state also, then we may need
 * a ts_recent from the last segment.
 */
	acknow = tp->t_flags & TF_ACKNOW;

	/*
	 * First, discard tcpcb state, which includes stopping its timers and
	 * freeing it.  tcp_discardcb() used to also release the inpcb, but
	 * that work is now done in the caller.
	 *
	 * Note: soisdisconnected() call used to be made in tcp_discardcb(),
	 * and might not be needed here any longer.
	 */
	tcp_discardcb(tp);
	so = inp->inp_socket;
	soisdisconnected(so);
	tw->tw_cred = crhold(so->so_cred);
	SOCK_LOCK(so);
	tw->tw_so_options = so->so_options;
	SOCK_UNLOCK(so);
	if (acknow)
		tcp_twrespond(tw, TH_ACK);
	inp->inp_ppcb = tw;
	inp->inp_flags |= INP_TIMEWAIT;
	tcp_tw_2msl_reset(tw, 0);

	/*
	 * If the inpcb owns the sole reference to the socket, then we can
	 * detach and free the socket as it is not needed in time wait.
	 */
	if (inp->inp_flags & INP_SOCKREF) {
		KASSERT(so->so_state & SS_PROTOREF,
		    ("tcp_twstart: !SS_PROTOREF"));
		inp->inp_flags &= ~INP_SOCKREF;
		INP_WUNLOCK(inp);
		ACCEPT_LOCK();
		SOCK_LOCK(so);
		so->so_state &= ~SS_PROTOREF;
		sofree(so);
	} else
		INP_WUNLOCK(inp);
}

#if 0
/*
 * The appromixate rate of ISN increase of Microsoft TCP stacks;
 * the actual rate is slightly higher due to the addition of
 * random positive increments.
 *
 * Most other new OSes use semi-randomized ISN values, so we
 * do not need to worry about them.
 */
#define	MS_ISN_BYTES_PER_SECOND		250000

/*
 * Determine if the ISN we will generate has advanced beyond the last
 * sequence number used by the previous connection.  If so, indicate
 * that it is safe to recycle this tw socket by returning 1.
 */
int
tcp_twrecycleable(struct tcptw *tw)
{
	tcp_seq new_iss = tw->iss;
	tcp_seq new_irs = tw->irs;

	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	new_iss += (ticks - tw->t_starttime) * (ISN_BYTES_PER_SECOND / hz);
	new_irs += (ticks - tw->t_starttime) * (MS_ISN_BYTES_PER_SECOND / hz);

	if (SEQ_GT(new_iss, tw->snd_nxt) && SEQ_GT(new_irs, tw->rcv_nxt))
		return (1);
	else
		return (0);
}
#endif

/*
 * Handle a segment received on an inpcb in TIME_WAIT state.
 * Returns 1 if the TIME_WAIT state was killed and we should start over,
 * looking for a pcb in the listen state.  Returns 0 otherwise.
 * On return 0 the inp has been unlocked and the mbuf freed.
 */
int
tcp_twcheck(struct inpcb *inp, struct tcpopt *to __unused, struct tcphdr *th,
    struct mbuf *m, int tlen)
{
	struct tcptw *tw;
	int thflags;
	tcp_seq seq;

	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	/*
	 * XXXRW: Time wait state for inpcb has been recycled, but inpcb is
	 * still present.  This is undesirable, but temporarily necessary
	 * until we work out how to handle inpcb's who's timewait state has
	 * been removed.
	 */
	tw = intotw(inp);
	if (tw == NULL)
		goto drop;

	thflags = th->th_flags;

	/*
	 * NOTE: for FIN_WAIT_2 (to be added later),
	 * must validate sequence number before accepting RST
	 */

	/*
	 * If the segment contains RST:
	 *	Drop the segment - see Stevens, vol. 2, p. 964 and
	 *	RFC 1337.
	 */
	if (thflags & TH_RST)
		goto drop;

#if 0
/* PAWS not needed at the moment */
	/*
	 * RFC 1323 PAWS: If we have a timestamp reply on this segment
	 * and it's less than ts_recent, drop it.
	 */
	if ((to.to_flags & TOF_TS) != 0 && tp->ts_recent &&
	    TSTMP_LT(to.to_tsval, tp->ts_recent)) {
		if ((thflags & TH_ACK) == 0)
			goto drop;
		goto ack;
	}
	/*
	 * ts_recent is never updated because we never accept new segments.
	 */
#endif

	/*
	 * If a new connection request is received
	 * while in TIME_WAIT, drop the old connection
	 * and start over if the sequence numbers
	 * are above the previous ones.
	 */
	if ((thflags & TH_SYN) && SEQ_GT(th->th_seq, tw->rcv_nxt)) {
		tcp_twclose(tw, 0);
		return (1);
	}

	/*
	 * Drop the segment if it does not contain an ACK.
	 */
	if ((thflags & TH_ACK) == 0)
		goto drop;

	/*
	 * Reset the 2MSL timer if this is a duplicate FIN.
	 */
	if (thflags & TH_FIN) {
		/* SYN consumes one sequence number if present. */
		seq = th->th_seq + tlen + (thflags & TH_SYN ? 1 : 0);
		if (seq + 1 == tw->rcv_nxt)
			tcp_tw_2msl_reset(tw, 1);
	}

	/*
	 * Acknowledge the segment if it has data or is not a duplicate ACK.
	 */
	if (thflags != TH_ACK || tlen != 0 ||
	    th->th_seq != tw->rcv_nxt || th->th_ack != tw->snd_nxt)
		tcp_twrespond(tw, TH_ACK);
drop:
	INP_WUNLOCK(inp);
	m_freem(m);
	return (0);
}

/*
 * Tear down a TIME_WAIT session: remove it from the 2MSL queue, free the
 * tcptw (unless 'reuse' asks the caller to recycle it), and dispose of
 * the inpcb/socket pair.  Called with the inp write-locked; the lock is
 * released before returning.
 */
void
tcp_twclose(struct tcptw *tw, int reuse)
{
	struct socket *so;
	struct inpcb *inp;

	/*
	 * At this point, we are in one of two situations:
	 *
	 * (1) We have no socket, just an inpcb<->twtcp pair.  We can free
	 *     all state.
	 *
	 * (2) We have a socket -- if we own a reference, release it and
	 *     notify the socket layer.
	 */
	inp = tw->tw_inpcb;
	KASSERT((inp->inp_flags & INP_TIMEWAIT), ("tcp_twclose: !timewait"));
	KASSERT(intotw(inp) == tw, ("tcp_twclose: inp_ppcb != tw"));
	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);	/* in_pcbfree() */
	INP_WLOCK_ASSERT(inp);

	tcp_tw_2msl_stop(tw, reuse);
	inp->inp_ppcb = NULL;
	in_pcbdrop(inp);

	so = inp->inp_socket;
	if (so != NULL) {
		/*
		 * If there's a socket, handle two cases: first, we own a
		 * strong reference, which we will now release, or we don't
		 * in which case another reference exists (XXXRW: think
		 * about this more), and we don't need to take action.
		 */
		if (inp->inp_flags & INP_SOCKREF) {
			inp->inp_flags &= ~INP_SOCKREF;
			INP_WUNLOCK(inp);
			ACCEPT_LOCK();
			SOCK_LOCK(so);
			KASSERT(so->so_state & SS_PROTOREF,
			    ("tcp_twclose: INP_SOCKREF && !SS_PROTOREF"));
			so->so_state &= ~SS_PROTOREF;
			sofree(so);
		} else {
			/*
			 * If we don't own the only reference, the socket and
			 * inpcb need to be left around to be handled by
			 * tcp_usr_detach() later.
			 */
			INP_WUNLOCK(inp);
		}
	} else {
		/*
		 * The socket has been already cleaned-up for us, only free
		 * the inpcb.
		 */
		in_pcbfree(inp);
	}
	TCPSTAT_INC(tcps_closed);
}

/*
 * Build and transmit a bare TCP segment (no payload) on behalf of a
 * compressed TIME_WAIT session.  'flags' supplies the TCP header flags
 * (typically TH_ACK).  Returns 0 or an errno from the output path.
 */
static int
tcp_twrespond(struct tcptw *tw, int flags)
{
	struct inpcb *inp = tw->tw_inpcb;
#if defined(INET6) || defined(INET)
	struct tcphdr *th = NULL;
#endif
	struct mbuf *m;
#ifdef INET
	struct ip *ip = NULL;
#endif
	u_int hdrlen, optlen;
	int error = 0;			/* Keep compiler happy */
	struct tcpopt to;
#ifdef INET6
	struct ip6_hdr *ip6 = NULL;
	int isipv6 = inp->inp_inc.inc_flags & INC_ISIPV6;
#endif
	hdrlen = 0;			/* Keep compiler happy */

	INP_WLOCK_ASSERT(inp);

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	m->m_data += max_linkhdr;

#ifdef MAC
	mac_inpcb_create_mbuf(inp, m);
#endif

	/* Fill in the constant IP/IPv6 + TCP header fields. */
#ifdef INET6
	if (isipv6) {
		hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
		ip6 = mtod(m, struct ip6_hdr *);
		th = (struct tcphdr *)(ip6 + 1);
		tcpip_fillheaders(inp, ip6, th);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		hdrlen = sizeof(struct tcpiphdr);
		ip = mtod(m, struct ip *);
		th = (struct tcphdr *)(ip + 1);
		tcpip_fillheaders(inp, ip, th);
	}
#endif
	to.to_flags = 0;

	/*
	 * Send a timestamp and echo-reply if both our side and our peer
	 * have sent timestamps in our SYN's and this is not a RST.
	 */
	if (tw->t_recent && flags == TH_ACK) {
		to.to_flags |= TOF_TS;
		to.to_tsval = tcp_ts_getticks() + tw->ts_offset;
		to.to_tsecr = tw->t_recent;
	}
	optlen = tcp_addoptions(&to, (u_char *)(th + 1));

	m->m_len = hdrlen + optlen;
	m->m_pkthdr.len = m->m_len;

	/* The whole segment must fit in a single mbuf. */
	KASSERT(max_linkhdr + m->m_len <= MHLEN, ("tcptw: mbuf too small"));

	th->th_seq = htonl(tw->snd_nxt);
	th->th_ack = htonl(tw->rcv_nxt);
	th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
	th->th_flags = flags;
	th->th_win = htons(tw->last_win);

	/* TCP checksum is offloaded; pre-fill the pseudo-header sum. */
	m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
#ifdef INET6
	if (isipv6) {
		m->m_pkthdr.csum_flags = CSUM_TCP_IPV6;
		th->th_sum = in6_cksum_pseudo(ip6,
		    sizeof(struct tcphdr) + optlen, IPPROTO_TCP, 0);
		ip6->ip6_hlim = in6_selecthlim(inp, NULL);
		error = ip6_output(m, inp->in6p_outputopts, NULL,
		    (tw->tw_so_options & SO_DONTROUTE), NULL, NULL, inp);
	}
#endif
#if defined(INET6) && defined(INET)
	else
#endif
#ifdef INET
	{
		m->m_pkthdr.csum_flags = CSUM_TCP;
		th->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
		    htons(sizeof(struct tcphdr) + optlen + IPPROTO_TCP));
		ip->ip_len = htons(m->m_pkthdr.len);
		if (V_path_mtu_discovery)
			ip->ip_off |= htons(IP_DF);
		error = ip_output(m, inp->inp_options, NULL,
		    ((tw->tw_so_options & SO_DONTROUTE) ? IP_ROUTETOIF : 0),
		    NULL, inp);
	}
#endif
	if (flags & TH_ACK)
		TCPSTAT_INC(tcps_sndacks);
	else
		TCPSTAT_INC(tcps_sndctrl);
	TCPSTAT_INC(tcps_sndtotal);
	return (error);
}

/*
 * (Re)arm the 2MSL expiry of a tcptw and move it to the tail of the
 * queue.  'rearm' is nonzero when the entry is already on the queue and
 * must be unlinked first.
 */
static void
tcp_tw_2msl_reset(struct tcptw *tw, int rearm)
{

	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(tw->tw_inpcb);

	TW_WLOCK(V_tw_lock);
	if (rearm)
		TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
	tw->tw_time = ticks + 2 * tcp_msl;
	TAILQ_INSERT_TAIL(&V_twq_2msl, tw, tw_2msl);
	TW_WUNLOCK(V_tw_lock);
}

/*
 * Unlink a tcptw from the 2MSL queue, drop its credential and its inpcb
 * reference, and free it unless 'reuse' asks the caller to recycle it.
 */
static void
tcp_tw_2msl_stop(struct tcptw *tw, int reuse)
{
	struct ucred *cred;
	struct inpcb *inp;
	int released;

	INP_INFO_RLOCK_ASSERT(&V_tcbinfo);

	TW_WLOCK(V_tw_lock);
	inp = tw->tw_inpcb;
	tw->tw_inpcb = NULL;

	TAILQ_REMOVE(&V_twq_2msl, tw, tw_2msl);
	cred = tw->tw_cred;
	tw->tw_cred = NULL;
	TW_WUNLOCK(V_tw_lock);

	if (cred != NULL)
		crfree(cred);

	/*
	 * The caller still holds the inp lock, so the tw's reference
	 * cannot be the last one here.
	 */
	released = in_pcbrele_wlocked(inp);
	KASSERT(!released, ("%s: inp should not be released here", __func__));

	if (!reuse)
		uma_zfree(V_tcptw_zone, tw);
}

/*
 * Walk the 2MSL queue from the head (oldest first) and close expired
 * entries.  With 'reuse' nonzero, unconditionally close the oldest entry
 * and return its (recyclable) tcptw; otherwise return NULL.
 */
struct tcptw *
tcp_tw_2msl_scan(int reuse)
{
	struct tcptw *tw;
	struct inpcb *inp;

#ifdef INVARIANTS
	if (reuse) {
		/*
		 * Exclusive pcbinfo lock is not required in reuse case even if
		 * two inpcb locks can be acquired simultaneously:
		 * - the inpcb transitioning to TIME_WAIT state in
		 *   tcp_tw_start(),
		 * - the inpcb closed by tcp_twclose().
		 *
		 * It is because only inpcbs in FIN_WAIT2 or CLOSING states can
		 * transition in TIME_WAIT state.  Then a pcbcb cannot be in
		 * TIME_WAIT list and transitioning to TIME_WAIT state at same
		 * time.
		 */
		INP_INFO_RLOCK_ASSERT(&V_tcbinfo);
	}
#endif

	for (;;) {
		TW_RLOCK(V_tw_lock);
		tw = TAILQ_FIRST(&V_twq_2msl);
		if (tw == NULL || (!reuse && (tw->tw_time - ticks) > 0)) {
			TW_RUNLOCK(V_tw_lock);
			break;
		}
		KASSERT(tw->tw_inpcb != NULL, ("%s: tw->tw_inpcb == NULL",
		    __func__));

		/*
		 * Take an inp reference so the inpcb stays around while we
		 * drop the timewait lock and re-lock in the right order.
		 */
		inp = tw->tw_inpcb;
		in_pcbref(inp);
		TW_RUNLOCK(V_tw_lock);

		if (INP_INFO_TRY_RLOCK(&V_tcbinfo)) {

			INP_WLOCK(inp);
			/* Re-read: the tw may have been recycled meanwhile. */
			tw = intotw(inp);
			if (in_pcbrele_wlocked(inp)) {
				KASSERT(tw == NULL, ("%s: held last inp "
				    "reference but tw not NULL", __func__));
				INP_INFO_RUNLOCK(&V_tcbinfo);
				continue;
			}

			if (tw == NULL) {
				/* tcp_twclose() has already been called */
				INP_WUNLOCK(inp);
				INP_INFO_RUNLOCK(&V_tcbinfo);
				continue;
			}

			tcp_twclose(tw, reuse);
			INP_INFO_RUNLOCK(&V_tcbinfo);
			if (reuse)
				return tw;
		} else {
			/* INP_INFO lock is busy, continue later. */
			INP_WLOCK(inp);
			if (!in_pcbrele_wlocked(inp))
				INP_WUNLOCK(inp);
			break;
		}
	}

	return NULL;
}