tcp_timewait.c revision 55198
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)tcp_subr.c	8.2 (Berkeley) 5/24/95
 * $FreeBSD: head/sys/netinet/tcp_timewait.c 55198 1999-12-28 23:18:33Z msmith $
 */

#include "opt_compat.h"
#include "opt_inet6.h"
#include "opt_tcpdebug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>

#include <vm/vm_zone.h>

#include <net/route.h>
#include <net/if.h>

#define _IP_VHL
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#ifdef TCPDEBUG
#include <netinet/tcp_debug.h>
#endif

int	tcp_mssdflt = TCP_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_MSSDFLT, mssdflt, CTLFLAG_RW,
    &tcp_mssdflt, 0, "Default TCP Maximum Segment Size");

#ifdef INET6
int	tcp_v6mssdflt = TCP6_MSS;
SYSCTL_INT(_net_inet_tcp, TCPCTL_V6MSSDFLT, v6mssdflt,
	CTLFLAG_RW, &tcp_v6mssdflt, 0, "");
#endif

#if 0
static int	tcp_rttdflt = TCPTV_SRTTDFLT / PR_SLOWHZ;
SYSCTL_INT(_net_inet_tcp, TCPCTL_RTTDFLT, rttdflt, CTLFLAG_RW,
    &tcp_rttdflt, 0, "Default maximum TCP Round Trip Time");
#endif

static int	tcp_do_rfc1323 = 1;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1323, rfc1323, CTLFLAG_RW,
    &tcp_do_rfc1323, 0, "Enable rfc1323 (high performance TCP) extensions");

static int	tcp_do_rfc1644 = 0;
SYSCTL_INT(_net_inet_tcp, TCPCTL_DO_RFC1644, rfc1644, CTLFLAG_RW,
    &tcp_do_rfc1644, 0, "Enable rfc1644 (TTCP) extensions");

static int	tcp_tcbhashsize = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, tcbhashsize, CTLFLAG_RD,
    &tcp_tcbhashsize, 0, "Size of TCP control-block hashtable");

static int	do_tcpdrain = 1;
SYSCTL_INT(_debug, OID_AUTO, do_tcpdrain, CTLFLAG_RW, &do_tcpdrain, 0,
    "Enable non Net3 compliant tcp_drain");

SYSCTL_INT(_net_inet_tcp, OID_AUTO, pcbcount, CTLFLAG_RD,
    &tcbinfo.ipi_count, 0, "Number of active PCBs");

static void	tcp_cleartaocache __P((void));
static void	tcp_notify __P((struct inpcb *, int));

/*
 * Target size of TCP PCB hash tables. Must be a power of two.
 *
 * Note that this can be overridden by the kernel environment
 * variable net.inet.tcp.tcbhashsize
 */
#ifndef TCBHASHSIZE
#define TCBHASHSIZE	512
#endif

/*
 * This is the actual shape of what we allocate using the zone
 * allocator. Doing it this way allows us to protect both structures
 * using the same generation count, and also eliminates the overhead
 * of allocating tcpcbs separately.  By hiding the structure here,
 * we avoid changing most of the rest of the code (although it needs
 * to be changed, eventually, for greater efficiency).
 */
#define ALIGNMENT	32
#define ALIGNM1		(ALIGNMENT - 1)
struct	inp_tp {
	union {
		struct	inpcb inp;
		char	align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
	} inp_tp_u;
	struct	tcpcb tcb;
	struct	callout inp_tp_rexmt, inp_tp_persist, inp_tp_keep, inp_tp_2msl;
	struct	callout inp_tp_delack;
};
#undef ALIGNMENT
#undef ALIGNM1
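
/*
 * Illustrative sketch (not compiled; hypothetical names): the align[]
 * member above pads sizeof(struct inpcb) up to the next multiple of
 * ALIGNMENT so that the tcpcb that follows starts on a 32-byte
 * boundary.  The (n + ALIGNM1) & ~ALIGNM1 idiom works for any
 * power-of-two alignment:
 */
#if 0
#include <assert.h>
#include <stddef.h>

/* Round n up to a multiple of align; align must be a power of two. */
static size_t
roundup_pow2(size_t n, size_t align)
{
	return ((n + (align - 1)) & ~(align - 1));
}

static void
roundup_example(void)
{
	assert(roundup_pow2(33, 32) == 64);	/* up to next multiple */
	assert(roundup_pow2(64, 32) == 64);	/* already aligned: unchanged */
}
#endif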

/*
 * Tcp initialization
 */
void
tcp_init()
{
	int hashsize;

	tcp_iss = random();	/* wrong, but better than a constant */
	tcp_ccgen = 1;
	tcp_cleartaocache();

	tcp_delacktime = TCPTV_DELACK;
	tcp_keepinit = TCPTV_KEEP_INIT;
	tcp_keepidle = TCPTV_KEEP_IDLE;
	tcp_keepintvl = TCPTV_KEEPINTVL;
	tcp_maxpersistidle = TCPTV_KEEP_IDLE;
	tcp_msl = TCPTV_MSL;

	LIST_INIT(&tcb);
	tcbinfo.listhead = &tcb;
	TUNABLE_INT_FETCH("net.inet.tcp.tcbhashsize", TCBHASHSIZE, hashsize);
	if (!powerof2(hashsize)) {
		printf("WARNING: TCB hash size not a power of 2\n");
		hashsize = 512; /* safe default */
	}
	tcp_tcbhashsize = hashsize;
	tcbinfo.hashbase = hashinit(hashsize, M_PCB, &tcbinfo.hashmask);
	tcbinfo.porthashbase = hashinit(hashsize, M_PCB,
					&tcbinfo.porthashmask);
	tcbinfo.ipi_zone = zinit("tcpcb", sizeof(struct inp_tp), maxsockets,
				 ZONE_INTERRUPT, 0);

	if (max_protohdr < sizeof(struct tcpiphdr))
		max_protohdr = sizeof(struct tcpiphdr);
	if (max_linkhdr + sizeof(struct tcpiphdr) > MHLEN)
		panic("tcp_init");
}
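
/*
 * Illustrative sketch (not compiled): the hash size must be a power of
 * two because hashinit() hands back a mask, and a bucket is then picked
 * with a cheap bitwise AND rather than a modulo.  A userland analogue,
 * with hypothetical names:
 */
#if 0
#include <assert.h>

static void
hashmask_example(void)
{
	unsigned int hashsize = 512;		/* power of two */
	unsigned int hashmask = hashsize - 1;	/* what hashinit() returns */
	unsigned int hashval = 0x12345678;	/* any computed hash value */

	/* hash & mask == hash % size only when size is a power of two. */
	assert((hashval & hashmask) == (hashval % hashsize));
}
#endif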

/*
 * Create template to be used to send tcp packets on a connection.
 * Call after host entry created, allocates an mbuf and fills
 * in a skeletal tcp/ip header, minimizing the amount of work
 * necessary when the connection is used.
 */
struct tcpiphdr *
tcp_template(tp)
	struct tcpcb *tp;
{
	register struct inpcb *inp = tp->t_inpcb;
	register struct mbuf *m;
	register struct tcpiphdr *n;

	if ((n = tp->t_template) == 0) {
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return (0);
		m->m_len = sizeof (struct tcpiphdr);
		n = mtod(m, struct tcpiphdr *);
	}
	bzero(n->ti_x1, sizeof(n->ti_x1));
	n->ti_pr = IPPROTO_TCP;
	n->ti_len = htons(sizeof (struct tcpiphdr) - sizeof (struct ip));
	n->ti_src = inp->inp_laddr;
	n->ti_dst = inp->inp_faddr;
	n->ti_sport = inp->inp_lport;
	n->ti_dport = inp->inp_fport;
	n->ti_seq = 0;
	n->ti_ack = 0;
	n->ti_x2 = 0;
	n->ti_off = 5;
	n->ti_flags = 0;
	n->ti_win = 0;
	n->ti_sum = 0;
	n->ti_urp = 0;
	return (n);
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection tp->t_template.  If flags are given
 * then we send a message back to the TCP which originated the
 * segment ti, and discard the mbuf containing it and any other
 * attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 *
 * NOTE: If m != NULL, then ti must point to *inside* the mbuf.
 */
void
tcp_respond(tp, ti, m, ack, seq, flags)
	struct tcpcb *tp;
	register struct tcpiphdr *ti;
	register struct mbuf *m;
	tcp_seq ack, seq;
	int flags;
{
	register int tlen;
	int win = 0;
	struct route *ro = 0;
	struct route sro;

	if (tp) {
		if (!(flags & TH_RST))
			win = sbspace(&tp->t_inpcb->inp_socket->so_rcv);
		ro = &tp->t_inpcb->inp_route;
	} else {
		ro = &sro;
		bzero(ro, sizeof *ro);
	}
	if (m == 0) {
		m = m_gethdr(M_DONTWAIT, MT_HEADER);
		if (m == NULL)
			return;
#ifdef TCP_COMPAT_42
		tlen = 1;
#else
		tlen = 0;
#endif
		m->m_data += max_linkhdr;
		*mtod(m, struct tcpiphdr *) = *ti;
		ti = mtod(m, struct tcpiphdr *);
		flags = TH_ACK;
	} else {
		m_freem(m->m_next);
		m->m_next = 0;
		m->m_data = (caddr_t)ti;
		m->m_len = sizeof (struct tcpiphdr);
		tlen = 0;
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
		xchg(ti->ti_dst.s_addr, ti->ti_src.s_addr, n_long);
		xchg(ti->ti_dport, ti->ti_sport, n_short);
#undef xchg
	}
	ti->ti_len = htons((u_short)(sizeof (struct tcphdr) + tlen));
	tlen += sizeof (struct tcpiphdr);
	m->m_len = tlen;
	m->m_pkthdr.len = tlen;
	m->m_pkthdr.rcvif = (struct ifnet *) 0;
	bzero(ti->ti_x1, sizeof(ti->ti_x1));
	ti->ti_seq = htonl(seq);
	ti->ti_ack = htonl(ack);
	ti->ti_x2 = 0;
	ti->ti_off = sizeof (struct tcphdr) >> 2;
	ti->ti_flags = flags;
	if (tp)
		ti->ti_win = htons((u_short) (win >> tp->rcv_scale));
	else
		ti->ti_win = htons((u_short)win);
	ti->ti_urp = 0;
	ti->ti_sum = 0;
	ti->ti_sum = in_cksum(m, tlen);
	((struct ip *)ti)->ip_len = tlen;
	((struct ip *)ti)->ip_ttl = ip_defttl;
#ifdef TCPDEBUG
	if (tp == NULL || (tp->t_inpcb->inp_socket->so_options & SO_DEBUG))
		tcp_trace(TA_OUTPUT, 0, tp, ti, 0);
#endif
	(void) ip_output(m, NULL, ro, 0, NULL);
	if (ro == &sro && ro->ro_rt) {
		RTFREE(ro->ro_rt);
	}
}
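
/*
 * Illustrative sketch (not compiled): in_cksum() above computes the
 * standard ones'-complement Internet checksum (RFC 1071) over the mbuf
 * chain.  A minimal userland version over a flat buffer, assuming an
 * even byte count for brevity:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static uint16_t
internet_cksum(const uint16_t *p, size_t nbytes)
{
	uint32_t sum = 0;

	while (nbytes > 1) {
		sum += *p++;		/* sum 16-bit words */
		nbytes -= 2;
	}
	/* Fold the carries back in until the sum fits in 16 bits. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ((uint16_t)~sum);
}
#endif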

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  The `inp' parameter must have
 * come from the zone allocator set up in tcp_init().
 */
struct tcpcb *
tcp_newtcpcb(inp)
	struct inpcb *inp;
{
	struct inp_tp *it;
	register struct tcpcb *tp;

	it = (struct inp_tp *)inp;
	tp = &it->tcb;
	bzero((char *) tp, sizeof(struct tcpcb));
	tp->t_segq = NULL;
	tp->t_maxseg = tp->t_maxopd = tcp_mssdflt;

	/* Set up our timeouts. */
	callout_init(tp->tt_rexmt = &it->inp_tp_rexmt);
	callout_init(tp->tt_persist = &it->inp_tp_persist);
	callout_init(tp->tt_keep = &it->inp_tp_keep);
	callout_init(tp->tt_2msl = &it->inp_tp_2msl);
	callout_init(tp->tt_delack = &it->inp_tp_delack);

	if (tcp_do_rfc1323)
		tp->t_flags = (TF_REQ_SCALE|TF_REQ_TSTMP);
	if (tcp_do_rfc1644)
		tp->t_flags |= TF_REQ_CC;
	tp->t_inpcb = inp;	/* XXX */
	/*
	 * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
	 * rtt estimate.  Set rttvar so that srtt + 4 * rttvar gives
	 * reasonable initial retransmit time.
	 */
	tp->t_srtt = TCPTV_SRTTBASE;
	tp->t_rttvar = ((TCPTV_RTOBASE - TCPTV_SRTTBASE) << TCP_RTTVAR_SHIFT) / 4;
	tp->t_rttmin = TCPTV_MIN;
	tp->t_rxtcur = TCPTV_RTOBASE;
	tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
	tp->t_rcvtime = ticks;
	inp->inp_ip_ttl = ip_defttl;
	inp->inp_ppcb = (caddr_t)tp;
	return (tp);		/* XXX */
}
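
/*
 * Illustrative sketch (not compiled): t_srtt and t_rttvar are
 * fixed-point values, scaled by TCP_RTT_SCALE and TCP_RTTVAR_SCALE
 * (assumed here to be 32 and 16, i.e. shifts of 5 and 4; see
 * tcp_var.h for the real constants).  With srtt == 0, the rttvar
 * chosen above makes srtt + 4 * rttvar come out to exactly
 * TCPTV_RTOBASE once the scaling is undone:
 */
#if 0
#include <assert.h>

static void
rttvar_init_example(void)
{
	int hz = 100;				/* assumed clock rate */
	int rtobase = 3 * hz;			/* TCPTV_RTOBASE, in ticks */
	int rttvar_shift = 4;			/* assumed TCP_RTTVAR_SHIFT */
	int srtt = 0;				/* TCPTV_SRTTBASE */
	int rttvar = ((rtobase - srtt) << rttvar_shift) / 4;

	/* Unscale: srtt/32 + 4 * (rttvar/16) == the 3*hz initial RTO. */
	assert(srtt / 32 + 4 * (rttvar >> rttvar_shift) == rtobase);
}
#endif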

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *
tcp_drop(tp, errno)
	register struct tcpcb *tp;
	int errno;
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (TCPS_HAVERCVDSYN(tp->t_state)) {
		tp->t_state = TCPS_CLOSED;
		(void) tcp_output(tp);
		tcpstat.tcps_drops++;
	} else
		tcpstat.tcps_conndrops++;
	if (errno == ETIMEDOUT && tp->t_softerror)
		errno = tp->t_softerror;
	so->so_error = errno;
	return (tcp_close(tp));
}

/*
 * Close a TCP control block:
 *	discard all space held by the tcp
 *	discard internet protocol block
 *	wake up any sleepers
 */
struct tcpcb *
tcp_close(tp)
	register struct tcpcb *tp;
{
	register struct mbuf *q;
	register struct mbuf *nq;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	register struct rtentry *rt;
	int dosavessthresh;

	/*
	 * Make sure that all of our timers are stopped before we
	 * delete the PCB.
	 */
	callout_stop(tp->tt_rexmt);
	callout_stop(tp->tt_persist);
	callout_stop(tp->tt_keep);
	callout_stop(tp->tt_2msl);
	callout_stop(tp->tt_delack);

	/*
	 * If we got enough samples through the srtt filter,
	 * save the rtt and rttvar in the routing entry.
	 * 'Enough' is arbitrarily defined as 16 samples.
	 * 16 samples is enough for the srtt filter to converge
	 * to within 5% of the correct value; fewer samples and
	 * we could save a very bogus rtt.
	 *
	 * Don't update the default route's characteristics and don't
	 * update anything that the user "locked".
	 */
	if (tp->t_rttupdated >= 16 &&
	    (rt = inp->inp_route.ro_rt) &&
	    ((struct sockaddr_in *)rt_key(rt))->sin_addr.s_addr != INADDR_ANY) {
		register u_long i = 0;

		if ((rt->rt_rmx.rmx_locks & RTV_RTT) == 0) {
			i = tp->t_srtt *
			    (RTM_RTTUNIT / (hz * TCP_RTT_SCALE));
			if (rt->rt_rmx.rmx_rtt && i)
				/*
				 * filter this update to half the old & half
				 * the new values, converting scale.
				 * See route.h and tcp_var.h for a
				 * description of the scaling constants.
				 */
				rt->rt_rmx.rmx_rtt =
				    (rt->rt_rmx.rmx_rtt + i) / 2;
			else
				rt->rt_rmx.rmx_rtt = i;
			tcpstat.tcps_cachedrtt++;
		}
		if ((rt->rt_rmx.rmx_locks & RTV_RTTVAR) == 0) {
			i = tp->t_rttvar *
			    (RTM_RTTUNIT / (hz * TCP_RTTVAR_SCALE));
			if (rt->rt_rmx.rmx_rttvar && i)
				rt->rt_rmx.rmx_rttvar =
				    (rt->rt_rmx.rmx_rttvar + i) / 2;
			else
				rt->rt_rmx.rmx_rttvar = i;
			tcpstat.tcps_cachedrttvar++;
		}
		/*
		 * The old comment here said:
		 * update the pipelimit (ssthresh) if it has been updated
		 * already or if a pipesize was specified & the threshold
		 * got below half the pipesize.  I.e., wait for bad news
		 * before we start updating, then update on both good
		 * and bad news.
		 *
		 * But we want to save the ssthresh even if no pipesize is
		 * specified explicitly in the route, because such
		 * connections still have an implicit pipesize specified
		 * by the global tcp_sendspace.  In the absence of a reliable
		 * way to calculate the pipesize, it will have to do.
		 */
		i = tp->snd_ssthresh;
		if (rt->rt_rmx.rmx_sendpipe != 0)
			dosavessthresh = (i < rt->rt_rmx.rmx_sendpipe / 2);
		else
			dosavessthresh = (i < so->so_snd.sb_hiwat / 2);
		if (((rt->rt_rmx.rmx_locks & RTV_SSTHRESH) == 0 &&
		     i != 0 && rt->rt_rmx.rmx_ssthresh != 0)
		    || dosavessthresh) {
			/*
			 * convert the limit from user data bytes to
			 * packets then to packet data bytes.
			 */
			i = (i + tp->t_maxseg / 2) / tp->t_maxseg;
			if (i < 2)
				i = 2;
			i *= (u_long)(tp->t_maxseg + sizeof (struct tcpiphdr));
			if (rt->rt_rmx.rmx_ssthresh)
				rt->rt_rmx.rmx_ssthresh =
				    (rt->rt_rmx.rmx_ssthresh + i) / 2;
			else
				rt->rt_rmx.rmx_ssthresh = i;
			tcpstat.tcps_cachedssthresh++;
		}
	}
	/* free the reassembly queue, if any */
	for (q = tp->t_segq; q; q = nq) {
		nq = q->m_nextpkt;
		tp->t_segq = nq;
		m_freem(q);
	}
	if (tp->t_template)
		(void) m_free(dtom(tp->t_template));
	inp->inp_ppcb = NULL;
	soisdisconnected(so);
	in_pcbdetach(inp);
	tcpstat.tcps_closed++;
	return ((struct tcpcb *)0);
}
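
/*
 * Illustrative sketch (not compiled): the ssthresh caching above rounds
 * the byte limit to a whole number of segments (never fewer than two)
 * and then converts back to wire bytes, including the 40-byte tcpiphdr.
 * With assumed numbers:
 */
#if 0
#include <assert.h>

static void
ssthresh_round_example(void)
{
	unsigned long ssthresh = 8192;	/* snd_ssthresh, user data bytes */
	unsigned long maxseg = 1460;	/* t_maxseg */
	unsigned long hdr = 40;		/* sizeof(struct tcpiphdr) */
	unsigned long i;

	i = (ssthresh + maxseg / 2) / maxseg;	/* round to nearest count */
	if (i < 2)
		i = 2;				/* at least two segments */
	i *= maxseg + hdr;			/* back to packet data bytes */
	assert(i == 6 * (1460 + 40));		/* 8192/1460 rounds to 6 */
}
#endif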

void
tcp_drain()
{
	if (do_tcpdrain)
	{
		struct inpcb *inpb;
		struct tcpcb *tcpb;
		struct mbuf *m, *mq;

		/*
		 * Walk the tcpbs, if existing, and flush the reassembly queue,
		 * if there is one...
		 * XXX: The "Net/3" implementation doesn't imply that the TCP
		 * reassembly queue should be flushed, but in a situation
		 * where we're really low on mbufs, this is potentially
		 * useful.
		 */
		for (inpb = tcbinfo.listhead->lh_first; inpb;
		     inpb = inpb->inp_list.le_next) {
			if ((tcpb = intotcpcb(inpb))) {
				for (mq = tcpb->t_segq; mq; mq = m) {
					m = mq->m_nextpkt;
					tcpb->t_segq = m;
					m_freem(mq);
				}
			}
		}

	}
}

/*
 * Notify a tcp user of an asynchronous error;
 * store error as soft error, but wake up user
 * (for now, won't do anything until can select for soft error).
 */
static void
tcp_notify(inp, error)
	struct inpcb *inp;
	int error;
{
	register struct tcpcb *tp = (struct tcpcb *)inp->inp_ppcb;
	register struct socket *so = inp->inp_socket;

	/*
	 * Ignore some errors if we are hooked up.
	 * If connection hasn't completed, has retransmitted several times,
	 * and receives a second error, give up now.  This is better
	 * than waiting a long time to establish a connection that
	 * can never complete.
	 */
	if (tp->t_state == TCPS_ESTABLISHED &&
	    (error == EHOSTUNREACH || error == ENETUNREACH ||
	     error == EHOSTDOWN)) {
		return;
	} else if (tp->t_state < TCPS_ESTABLISHED && tp->t_rxtshift > 3 &&
	    tp->t_softerror)
		so->so_error = error;
	else
		tp->t_softerror = error;
	wakeup((caddr_t) &so->so_timeo);
	sorwakeup(so);
	sowwakeup(so);
}

static int
tcp_pcblist SYSCTL_HANDLER_ARGS
{
	int error, i, n, s;
	struct inpcb *inp, **inp_list;
	inp_gen_t gencnt;
	struct xinpgen xig;

	/*
	 * The process of preparing the TCB list is too time-consuming and
	 * resource-intensive to repeat twice on every request.
	 */
	if (req->oldptr == 0) {
		n = tcbinfo.ipi_count;
		req->oldidx = 2 * (sizeof xig)
			+ (n + n/8) * sizeof(struct xtcpcb);
		return 0;
	}

	if (req->newptr != 0)
		return EPERM;

	/*
	 * OK, now we're committed to doing something.
	 */
	s = splnet();
	gencnt = tcbinfo.ipi_gencnt;
	n = tcbinfo.ipi_count;
	splx(s);

	xig.xig_len = sizeof xig;
	xig.xig_count = n;
	xig.xig_gen = gencnt;
	xig.xig_sogen = so_gencnt;
	error = SYSCTL_OUT(req, &xig, sizeof xig);
	if (error)
		return error;

	inp_list = malloc(n * sizeof *inp_list, M_TEMP, M_WAITOK);
	if (inp_list == 0)
		return ENOMEM;

	s = splnet();
	for (inp = tcbinfo.listhead->lh_first, i = 0; inp && i < n;
	     inp = inp->inp_list.le_next) {
		if (inp->inp_gencnt <= gencnt && !prison_xinpcb(req->p, inp))
			inp_list[i++] = inp;
	}
	splx(s);
	n = i;

	error = 0;
	for (i = 0; i < n; i++) {
		inp = inp_list[i];
		if (inp->inp_gencnt <= gencnt) {
			struct xtcpcb xt;
			caddr_t inp_ppcb;
			xt.xt_len = sizeof xt;
			/* XXX should avoid extra copy */
			bcopy(inp, &xt.xt_inp, sizeof *inp);
			inp_ppcb = inp->inp_ppcb;
			if (inp_ppcb != NULL)
				bcopy(inp_ppcb, &xt.xt_tp, sizeof xt.xt_tp);
			else
				bzero((char *) &xt.xt_tp, sizeof xt.xt_tp);
			if (inp->inp_socket)
				sotoxsocket(inp->inp_socket, &xt.xt_socket);
			error = SYSCTL_OUT(req, &xt, sizeof xt);
		}
	}
	if (!error) {
		/*
		 * Give the user an updated idea of our state.
		 * If the generation differs from what we told
		 * her before, she knows that something happened
		 * while we were processing this request, and it
		 * might be necessary to retry.
		 */
		s = splnet();
		xig.xig_gen = tcbinfo.ipi_gencnt;
		xig.xig_sogen = so_gencnt;
		xig.xig_count = tcbinfo.ipi_count;
		splx(s);
		error = SYSCTL_OUT(req, &xig, sizeof xig);
	}
	free(inp_list, M_TEMP);
	return error;
}

SYSCTL_PROC(_net_inet_tcp, TCPCTL_PCBLIST, pcblist, CTLFLAG_RD, 0, 0,
	    tcp_pcblist, "S,xtcpcb", "List of active TCP connections");

static int
tcp_getcred SYSCTL_HANDLER_ARGS
{
	struct sockaddr_in addrs[2];
	struct inpcb *inp;
	int error, s;

	error = suser(req->p);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	s = splnet();
	inp = in_pcblookup_hash(&tcbinfo, addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port, 0, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred, sizeof(struct ucred));
out:
	splx(s);
	return (error);
}

SYSCTL_PROC(_net_inet_tcp, OID_AUTO, getcred, CTLTYPE_OPAQUE|CTLFLAG_RW,
    0, 0, tcp_getcred, "S,ucred", "Get the ucred of a TCP connection");
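
/*
 * Illustrative userland sketch (not compiled; getcred_example is
 * hypothetical): tcp_getcred above takes two sockaddr_in structures as
 * "new" sysctl data and writes a struct ucred back as "old" data.
 * Mirroring the in_pcblookup_hash() call, addrs[1] is matched against
 * the foreign address/port and addrs[0] against the local ones:
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/ucred.h>
#include <netinet/in.h>

static int
getcred_example(struct sockaddr_in *local, struct sockaddr_in *foreign,
    struct ucred *uc)
{
	struct sockaddr_in addrs[2];
	size_t len = sizeof(*uc);

	addrs[0] = *local;	/* matched as laddr/lport */
	addrs[1] = *foreign;	/* matched as faddr/fport */
	return (sysctlbyname("net.inet.tcp.getcred", uc, &len,
	    addrs, sizeof(addrs)));
}
#endif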

void
tcp_ctlinput(cmd, sa, vip)
	int cmd;
	struct sockaddr *sa;
	void *vip;
{
	register struct ip *ip = vip;
	register struct tcphdr *th;
	void (*notify) __P((struct inpcb *, int)) = tcp_notify;

	if (cmd == PRC_QUENCH)
		notify = tcp_quench;
	else if (cmd == PRC_MSGSIZE)
		notify = tcp_mtudisc;
	else if (!PRC_IS_REDIRECT(cmd) &&
		 ((unsigned)cmd > PRC_NCMDS || inetctlerrmap[cmd] == 0))
		return;
	if (ip) {
		th = (struct tcphdr *)((caddr_t)ip
				       + (IP_VHL_HL(ip->ip_vhl) << 2));
		in_pcbnotify(&tcb, sa, th->th_dport, ip->ip_src, th->th_sport,
			     cmd, notify);
	} else
		in_pcbnotify(&tcb, sa, 0, zeroin_addr, 0, cmd, notify);
}

/*
 * When a source quench is received, close congestion window
 * to one segment.  We will gradually open it again as we proceed.
 */
void
tcp_quench(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);

	if (tp)
		tp->snd_cwnd = tp->t_maxseg;
}

/*
 * When `need fragmentation' ICMP is received, update our idea of the MSS
 * based on the new value in the route.  Also nudge TCP to send something,
 * since we know the packet we just sent was dropped.
 * This duplicates some code in the tcp_mss() function in tcp_input.c.
 */
void
tcp_mtudisc(inp, errno)
	struct inpcb *inp;
	int errno;
{
	struct tcpcb *tp = intotcpcb(inp);
	struct rtentry *rt;
	struct rmxp_tao *taop;
	struct socket *so = inp->inp_socket;
	int offered;
	int mss;

	if (tp) {
		rt = tcp_rtlookup(inp);
		if (!rt || !rt->rt_rmx.rmx_mtu) {
			tp->t_maxopd = tp->t_maxseg = tcp_mssdflt;
			return;
		}
		taop = rmx_taop(rt->rt_rmx);
		offered = taop->tao_mssopt;
		mss = rt->rt_rmx.rmx_mtu - sizeof(struct tcpiphdr);
		if (offered)
			mss = min(mss, offered);
		/*
		 * XXX - The above conditional probably violates the TCP
		 * spec.  The problem is that, since we don't know the
		 * other end's MSS, we are supposed to use a conservative
		 * default.  But, if we do that, then MTU discovery will
		 * never actually take place, because the conservative
		 * default is much less than the MTUs typically seen
		 * on the Internet today.  For the moment, we'll sweep
		 * this under the carpet.
		 *
		 * The conservative default might not actually be a problem
		 * if the only case this occurs is when sending an initial
		 * SYN with options and data to a host we've never talked
		 * to before.  Then, they will reply with an MSS value which
		 * will get recorded and the new parameters should get
		 * recomputed.  For Further Study.
		 */
		if (tp->t_maxopd <= mss)
			return;
		tp->t_maxopd = mss;

		if ((tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP &&
		    (tp->t_flags & TF_RCVD_TSTMP) == TF_RCVD_TSTMP)
			mss -= TCPOLEN_TSTAMP_APPA;
		if ((tp->t_flags & (TF_REQ_CC|TF_NOOPT)) == TF_REQ_CC &&
		    (tp->t_flags & TF_RCVD_CC) == TF_RCVD_CC)
			mss -= TCPOLEN_CC_APPA;
#if	(MCLBYTES & (MCLBYTES - 1)) == 0
		if (mss > MCLBYTES)
			mss &= ~(MCLBYTES-1);
#else
		if (mss > MCLBYTES)
			mss = mss / MCLBYTES * MCLBYTES;
#endif
		if (so->so_snd.sb_hiwat < mss)
			mss = so->so_snd.sb_hiwat;

		tp->t_maxseg = mss;

		tcpstat.tcps_mturesent++;
		tp->t_rtttime = 0;
		tp->snd_nxt = tp->snd_una;
		tcp_output(tp);
	}
}
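
/*
 * Illustrative sketch (not compiled): the MSS clamp above, with assumed
 * numbers.  A 9180-byte path MTU less the 40-byte tcpiphdr gives 9140;
 * since that exceeds MCLBYTES (assumed 2048 here), it is rounded down
 * to a multiple of the cluster size so that full mbuf clusters are used:
 */
#if 0
#include <assert.h>

static void
mss_clamp_example(void)
{
	int mclbytes = 2048;		/* assumed MCLBYTES, power of two */
	int mss = 9180 - 40;		/* rmx_mtu - sizeof tcpiphdr */

	if (mss > mclbytes)
		mss &= ~(mclbytes - 1);	/* round down to cluster multiple */
	assert(mss == 8192);		/* 9140 -> 4 * 2048 */
}
#endif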

/*
 * Look-up the routing entry to the peer of this inpcb.  If no route
 * is found and it cannot be allocated, return NULL.  This routine
 * is called by TCP routines that access the rmx structure and by tcp_mss
 * to get the interface MTU.
 */
struct rtentry *
tcp_rtlookup(inp)
	struct inpcb *inp;
{
	struct route *ro;
	struct rtentry *rt;

	ro = &inp->inp_route;
	rt = ro->ro_rt;
	if (rt == NULL || !(rt->rt_flags & RTF_UP)) {
		/* No route yet, so try to acquire one */
		if (inp->inp_faddr.s_addr != INADDR_ANY) {
			ro->ro_dst.sa_family = AF_INET;
			ro->ro_dst.sa_len = sizeof(ro->ro_dst);
			((struct sockaddr_in *) &ro->ro_dst)->sin_addr =
				inp->inp_faddr;
			rtalloc(ro);
			rt = ro->ro_rt;
		}
	}
	return rt;
}

/*
 * Return a pointer to the cached information about the remote host.
 * The cached information is stored in the protocol specific part of
 * the route metrics.
 */
struct rmxp_tao *
tcp_gettaocache(inp)
	struct inpcb *inp;
{
	struct rtentry *rt = tcp_rtlookup(inp);

	/* Make sure this is a host route and is up. */
	if (rt == NULL ||
	    (rt->rt_flags & (RTF_UP|RTF_HOST)) != (RTF_UP|RTF_HOST))
		return NULL;

	return rmx_taop(rt->rt_rmx);
}

/*
 * Clear all the TAO cache entries, called from tcp_init.
 *
 * XXX
 * This routine is just an empty one, because we assume that the
 * routing tables are initialized at the same time as TCP, so there
 * is nothing left over in the cache.
 */
static void
tcp_cleartaocache()
{
}